Mirror of https://gitee.com/fastnlp/fastNLP.git, synced 2024-11-30 03:07:59 +08:00
Add runnable ipynb notebooks for tutorial_5 and tutorial_6
This commit is contained in:
parent c4b2a8bac3
commit 3b2362d697

603 tutorials/tutorial_5_loss_optimizer.ipynb Normal file
@@ -0,0 +1,603 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Quick training and testing with Trainer and Tester"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Loading and preprocessing the data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/remote-home/ynzheng/anaconda3/envs/now/lib/python3.8/site-packages/FastNLP-0.5.0-py3.8.egg/fastNLP/io/loader/classification.py:340: UserWarning: SST2's test file has no target.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "In total 3 datasets:\n",
      "\ttest has 1821 instances.\n",
      "\ttrain has 67349 instances.\n",
      "\tdev has 872 instances.\n",
      "In total 2 vocabs:\n",
      "\twords has 16292 entries.\n",
      "\ttarget has 2 entries.\n",
      "\n",
      "+-----------------------------------+--------+-----------------------------------+---------+\n",
      "|             raw_words             | target |               words               | seq_len |\n",
      "+-----------------------------------+--------+-----------------------------------+---------+\n",
      "| hide new secretions from the p... |   1    | [4110, 97, 12009, 39, 2, 6843,... |    7    |\n",
      "+-----------------------------------+--------+-----------------------------------+---------+\n",
      "Vocabulary(['hide', 'new', 'secretions', 'from', 'the']...)\n"
     ]
    }
   ],
   "source": [
    "from fastNLP.io import SST2Pipe\n",
    "\n",
    "pipe = SST2Pipe()\n",
    "databundle = pipe.process_from_file()\n",
    "vocab = databundle.get_vocab('words')\n",
    "print(databundle)\n",
    "print(databundle.get_dataset('train')[0])\n",
    "print(databundle.get_vocab('words'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "4925 872 75\n"
     ]
    }
   ],
   "source": [
    "train_data = databundle.get_dataset('train')[:5000]\n",
    "train_data, test_data = train_data.split(0.015)\n",
    "dev_data = databundle.get_dataset('dev')\n",
    "print(len(train_data), len(dev_data), len(test_data))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+-------------+-----------+--------+-------+---------+\n",
      "| field_names | raw_words | target | words | seq_len |\n",
      "+-------------+-----------+--------+-------+---------+\n",
      "|  is_input   |   False   | False  | True  |  True   |\n",
      "|  is_target  |   False   |  True  | False |  False  |\n",
      "| ignore_type |           | False  | False |  False  |\n",
      "|  pad_value  |           |   0    |   0   |    0    |\n",
      "+-------------+-----------+--------+-------+---------+\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "<prettytable.PrettyTable at 0x7f49ec540160>"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "train_data.print_field_meta()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Training with a built-in model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "from fastNLP.models import CNNText\n",
    "\n",
    "# dimension of the word embeddings\n",
    "EMBED_DIM = 100\n",
    "\n",
    "# CNNText takes a tuple as its first argument; it defines the model's embedding (vocab size, embedding dim).\n",
    "# Custom values for kernel_nums, kernel_sizes, padding and dropout can also be passed in.\n",
    "model_cnn = CNNText((len(vocab), EMBED_DIM), num_classes=2, dropout=0.1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "from fastNLP import AccuracyMetric\n",
    "from fastNLP import Const\n",
    "\n",
    "# In this example, metrics=AccuracyMetric() is equivalent to the line below.\n",
    "metrics = AccuracyMetric(pred=Const.OUTPUT, target=Const.TARGET)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "from fastNLP import CrossEntropyLoss\n",
    "\n",
    "# In this example, loss = CrossEntropyLoss() is equivalent to the line below.\n",
    "loss = CrossEntropyLoss(pred=Const.OUTPUT, target=Const.TARGET)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# This builds a loss object whose value is computed by func: among the model's return values and the\n",
    "# DataSet fields marked target=True, the field named Const.OUTPUT ('pred') is passed to func as its\n",
    "# `input` argument, and the field named Const.TARGET ('target') is passed as its `target` argument.\n",
    "# The cross-entropy loss built by hand here behaves the same as fastNLP's CrossEntropyLoss used above.\n",
    "import torch\n",
    "from fastNLP import LossFunc\n",
    "func = torch.nn.functional.cross_entropy\n",
    "loss_func = LossFunc(func, input=Const.OUTPUT, target=Const.TARGET)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch.optim as optim\n",
    "\n",
    "# define the optimizer with torch.optim\n",
    "optimizer = optim.RMSprop(model_cnn.parameters(), lr=0.01, alpha=0.99, eps=1e-08, weight_decay=0, momentum=0, centered=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "input fields after batch(if batch size is 2):\n",
      "\twords: (1)type:torch.Tensor (2)dtype:torch.int64, (3)shape:torch.Size([2, 4]) \n",
      "\tseq_len: (1)type:torch.Tensor (2)dtype:torch.int64, (3)shape:torch.Size([2]) \n",
      "target fields after batch(if batch size is 2):\n",
      "\ttarget: (1)type:torch.Tensor (2)dtype:torch.int64, (3)shape:torch.Size([2]) \n",
      "\n",
      "training epochs started 2020-02-27-11-31-25\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Evaluate data in 0.75 seconds!\n",
      "Evaluation on dev at Epoch 1/10. Step:308/3080: \n",
      "AccuracyMetric: acc=0.751147\n",
      "\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Evaluate data in 0.83 seconds!\n",
      "Evaluation on dev at Epoch 2/10. Step:616/3080: \n",
      "AccuracyMetric: acc=0.755734\n",
      "\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Evaluate data in 1.32 seconds!\n",
      "Evaluation on dev at Epoch 3/10. Step:924/3080: \n",
      "AccuracyMetric: acc=0.758028\n",
      "\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Evaluate data in 0.88 seconds!\n",
      "Evaluation on dev at Epoch 4/10. Step:1232/3080: \n",
      "AccuracyMetric: acc=0.741972\n",
      "\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Evaluate data in 0.96 seconds!\n",
      "Evaluation on dev at Epoch 5/10. Step:1540/3080: \n",
      "AccuracyMetric: acc=0.728211\n",
      "\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Evaluate data in 0.87 seconds!\n",
      "Evaluation on dev at Epoch 6/10. Step:1848/3080: \n",
      "AccuracyMetric: acc=0.755734\n",
      "\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Evaluate data in 1.04 seconds!\n",
      "Evaluation on dev at Epoch 7/10. Step:2156/3080: \n",
      "AccuracyMetric: acc=0.732798\n",
      "\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Evaluate data in 0.57 seconds!\n",
      "Evaluation on dev at Epoch 8/10. Step:2464/3080: \n",
      "AccuracyMetric: acc=0.747706\n",
      "\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Evaluate data in 0.48 seconds!\n",
      "Evaluation on dev at Epoch 9/10. Step:2772/3080: \n",
      "AccuracyMetric: acc=0.732798\n",
      "\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Evaluate data in 0.48 seconds!\n",
      "Evaluation on dev at Epoch 10/10. Step:3080/3080: \n",
      "AccuracyMetric: acc=0.740826\n",
      "\n",
      "In Epoch:3/Step:924, got best dev performance:\n",
      "AccuracyMetric: acc=0.758028\n",
      "Reloaded the best model.\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "{'best_eval': {'AccuracyMetric': {'acc': 0.758028}},\n",
       " 'best_epoch': 3,\n",
       " 'best_step': 924,\n",
       " 'seconds': 160.58}"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from fastNLP import Trainer\n",
    "\n",
    "# number of training epochs and the batch size\n",
    "N_EPOCHS = 10\n",
    "BATCH_SIZE = 16\n",
    "\n",
    "# If no optimizer is passed when the Trainer is constructed, it defaults to torch.optim.Adam with lr=4e-3.\n",
    "# Only `loss` is used as the loss function here; feel free to try others (e.g. the custom loss_func above).\n",
    "trainer = Trainer(model=model_cnn, train_data=train_data, dev_data=dev_data, loss=loss, metrics=metrics,\n",
    "                  optimizer=optimizer, n_epochs=N_EPOCHS, batch_size=BATCH_SIZE)\n",
    "trainer.train()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Evaluate data in 0.43 seconds!\n",
      "[tester] \n",
      "AccuracyMetric: acc=0.773333\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "{'AccuracyMetric': {'acc': 0.773333}}"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from fastNLP import Tester\n",
    "\n",
    "tester = Tester(test_data, model_cnn, metrics=AccuracyMetric())\n",
    "tester.test()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python Now",
   "language": "python",
   "name": "now"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
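Condensed, the pipeline this notebook walks through is the short script below. It is a minimal sketch assuming fastNLP 0.5 and the same SST2Pipe preprocessing as above; it reuses only the calls that appear in the cells of tutorial_5 itself.

from fastNLP.io import SST2Pipe
from fastNLP import Trainer, Tester, CrossEntropyLoss, AccuracyMetric
from fastNLP.models import CNNText
import torch.optim as optim

# Load and preprocess SST-2; the pipe indexes words and adds a seq_len field.
databundle = SST2Pipe().process_from_file()
vocab = databundle.get_vocab('words')
train_data, test_data = databundle.get_dataset('train')[:5000].split(0.015)
dev_data = databundle.get_dataset('dev')

# Built-in text CNN: the (vocab size, embedding dim) tuple defines the embedding.
model = CNNText((len(vocab), 100), num_classes=2, dropout=0.1)

trainer = Trainer(model=model, train_data=train_data, dev_data=dev_data,
                  loss=CrossEntropyLoss(), metrics=AccuracyMetric(),
                  optimizer=optim.RMSprop(model.parameters(), lr=0.01),
                  n_epochs=10, batch_size=16)
trainer.train()  # evaluates on dev each epoch and reloads the best model

Tester(test_data, model, metrics=AccuracyMetric()).test()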
681 tutorials/tutorial_6_datasetiter.ipynb Normal file
@@ -0,0 +1,681 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Using DataSetIter to write your own training loop"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Loading and preprocessing the data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/remote-home/ynzheng/anaconda3/envs/now/lib/python3.8/site-packages/FastNLP-0.5.0-py3.8.egg/fastNLP/io/loader/classification.py:340: UserWarning: SST2's test file has no target.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "In total 3 datasets:\n",
      "\ttest has 1821 instances.\n",
      "\ttrain has 67349 instances.\n",
      "\tdev has 872 instances.\n",
      "In total 2 vocabs:\n",
      "\twords has 16292 entries.\n",
      "\ttarget has 2 entries.\n",
      "\n",
      "+-----------------------------------+--------+-----------------------------------+---------+\n",
      "|             raw_words             | target |               words               | seq_len |\n",
      "+-----------------------------------+--------+-----------------------------------+---------+\n",
      "| hide new secretions from the p... |   1    | [4110, 97, 12009, 39, 2, 6843,... |    7    |\n",
      "+-----------------------------------+--------+-----------------------------------+---------+\n",
      "Vocabulary(['hide', 'new', 'secretions', 'from', 'the']...)\n"
     ]
    }
   ],
   "source": [
    "from fastNLP.io import SST2Pipe\n",
    "\n",
    "pipe = SST2Pipe()\n",
    "databundle = pipe.process_from_file()\n",
    "vocab = databundle.get_vocab('words')\n",
    "print(databundle)\n",
    "print(databundle.get_dataset('train')[0])\n",
    "print(databundle.get_vocab('words'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "4925 872 75\n"
     ]
    }
   ],
   "source": [
    "train_data = databundle.get_dataset('train')[:5000]\n",
    "train_data, test_data = train_data.split(0.015)\n",
    "dev_data = databundle.get_dataset('dev')\n",
    "print(len(train_data), len(dev_data), len(test_data))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+-------------+-----------+--------+-------+---------+\n",
      "| field_names | raw_words | target | words | seq_len |\n",
      "+-------------+-----------+--------+-------+---------+\n",
      "|  is_input   |   False   | False  | True  |  True   |\n",
      "|  is_target  |   False   |  True  | False |  False  |\n",
      "| ignore_type |           | False  | False |  False  |\n",
      "|  pad_value  |           |   0    |   0   |    0    |\n",
      "+-------------+-----------+--------+-------+---------+\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "<prettytable.PrettyTable at 0x7f0db03d0640>"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "train_data.print_field_meta()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "from fastNLP import AccuracyMetric\n",
    "from fastNLP import Const\n",
    "\n",
    "# In this example, metrics=AccuracyMetric() is equivalent to the line below.\n",
    "metrics = AccuracyMetric(pred=Const.OUTPUT, target=Const.TARGET)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## A first look at DataSetIter"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "batch_x: {'words': tensor([[ 13, 830, 7746, 174, 3, 47, 6, 83, 5752, 15,\n",
      " 2177, 15, 63, 57, 406, 84, 1009, 4973, 27, 17,\n",
      " 13785, 3, 533, 3687, 15623, 39, 375, 8, 15624, 8,\n",
      " 1323, 4398, 7],\n",
      " [ 1045, 11113, 16, 104, 5, 4, 176, 1824, 1704, 3,\n",
      " 2, 18, 11, 4, 1018, 432, 143, 33, 245, 308,\n",
      " 7, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
      " 0, 0, 0]]), 'seq_len': tensor([33, 21])}\n",
      "batch_y: {'target': tensor([1, 0])}\n",
      "batch_x: {'words': tensor([[ 14, 10, 4, 311, 5, 154, 1418, 609, 7],\n",
      " [ 14, 10, 437, 32, 78, 3, 78, 437, 7]]), 'seq_len': tensor([9, 9])}\n",
      "batch_y: {'target': tensor([0, 1])}\n",
      "batch_x: {'words': tensor([[ 4, 277, 685, 18, 7],\n",
      " [15618, 3204, 5, 1675, 0]]), 'seq_len': tensor([5, 4])}\n",
      "batch_y: {'target': tensor([1, 1])}\n",
      "batch_x: {'words': tensor([[ 2, 155, 3, 4426, 3, 239, 3, 739, 5, 1136,\n",
      " 41, 43, 2427, 736, 2, 648, 10, 15620, 2285, 7],\n",
      " [ 24, 95, 28, 46, 8, 336, 38, 239, 8, 2133,\n",
      " 2, 18, 10, 15622, 1421, 6, 61, 5, 387, 7]]), 'seq_len': tensor([20, 20])}\n",
      "batch_y: {'target': tensor([0, 0])}\n",
      "batch_x: {'words': tensor([[ 879, 96, 8, 1026, 12, 8067, 11, 13623, 8, 15619,\n",
      " 4, 673, 662, 15, 4, 1154, 240, 639, 417, 7],\n",
      " [ 45, 752, 327, 180, 10, 15621, 16, 72, 8904, 9,\n",
      " 1217, 7, 0, 0, 0, 0, 0, 0, 0, 0]]), 'seq_len': tensor([20, 12])}\n",
      "batch_y: {'target': tensor([0, 1])}\n"
     ]
    }
   ],
   "source": [
    "from fastNLP import BucketSampler\n",
    "from fastNLP import DataSetIter\n",
    "\n",
    "tmp_data = dev_data[:10]\n",
    "# Build a batch iterator: pass the DataSet in and specify the batch_size and the batching rule:\n",
    "# sequential order (Sequential), random order (Random), or similar lengths grouped into one batch (Bucket).\n",
    "sampler = BucketSampler(batch_size=2, seq_len_field_name='seq_len')\n",
    "batch = DataSetIter(batch_size=2, dataset=tmp_data, sampler=sampler)\n",
    "for batch_x, batch_y in batch:\n",
    "    print(\"batch_x: \", batch_x)\n",
    "    print(\"batch_y: \", batch_y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "batch_x: {'words': tensor([[ 13, 830, 7746, 174, 3, 47, 6, 83, 5752, 15,\n",
      " 2177, 15, 63, 57, 406, 84, 1009, 4973, 27, 17,\n",
      " 13785, 3, 533, 3687, 15623, 39, 375, 8, 15624, 8,\n",
      " 1323, 4398, 7],\n",
      " [ 1045, 11113, 16, 104, 5, 4, 176, 1824, 1704, 3,\n",
      " 2, 18, 11, 4, 1018, 432, 143, 33, 245, 308,\n",
      " 7, -1, -1, -1, -1, -1, -1, -1, -1, -1,\n",
      " -1, -1, -1]]), 'seq_len': tensor([33, 21])}\n",
      "batch_y: {'target': tensor([1, 0])}\n",
      "batch_x: {'words': tensor([[ 14, 10, 4, 311, 5, 154, 1418, 609, 7],\n",
      " [ 14, 10, 437, 32, 78, 3, 78, 437, 7]]), 'seq_len': tensor([9, 9])}\n",
      "batch_y: {'target': tensor([0, 1])}\n",
      "batch_x: {'words': tensor([[ 2, 155, 3, 4426, 3, 239, 3, 739, 5, 1136,\n",
      " 41, 43, 2427, 736, 2, 648, 10, 15620, 2285, 7],\n",
      " [ 24, 95, 28, 46, 8, 336, 38, 239, 8, 2133,\n",
      " 2, 18, 10, 15622, 1421, 6, 61, 5, 387, 7]]), 'seq_len': tensor([20, 20])}\n",
      "batch_y: {'target': tensor([0, 0])}\n",
      "batch_x: {'words': tensor([[ 4, 277, 685, 18, 7],\n",
      " [15618, 3204, 5, 1675, -1]]), 'seq_len': tensor([5, 4])}\n",
      "batch_y: {'target': tensor([1, 1])}\n",
      "batch_x: {'words': tensor([[ 879, 96, 8, 1026, 12, 8067, 11, 13623, 8, 15619,\n",
      " 4, 673, 662, 15, 4, 1154, 240, 639, 417, 7],\n",
      " [ 45, 752, 327, 180, 10, 15621, 16, 72, 8904, 9,\n",
      " 1217, 7, -1, -1, -1, -1, -1, -1, -1, -1]]), 'seq_len': tensor([20, 12])}\n",
      "batch_y: {'target': tensor([0, 1])}\n"
     ]
    }
   ],
   "source": [
    "tmp_data.set_pad_val('words', -1)\n",
    "batch = DataSetIter(batch_size=2, dataset=tmp_data, sampler=sampler)\n",
    "for batch_x, batch_y in batch:\n",
    "    print(\"batch_x: \", batch_x)\n",
    "    print(\"batch_y: \", batch_y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "batch_x: {'words': tensor([[ 45, 752, 327, 180, 10, 15621, 16, 72, 8904, 9,\n",
      " 1217, 7, 0, 0, 0, 0, 0, 0, 0, 0,\n",
      " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
      " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n",
      " [ 879, 96, 8, 1026, 12, 8067, 11, 13623, 8, 15619,\n",
      " 4, 673, 662, 15, 4, 1154, 240, 639, 417, 7,\n",
      " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
      " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]), 'seq_len': tensor([12, 20])}\n",
      "batch_y: {'target': tensor([1, 0])}\n",
      "batch_x: {'words': tensor([[ 13, 830, 7746, 174, 3, 47, 6, 83, 5752, 15,\n",
      " 2177, 15, 63, 57, 406, 84, 1009, 4973, 27, 17,\n",
      " 13785, 3, 533, 3687, 15623, 39, 375, 8, 15624, 8,\n",
      " 1323, 4398, 7, 0, 0, 0, 0, 0, 0, 0],\n",
      " [ 1045, 11113, 16, 104, 5, 4, 176, 1824, 1704, 3,\n",
      " 2, 18, 11, 4, 1018, 432, 143, 33, 245, 308,\n",
      " 7, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
      " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]), 'seq_len': tensor([33, 21])}\n",
      "batch_y: {'target': tensor([1, 0])}\n",
      "batch_x: {'words': tensor([[ 14, 10, 4, 311, 5, 154, 1418, 609, 7, 0, 0, 0,\n",
      " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
      " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
      " 0, 0, 0, 0],\n",
      " [ 14, 10, 437, 32, 78, 3, 78, 437, 7, 0, 0, 0,\n",
      " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
      " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
      " 0, 0, 0, 0]]), 'seq_len': tensor([9, 9])}\n",
      "batch_y: {'target': tensor([0, 1])}\n",
      "batch_x: {'words': tensor([[ 2, 155, 3, 4426, 3, 239, 3, 739, 5, 1136,\n",
      " 41, 43, 2427, 736, 2, 648, 10, 15620, 2285, 7,\n",
      " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
      " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n",
      " [ 24, 95, 28, 46, 8, 336, 38, 239, 8, 2133,\n",
      " 2, 18, 10, 15622, 1421, 6, 61, 5, 387, 7,\n",
      " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
      " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]), 'seq_len': tensor([20, 20])}\n",
      "batch_y: {'target': tensor([0, 0])}\n",
      "batch_x: {'words': tensor([[ 4, 277, 685, 18, 7, 0, 0, 0, 0, 0,\n",
      " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
      " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
      " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n",
      " [15618, 3204, 5, 1675, 0, 0, 0, 0, 0, 0,\n",
      " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
      " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
      " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]), 'seq_len': tensor([5, 4])}\n",
      "batch_y: {'target': tensor([1, 1])}\n"
     ]
    }
   ],
   "source": [
    "from fastNLP.core.field import Padder\n",
    "import numpy as np\n",
    "\n",
    "class FixLengthPadder(Padder):\n",
    "    def __init__(self, pad_val=0, length=None):\n",
    "        super().__init__(pad_val=pad_val)\n",
    "        self.length = length\n",
    "        assert self.length is not None, \"Creating FixLengthPadder with no specific length!\"\n",
    "\n",
    "    def __call__(self, contents, field_name, field_ele_dtype, dim):\n",
    "        # compute the maximum length in the current contents\n",
    "        max_len = max(map(len, contents))\n",
    "        # raise an error if it exceeds the padder's fixed length\n",
    "        assert max_len <= self.length, \"Fixed padder length is smaller than the actual max length {}!\".format(max_len)\n",
    "        array = np.full((len(contents), self.length), self.pad_val, dtype=field_ele_dtype)\n",
    "        for i, content_i in enumerate(contents):\n",
    "            array[i, :len(content_i)] = content_i\n",
    "        return array\n",
    "\n",
    "# fix the FixLengthPadder length at 40\n",
    "tmp_padder = FixLengthPadder(pad_val=0, length=40)\n",
    "# use the dataset's set_padder method to attach the padder to the 'words' field\n",
    "tmp_data.set_padder('words', tmp_padder)\n",
    "batch = DataSetIter(batch_size=2, dataset=tmp_data, sampler=sampler)\n",
    "for batch_x, batch_y in batch:\n",
    "    print(\"batch_x: \", batch_x)\n",
    "    print(\"batch_y: \", batch_y)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Writing your own training loop with DataSetIter\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "-----start training-----\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Evaluate data in 2.68 seconds!\n",
      "Epoch 0 Avg Loss: 0.66 AccuracyMetric: acc=0.708716 29307ms\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Evaluate data in 0.38 seconds!\n",
      "Epoch 1 Avg Loss: 0.41 AccuracyMetric: acc=0.770642 52200ms\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Evaluate data in 0.51 seconds!\n",
      "Epoch 2 Avg Loss: 0.16 AccuracyMetric: acc=0.747706 70268ms\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Evaluate data in 0.96 seconds!\n",
      "Epoch 3 Avg Loss: 0.06 AccuracyMetric: acc=0.741972 90349ms\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Evaluate data in 1.04 seconds!\n",
      "Epoch 4 Avg Loss: 0.03 AccuracyMetric: acc=0.740826 114250ms\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Evaluate data in 0.8 seconds!\n",
      "Epoch 5 Avg Loss: 0.02 AccuracyMetric: acc=0.738532 134742ms\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Evaluate data in 0.65 seconds!\n",
      "Epoch 6 Avg Loss: 0.01 AccuracyMetric: acc=0.731651 154503ms\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Evaluate data in 0.8 seconds!\n",
      "Epoch 7 Avg Loss: 0.01 AccuracyMetric: acc=0.738532 175397ms\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Evaluate data in 0.36 seconds!\n",
      "Epoch 8 Avg Loss: 0.01 AccuracyMetric: acc=0.733945 192384ms\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Evaluate data in 0.84 seconds!\n",
      "Epoch 9 Avg Loss: 0.01 AccuracyMetric: acc=0.744266 214417ms\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Evaluate data in 0.04 seconds!\n",
      "[tester] \n",
      "AccuracyMetric: acc=0.786667\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "{'AccuracyMetric': {'acc': 0.786667}}"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from fastNLP import BucketSampler\n",
    "from fastNLP import DataSetIter\n",
    "from fastNLP.models import CNNText\n",
    "from fastNLP import Tester\n",
    "import torch\n",
    "import time\n",
    "\n",
    "embed_dim = 100\n",
    "model = CNNText((len(vocab), embed_dim), num_classes=2, dropout=0.1)\n",
    "\n",
    "def train(epoch, data, devdata):\n",
    "    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)\n",
    "    lossfunc = torch.nn.CrossEntropyLoss()\n",
    "    batch_size = 32\n",
    "\n",
    "    # Build a batch iterator: pass the DataSet in and specify the batch_size and the batching rule:\n",
    "    # sequential order (Sequential), random order (Random), or similar lengths grouped into one batch (Bucket).\n",
    "    train_sampler = BucketSampler(batch_size=batch_size, seq_len_field_name='seq_len')\n",
    "    train_batch = DataSetIter(batch_size=batch_size, dataset=data, sampler=train_sampler)\n",
    "\n",
    "    start_time = time.time()\n",
    "    print(\"-\"*5 + \"start training\" + \"-\"*5)\n",
    "    for i in range(epoch):\n",
    "        loss_list = []\n",
    "        for batch_x, batch_y in train_batch:\n",
    "            optimizer.zero_grad()\n",
    "            output = model(batch_x['words'])\n",
    "            loss = lossfunc(output['pred'], batch_y['target'])\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "            loss_list.append(loss.item())\n",
    "\n",
    "        # With verbose=0, the Tester's test() prints nothing and just returns the evaluation results;\n",
    "        # with verbose=1 it also prints them. After test() has run, _format_eval_results(res)\n",
    "        # renders the results as a structured string.\n",
    "        tester_tmp = Tester(devdata, model, metrics=AccuracyMetric(), verbose=0)\n",
    "        res = tester_tmp.test()\n",
    "\n",
    "        print('Epoch {:d} Avg Loss: {:.2f}'.format(i, sum(loss_list) / len(loss_list)), end=\" \")\n",
    "        print(tester_tmp._format_eval_results(res), end=\" \")\n",
    "        print('{:d}ms'.format(round((time.time() - start_time) * 1000)))\n",
    "        loss_list.clear()\n",
    "\n",
    "train(10, train_data, dev_data)\n",
    "# quick final evaluation with a Tester\n",
    "tester = Tester(test_data, model, metrics=AccuracyMetric())\n",
    "tester.test()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python Now",
   "language": "python",
   "name": "now"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
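As a companion to the padder cells above, the sketch below shows the same FixLengthPadder attached to a hand-built DataSet instead of the SST-2 dev slice. It is a minimal sketch assuming fastNLP 0.5's DataSet/Instance API and that the FixLengthPadder class from the notebook is in scope; the toy sequences and the fixed length of 6 are illustrative, not from the tutorial.

from fastNLP import DataSet, Instance, DataSetIter

# Toy DataSet with two variable-length index sequences.
ds = DataSet()
ds.append(Instance(words=[1, 2, 3], target=0))
ds.append(Instance(words=[4, 5], target=1))
ds.set_input('words')
ds.set_target('target')

# Attach the FixLengthPadder defined in the notebook: every batch of
# 'words' is now padded to exactly 6 positions, not to the batch maximum.
ds.set_padder('words', FixLengthPadder(pad_val=0, length=6))

for batch_x, batch_y in DataSetIter(dataset=ds, batch_size=2):
    print(batch_x['words'].shape)  # expected: torch.Size([2, 6])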