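"""Training script for the event trigger and argument extraction model (model.Net).

A minimal sketch of an invocation, assuming preprocessed ACE 2005 JSON files
sit at the default paths below:

    python train.py --batch_size 24 --lr 0.00002 --n_epochs 50 \
        --trainset data/train.json --devset data/dev.json --testset data/test.json
"""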
import os
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils import data
from model import Net
from data_load import ACE2005Dataset, pad, all_triggers, all_entities, all_postags, all_arguments, tokenizer
from utils import report_to_telegram
from eval import eval


def train(model, iterator, optimizer, criterion):
    model.train()
    for i, batch in enumerate(iterator):
        tokens_x_2d, entities_x_3d, postags_x_2d, triggers_y_2d, arguments_2d, seqlens_1d, head_indexes_2d, words_2d, triggers_2d = batch
        optimizer.zero_grad()

        trigger_logits, triggers_y_2d, trigger_hat_2d, argument_hidden, argument_keys = model.module.predict_triggers(
            tokens_x_2d=tokens_x_2d, entities_x_3d=entities_x_3d,
            postags_x_2d=postags_x_2d, head_indexes_2d=head_indexes_2d,
            triggers_y_2d=triggers_y_2d, arguments_2d=arguments_2d)
        trigger_logits = trigger_logits.view(-1, trigger_logits.shape[-1])
        trigger_loss = criterion(trigger_logits, triggers_y_2d.view(-1))

        if len(argument_keys) > 0:
            argument_logits, arguments_y_1d, argument_hat_1d, argument_hat_2d = model.module.predict_arguments(argument_hidden, argument_keys, arguments_2d)
            argument_loss = criterion(argument_logits, arguments_y_1d)
            # Argument classification is weighted twice as heavily as trigger detection.
            loss = trigger_loss + 2 * argument_loss

            if i == 0:
                print("=====sanity check for arguments======")
                print('arguments_y_1d:', arguments_y_1d)
                print("arguments_2d[0]:", arguments_2d[0]['events'])
                print("argument_hat_2d[0]:", argument_hat_2d[0]['events'])
                print("=======================")
        else:
            loss = trigger_loss

        loss.backward()
        # Clip gradients after backward() so the norm bound applies to the
        # gradients the optimizer step will actually use.
        nn.utils.clip_grad_norm_(model.parameters(), 1.0)
        optimizer.step()

        if i == 0:
            print("=====sanity check======")
            print("tokens_x_2d[0]:", tokenizer.convert_ids_to_tokens(tokens_x_2d[0])[:seqlens_1d[0]])
            print("entities_x_3d[0]:", entities_x_3d[0][:seqlens_1d[0]])
            print("postags_x_2d[0]:", postags_x_2d[0][:seqlens_1d[0]])
            print("head_indexes_2d[0]:", head_indexes_2d[0][:seqlens_1d[0]])
            print("triggers_2d[0]:", triggers_2d[0])
            print("triggers_y_2d[0]:", triggers_y_2d.cpu().numpy().tolist()[0][:seqlens_1d[0]])
            print('trigger_hat_2d[0]:', trigger_hat_2d.cpu().numpy().tolist()[0][:seqlens_1d[0]])
            print("seqlens_1d[0]:", seqlens_1d[0])
            print("arguments_2d[0]:", arguments_2d[0])
            print("=======================")

        if i % 10 == 0:  # monitoring
            print("step: {}, loss: {}".format(i, loss.item()))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--batch_size", type=int, default=24)
    parser.add_argument("--lr", type=float, default=0.00002)
    parser.add_argument("--n_epochs", type=int, default=50)
    parser.add_argument("--logdir", type=str, default="logdir")
    parser.add_argument("--trainset", type=str, default="data/train.json")
    parser.add_argument("--devset", type=str, default="data/dev.json")
    parser.add_argument("--testset", type=str, default="data/test.json")
    parser.add_argument("--telegram_bot_token", type=str, default="")
    parser.add_argument("--telegram_chat_id", type=str, default="")
    hp = parser.parse_args()

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model = Net(
        device=device,
        trigger_size=len(all_triggers),
        entity_size=len(all_entities),
        all_postags=len(all_postags),
        argument_size=len(all_arguments)
    )
    if device == 'cuda':
        model = model.cuda()
    # train() reaches the network through model.module, so wrap with
    # DataParallel on CPU as well; without visible GPUs the wrapper is a no-op.
    model = nn.DataParallel(model)
    train_dataset = ACE2005Dataset(hp.trainset)
    dev_dataset = ACE2005Dataset(hp.devset)
    test_dataset = ACE2005Dataset(hp.testset)

    samples_weight = train_dataset.get_samples_weight()
    sampler = torch.utils.data.WeightedRandomSampler(samples_weight, len(samples_weight))

    train_iter = data.DataLoader(dataset=train_dataset,
                                 batch_size=hp.batch_size,
                                 shuffle=False,
                                 sampler=sampler,
                                 num_workers=4,
                                 collate_fn=pad)
    dev_iter = data.DataLoader(dataset=dev_dataset,
                               batch_size=hp.batch_size,
                               shuffle=False,
                               num_workers=4,
                               collate_fn=pad)
    test_iter = data.DataLoader(dataset=test_dataset,
                                batch_size=hp.batch_size,
                                shuffle=False,
                                num_workers=4,
                                collate_fn=pad)
    optimizer = optim.Adam(model.parameters(), lr=hp.lr)
    # optimizer = optim.Adadelta(model.parameters(), lr=1.0, weight_decay=1e-2)

    # Index 0 (the padding label) is excluded from the loss.
    criterion = nn.CrossEntropyLoss(ignore_index=0)

    if not os.path.exists(hp.logdir):
        os.makedirs(hp.logdir)

    for epoch in range(1, hp.n_epochs + 1):
        train(model, train_iter, optimizer, criterion)

        fname = os.path.join(hp.logdir, str(epoch))
        print(f"=========eval dev at epoch={epoch}=========")
        metric_dev = eval(model, dev_iter, fname + '_dev')

        print(f"=========eval test at epoch={epoch}=========")
        metric_test = eval(model, test_iter, fname + '_test')

        if hp.telegram_bot_token:
            report_to_telegram('[epoch {}] dev\n{}'.format(epoch, metric_dev), hp.telegram_bot_token, hp.telegram_chat_id)
            report_to_telegram('[epoch {}] test\n{}'.format(epoch, metric_test), hp.telegram_bot_token, hp.telegram_chat_id)

        torch.save(model, "latest_model.pt")
        # print(f"weights were saved to {fname}.pt")
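    # Note: torch.save(model, ...) above pickles the entire DataParallel module,
    # so reloading requires the model code to be importable. A minimal sketch:
    #   model = torch.load("latest_model.pt", map_location=device)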