Commit cf45cfe1 authored by szr712's avatar szr712

支持多卡训练 (Support multi-GPU training)

parent a3eec639
......@@ -79,7 +79,7 @@ class MyIterator(data.Iterator):
if r < p and char in self.yunmus:
new_ex.src[i] = char[:-1]+"0"
self.dataset.examples.append(new_ex)
print("data len:{}".format(len(self.dataset.examples)))
# print("data len:{}".format(len(self.dataset.examples)))
# print("src:{}\ntrg:{}".format(type(ex.src),type(ex.trg)))
if self.sort:
......
......@@ -164,6 +164,7 @@ def create_dataset(opt, SRC, TRG):
opt.trg_pad = TRG.vocab.stoi['<pad>']
opt.train_len = get_len(train_iter)
print("train len:{}".format(opt.train_len))
return train_iter
......
......@@ -42,4 +42,4 @@ CUDA_VISIBLE_DEVICES=2 nohup python train_token_classification.py -src_data data
CUDA_VISIBLE_DEVICES=1 python train_token_classification.py -src_data data/train_file/pinyin_split_random_wo_tones -trg_data data/train_file/hanzi_split_random_wo_tones -epochs 100 -model_name token_classification_split_new -src_voc ./data/voc/pinyin.txt -trg_voc ./data/voc/hanzi.txt
CUDA_VISIBLE_DEVICES=1 python train_token_classification.py -src_data data/train_file/pinyin_split_random_wo_tones -trg_data data/train_file/hanzi_split_random_wo_tones -epochs 100 -model_name token_classification_split_new -src_voc ./data/voc/pinyin.txt -trg_voc ./data/voc/hanzi.txt -gpus 4,5,6,7
CUDA_VISIBLE_DEVICES=5,6,7,8 python train_token_classification.py -src_data data/train_file/pinyin_split_random_wo_tones -trg_data data/train_file/hanzi_split_random_wo_tones -epochs 100 -model_name token_classification_split_new -src_voc ./data/voc/pinyin.txt -trg_voc ./data/voc/hanzi.txt -batchsize 128 -master_batch_size 32
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment