Resume training from the last checkpoint

!onmt_preprocess -train_src "/content/drive/My Drive/Bachelor/baseline-1M-enfr/baseline-1M_train.en" -train_tgt "/content/drive/My Drive/Bachelor/baseline-1M-enfr/baseline-1M_train.fr" -valid_src "/content/drive/My Drive/Bachelor/baseline-1M-enfr/baseline-1M_test.en" -valid_tgt "/content/drive/My Drive/Bachelor/baseline-1M-enfr/baseline-1M_test.fr" -save_data "/content/drive/My Drive/Bachelor/baseline-1M-enfr/data" -src_vocab_size 10000 -tgt_vocab_size 10000
import torch.nn as nn
import torch
import onmt
import onmt.inputters
import onmt.modules
import onmt.utils

import logging
logging.basicConfig(level=logging.NOTSET)

"""
We begin by loading in the vocabulary for the model of interest. 
This will let us check vocab size and to get the special ids for padding
"""
vocab_fields = torch.load("/content/drive/My Drive/Bachelor/baseline-1M-enfr/data.vocab.pt")  # build the vocab fields

src_text_field = vocab_fields["src"].base_field  # src field
src_vocab = src_text_field.vocab
src_padding = src_vocab.stoi[src_text_field.pad_token]  # padding id

tgt_text_field = vocab_fields["tgt"].base_field  # tgt field
tgt_vocab = tgt_text_field.vocab
tgt_padding = tgt_vocab.stoi[tgt_text_field.pad_token]  # padding id

"""
Next we specify the core model itself. 
Here we will build a small model with an encoder and an attention based input feeding decoder. 
Both models will be RNNs and the encoder will be bidirectional
"""

emb_size = 100
rnn_size = 500
# Specify the core model.

encoder_embeddings = onmt.modules.Embeddings(emb_size, len(src_vocab),
                                             word_padding_idx=src_padding)  # src embeddings

encoder = onmt.encoders.RNNEncoder(hidden_size=rnn_size, num_layers=1,
                                   rnn_type="LSTM", bidirectional=True,
                                   embeddings=encoder_embeddings)  # encoder

decoder_embeddings = onmt.modules.Embeddings(emb_size, len(tgt_vocab),
                                             word_padding_idx=tgt_padding)  # tgt embeddings

decoder = onmt.decoders.decoder.InputFeedRNNDecoder(
    hidden_size=rnn_size, num_layers=1, bidirectional_encoder=True,
    rnn_type="LSTM", embeddings=decoder_embeddings)  # decoder

device = "cuda" if torch.cuda.is_available() else "cpu"  # device setup


model = onmt.models.model.NMTModel(encoder, decoder)  # build the model
model.to(device)  # move it to the device


# Specify the tgt word generator and loss computation module
model.generator = nn.Sequential(
    nn.Linear(rnn_size, len(tgt_vocab)),
    nn.LogSoftmax(dim=-1)).to(device)  # generator

loss = onmt.utils.loss.NMTLossCompute(
    criterion=nn.NLLLoss(ignore_index=tgt_padding, reduction="sum"),
    generator=model.generator)  # loss computation

"""
Now we set up the optimizer. 
Our wrapper around a core torch optim class handles learning rate updates and gradient normalization automatically.
"""
lr = 1
torch_optimizer = torch.optim.SGD(model.parameters(), lr=lr)  # base torch optimizer
optim = onmt.utils.optimizers.Optimizer(
    torch_optimizer, learning_rate=lr, max_grad_norm=2)  # OpenNMT optimizer wrapper

"""
Now we load the data from disk with the associated vocab fields. 
To iterate through the data itself we use a wrapper around a torchtext iterator class. 
We specify one for both the training and test data.
"""

# Load some data
from itertools import chain

train_data_files = [
    f"/content/drive/My Drive/Bachelor/baseline-1M-enfr/data.train.{i}.pt"
    for i in range(18)
]  # training shards data.train.0.pt ... data.train.17.pt
valid_data_file = "/content/drive/My Drive/Bachelor/baseline-1M-enfr/data.valid.0.pt"  # validation data

train_iter = onmt.inputters.inputter.DatasetLazyIter(dataset_paths=train_data_files,
                                                     fields=vocab_fields,
                                                     batch_size=50,
                                                     batch_size_multiple=1,
                                                     batch_size_fn=None,
                                                     device=device,
                                                     is_train=True,
                                                     repeat=True,
                                                     pool_factor=1  # changed from the original tutorial code; expects a positive int (True behaves as 1)
                                                     )  # training iterator

valid_iter = onmt.inputters.inputter.DatasetLazyIter(dataset_paths=[valid_data_file],
                                                     fields=vocab_fields,
                                                     batch_size=10,
                                                     batch_size_multiple=1,
                                                     batch_size_fn=None,
                                                     device=device,
                                                     is_train=False,
                                                     repeat=False,
                                                     pool_factor=1
                                                     )  # validation iterator

"""
Finally we train. Keeping track of the output requires a report manager.
"""

report_manager = onmt.utils.ReportMgr(
    report_every=500, start_time=None, tensorboard_writer=None)  # report manager

model_saver = onmt.models.ModelSaver("/content/drive/My Drive/Bachelor/baseline-1M-enfr/checkpoint",
                                     model=model, optim=optim, keep_checkpoint=2,
                                     fields=vocab_fields,
                                     model_opt="text")  # model_opt is normally the training opts namespace; "text" is used here as a placeholder



trainer = onmt.Trainer(model=model,
                       train_loss=loss,
                       valid_loss=loss,
                       model_saver=model_saver,
                       optim=optim,
                       report_manager=report_manager)  # build the trainer

trainer.train(train_iter=train_iter,
              train_steps=7000,
              save_checkpoint_steps=1000,
              valid_iter=valid_iter,
              valid_steps=250)  # run the actual training

I need to restore from the latest checkpoint (train step 7000) and continue training to step 11000. How can I do it?

You can have a look at the model_builder code to see what happens when a checkpoint is passed in (originating from the -train_from flag).
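Since your script builds the model, optimizer, and iterators by hand, you can mirror what model_builder does yourself. Below is a minimal sketch, assuming the OpenNMT-py 1.x checkpoint layout (ModelSaver writes "model", "generator", and "optim" entries, and names files base_path + "_step_N.pt", so step 7000 should be checkpoint_step_7000.pt); verify both against your installed version.

# A minimal sketch, assuming the OpenNMT-py 1.x checkpoint layout.
checkpoint = torch.load(
    "/content/drive/My Drive/Bachelor/baseline-1M-enfr/checkpoint_step_7000.pt",
    map_location="cpu")

# Restore the encoder/decoder and generator weights, as build_base_model does.
# strict=False because the saved "model" entry excludes the generator keys.
model.load_state_dict(checkpoint["model"], strict=False)
model.generator.load_state_dict(checkpoint["generator"], strict=False)
model.to(device)

# Restore the optimizer state. This also restores the internal training-step
# counter that the Trainer reads, so training resumes at step 7001.
optim.load_state_dict(checkpoint["optim"])

# The trainer built above holds references to the same model and optim
# objects, so it can be reused as-is. train_steps is a global step count,
# so 11000 runs 4000 additional steps.
trainer.train(train_iter=train_iter,
              train_steps=11000,
              save_checkpoint_steps=1000,
              valid_iter=valid_iter,
              valid_steps=250)

If you train through the onmt_train command line instead of the library API, the equivalent is passing -train_from with the checkpoint path and raising -train_steps to 11000.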