Training NLP-based Models with NVIDIA#
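
This notebook shows how to train a small GPT-2 language model with Archai's NVIDIA-based trainer. The workflow has two steps: defining the model with the Hugging Face transformers API, and running NvidiaTrainer on a toy dataset with a matching NvidiaTrainingArguments configuration.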

Defining the Model#
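
We start by instantiating a GPT2LMHeadModel with a deliberately small configuration (a context of 16 positions, 4 layers, 8 heads, 512-dimensional embeddings, and dropout disabled), which keeps the example small enough to train quickly on CPU.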

[1]:
from transformers import GPT2Config, GPT2LMHeadModel

config = GPT2Config(
    vocab_size=50257,
    n_positions=16,
    n_embd=512,
    n_layer=4,
    n_head=8,
    embd_pdrop=0.0,
    attn_pdrop=0.0,
    use_cache=False,
)
model = GPT2LMHeadModel(config=config)
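
As an optional sanity check (not part of the trainer's workflow), you can count the model's trainable parameters with standard PyTorch calls before starting training:

# Optional sanity check: report the number of trainable parameters
num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f"Trainable parameters: {num_params / 1e6:.1f}M")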

Running the Trainer#
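
Training is driven by NvidiaTrainingArguments, which bundles the experiment name, dataset location, tokenizer settings, and optimization hyperparameters, and by NvidiaTrainer, which consumes those arguments together with the model. Since this is a toy example, we first write a minimal olx_tmp dataset with train, valid, and test splits, and then run a single training step on CPU (no_cuda=True, max_steps=1).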

[2]:
import os
from archai.trainers.nlp.nvidia_trainer import NvidiaTrainer
from archai.trainers.nlp.nvidia_training_args import NvidiaTrainingArguments

# Create a dummy dataset with train, valid and test splits
data_path = "dataroot/textpred/olx_tmp/"
os.makedirs(data_path, exist_ok=True)
with open(data_path + "train.txt", "w") as f:
    f.write("train")
with open(data_path + "valid.txt", "w") as f:
    f.write("valid")
with open(data_path + "test.txt", "w") as f:
    f.write("test")

training_args = NvidiaTrainingArguments(
    "nvidia-gpt2",
    seed=1234,
    no_cuda=True,
    logging_steps=1,
    do_eval=False,
    dataset_name="olx_tmp",
    dataset_dir="./dataroot",
    vocab_type="gpt2",
    vocab_size=None,
    global_batch_size=1,
    seq_len=16,
    strategy="dp",
    max_steps=1,
    optim="adam",
)
trainer = NvidiaTrainer(model=model, args=training_args)

[3]:
trainer.train()
2023-03-21 15:15:49,613 - archai.datasets.nlp.nvidia_dataset_provider_utils — INFO —  Clearing and rebuilding cache ...
2023-03-21 15:15:49,617 - archai.datasets.nlp.nvidia_dataset_provider_utils — INFO —  Corpus: dataset = olx_tmp | vocab_type = gpt2 | vocab_size = None
2023-03-21 15:15:49,619 - archai.datasets.nlp.nvidia_dataset_provider_utils — INFO —  Training vocabulary ...
2023-03-21 15:15:49,619 - archai.datasets.nlp.tokenizer_utils.bbpe_tokenizer — INFO —  Training tokenizer with size = 50257 at c:\Users\gderosa\Projects\archai\docs\getting_started\notebooks\nlp\dataroot\textpred\olx_tmp\cache\olx_tmp\gpt2\None\vocab\bbpe_tokenizer.json ...
2023-03-21 15:15:49,619 - archai.datasets.nlp.tokenizer_utils.bbpe_tokenizer — INFO —  Training tokenizer ...
2023-03-21 15:15:49,692 - archai.datasets.nlp.tokenizer_utils.bbpe_tokenizer — DEBUG —  Tokenizer length: 264
2023-03-21 15:15:49,700 - archai.datasets.nlp.tokenizer_utils.bbpe_tokenizer — DEBUG —  Tokenizer file path: c:\Users\gderosa\Projects\archai\docs\getting_started\notebooks\nlp\dataroot\textpred\olx_tmp\cache\olx_tmp\gpt2\None\vocab\bbpe_tokenizer.json
2023-03-21 15:15:49,709 - archai.datasets.nlp.nvidia_dataset_provider_utils — INFO —  Vocabulary trained.
2023-03-21 15:15:49,713 - archai.datasets.nlp.tokenizer_utils.tokenizer_base — INFO —  Encoding file: c:\Users\gderosa\Projects\archai\docs\getting_started\notebooks\nlp\dataroot\textpred\olx_tmp\train.txt
2023-03-21 15:15:49,713 - archai.datasets.nlp.tokenizer_utils.tokenizer_base — INFO —  Encoding file: c:\Users\gderosa\Projects\archai\docs\getting_started\notebooks\nlp\dataroot\textpred\olx_tmp\valid.txt
2023-03-21 15:15:49,718 - archai.datasets.nlp.tokenizer_utils.tokenizer_base — INFO —  Encoding file: c:\Users\gderosa\Projects\archai\docs\getting_started\notebooks\nlp\dataroot\textpred\olx_tmp\test.txt
2023-03-21 15:15:49,725 - archai.datasets.nlp.nvidia_dataset_provider_utils — DEBUG —  Size: train = 7 | valid = 7 | test = 6
2023-03-21 15:15:49,741 - archai.trainers.nlp.nvidia_trainer — INFO —  Starting training ...
2023-03-21 15:15:49,747 - archai.trainers.nlp.nvidia_trainer — DEBUG —  Training arguments: {'experiment_name': 'nvidia-gpt2', 'checkpoint_file_path': '', 'output_dir': 'C:\\Users\\gderosa\\logdir\\nvidia-gpt2', 'seed': 1234, 'no_cuda': True, 'logging_steps': 1, 'do_eval': False, 'eval_steps': 100, 'save_all_checkpoints': False, 'dataset_name': 'olx_tmp', 'dataset_dir': 'c:\\Users\\gderosa\\Projects\\archai\\docs\\getting_started\\notebooks\\nlp\\dataroot\\textpred\\olx_tmp', 'dataset_cache_dir': 'c:\\Users\\gderosa\\Projects\\archai\\docs\\getting_started\\notebooks\\nlp\\dataroot\\textpred\\olx_tmp\\cache', 'dataset_refresh_cache': False, 'vocab_type': 'gpt2', 'vocab_size': None, 'iterator_roll': True, 'global_batch_size': 1, 'per_device_global_batch_size': None, 'seq_len': 16, 'strategy': 'dp', 'local_rank': 0, 'find_unused_parameters': False, 'max_steps': 1, 'gradient_accumulation_steps': 1, 'fp16': False, 'optim': 'adam', 'learning_rate': 0.01, 'weight_decay': 0.0, 'momentum': 0.0, 'max_grad_norm': 0.25, 'lr_scheduler_type': 'cosine', 'lr_qat_scheduler_type': 'cosine', 'lr_scheduler_max_steps': None, 'lr_scheduler_warmup_steps': 1000, 'lr_scheduler_patience': 0, 'lr_scheduler_min_lr': 0.001, 'lr_scheduler_decay_rate': 0.5, 'qat': False, 'mixed_qat': False}
2023-03-21 15:15:53,202 - archai.trainers.nlp.nvidia_trainer — INFO —  Epoch: 0 | Step: 1 | Batch: 1 / 1 | LR: 1.000e-05 | ms/batch: 3439.9 | tok/s: 2 | Loss: 10.926 | PPL: 55623.873
2023-03-21 15:15:53,210 - archai.trainers.nlp.nvidia_trainer — INFO —  End of training ...
2023-03-21 15:15:53,210 - archai.trainers.nlp.nvidia_trainer — INFO —  Training time: 3.462 seconds
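
Once training finishes, the model passed to the trainer is still a regular transformers GPT2LMHeadModel, so it can be saved and reloaded with the standard Hugging Face API. A minimal sketch (the output directory name below is an arbitrary choice):

# Persist the trained weights and configuration with the standard
# Hugging Face API, then reload them (directory name is arbitrary)
model.save_pretrained("nvidia-gpt2-checkpoint")
restored_model = GPT2LMHeadModel.from_pretrained("nvidia-gpt2-checkpoint")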