adds both training scripts and the evaluation files for topic classification

Michael Beck 2023-08-15 14:19:08 +02:00
parent 90aa58239c
commit 7c6b618272
5 changed files with 769 additions and 83 deletions


@@ -0,0 +1,7 @@
epoch,Training Loss,Valid. Loss,Valid. Accur.,Training Time,Validation Time
1,0.6699380816093513,0.6216431430407933,0.6964285714285714,0:01:03,0:00:02
2,0.6649796058024678,0.621175297669002,0.6964285714285714,0:01:03,0:00:01
3,0.642247314964022,0.6377243144171578,0.6964285714285714,0:01:05,0:00:02
4,0.6300328698541436,0.6038827853543418,0.6964285714285714,0:01:04,0:00:02
5,0.544977219509227,0.6619421115943364,0.625,0:01:02,0:00:02
6,0.3951783587357828,0.48477122613361906,0.7857142857142857,0:01:05,0:00:01


@@ -0,0 +1,7 @@
epoch,Training Loss,Valid. Loss,Valid. Accur.,Training Time,Validation Time
1,0.5610552686641376,0.4569096086310089,0.9116022099447514,0:37:20,0:00:31
2,0.43647773836513126,0.5441495520680196,0.9005524861878453,0:36:14,0:00:30
3,0.288773139899344,0.43471020716692715,0.9392265193370166,0:36:10,0:00:29
4,0.19330878817686287,0.4555162174395349,0.9281767955801105,0:36:17,0:00:30
5,0.09109889855869348,0.5060150003684702,0.9281767955801105,0:36:13,0:00:30
6,0.05734757932275739,0.6043995772428771,0.9226519337016574,0:36:11,0:00:31


@@ -0,0 +1,7 @@
epoch,Training Loss,Valid. Loss,Valid. Accur.,Training Time,Validation Time
1,0.21681843259712502,0.0005426188472483773,1.0,0:01:13,0:00:02
2,0.00016121647037353423,0.0002873415878639207,1.0,0:01:12,0:00:02
3,6.752021149355535e-05,0.00024319994372490328,1.0,0:01:12,0:00:02
4,4.7950222591787355e-05,0.00022139604243420763,1.0,0:01:13,0:00:02
5,3.99839740138679e-05,0.00021302999493855168,1.0,0:01:11,0:00:02
6,3.5356899656214995e-05,0.00020912183117616223,1.0,0:01:13,0:00:02
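All three stats files above share the schema the training scripts below write out (epoch, Training Loss, Valid. Loss, Valid. Accur., Training Time, Validation Time). A minimal sketch of loading one of them with pandas for inspection; the path is hypothetical, since the scripts name each file after a run timestamp:

import pandas as pd

# hypothetical path: the scripts write <model output dir>/<timestamp>.csv
stats = pd.read_csv("models/CovClass/2023-08-15_12-00-00/2023-08-15_12-00-00.csv", index_col="epoch")
print(stats[["Training Loss", "Valid. Loss", "Valid. Accur."]])
print("best epoch by validation accuracy:", stats["Valid. Accur."].idxmax())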

train.py (216 changed lines)

@@ -5,15 +5,13 @@ Created on Sat Aug 12 12:25:18 2023
@author: michael
"""
-from datasets import load_dataset
-from transformers import Trainer
-from transformers import AutoModelForSequenceClassification
+#from datasets import load_dataset
+#from transformers import Trainer
+#from transformers import AutoModelForSequenceClassification
from transformers import AutoTokenizer
import torch
+import numpy as np
-import os
-os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:512"
+from sklearn.model_selection import train_test_split # pip install scikit-learn
import pandas as pd
@@ -41,39 +39,54 @@ di = "data/IN/"
ud = "data/OUT/"
# Training CSV dataset
-twtCSV = "SenatorsTweets-Training_WORKING-COPY-correct"
+twtCSV = "SenatorsTweets-Training_WORKING-COPY-correct2"
twtCSVtrainCovClass = "SenatorsTweets-train-CovClassification"
twtCSVtrainFakeClass = "SenatorsTweets-train-FakeClassification"
+statsTrainingTopicClass = "statsTopicClassification-"
-# Name of new datafile generated
-senCSVprep = "SenatorsTweets-Training_WORKING-COPY-prepared"
# don't change this one
twtCSVPath = wd + ud + twtCSV + ".csv"
-twtCSVtrainCovClassPath = wd + ud + twtCSVtrainCovClass + ".csv" # may be unnecessary
-twtCSVtrainFakeClassPath = wd + ud + twtCSVtrainFakeClass + ".csv" # may be unnecessary
-twtCSVtrainCovClassPathTrain = wd + ud + twtCSVtrainCovClass + "TRAIN.csv" # may be unnecessary
-twtCSVtrainFakeClassPathTrain = wd + ud + twtCSVtrainFakeClass + "TRAIN.csv" # may be unnecessary
+twtCSVtrainCovClassPath = wd + ud + twtCSVtrainCovClass + ".csv"
+twtCSVtrainFakeClassPath = wd + ud + twtCSVtrainFakeClass + ".csv"
+statsTrainingTopicClassPath = wd + ud + statsTrainingTopicClass
+twtCSVtrainCovClassPathTrain = wd + ud + twtCSVtrainCovClass + "TRAIN.csv"
+twtCSVtrainFakeClassPathTrain = wd + ud + twtCSVtrainFakeClass + "TRAIN.csv"
twtTSVtrainCovClassPathTrain = wd + ud + "cov-train.tsv"
twtTSVtrainFakeClassPathTrain = wd + ud + "fake-train.tsv"
-twtTSVtrainCovClassPathEval = wd + ud + "cov-eval.tsv" # may be unnecessary
-twtTSVtrainFakeClassPathEval = wd + ud + "fake-eval.tsv" # may be unnecessary
+twtTSVtrainCovClassPathEval = wd + ud + "cov-eval.tsv"
+twtTSVtrainFakeClassPathEval = wd + ud + "fake-eval.tsv"
+seed = 12355
+# Model paths
+modCovClassPath = wd + "models/CovClass/"
+modFakeClassPath = wd + "models/FakeClass/"
-model_name = 'digitalepidemiologylab/covid-twitter-bert-v2'
+model_name = 'digitalepidemiologylab/covid-twitter-bert-v2' # accuracy 69
+#model_name = 'justinqbui/bertweet-covid19-base-uncased-pretraining-covid-vaccine-tweets' #48
+#model_name = "cardiffnlp/tweet-topic-latest-multi"
+model_name = "bvrau/covid-twitter-bert-v2-struth"
+#model_name = "cardiffnlp/roberta-base-tweet-topic-single-all"
+model_fake_name = 'bvrau/covid-twitter-bert-v2-struth'
+# More models for fake detection:
+# https://huggingface.co/justinqbui/bertweet-covid-vaccine-tweets-finetuned
tokenizer = AutoTokenizer.from_pretrained(model_name)
max_length = 64 # max token sentence length
-##
+#%%
# Create training and testing dataset
dfTest = pd.read_csv(twtCSVPath, dtype=(object), delimiter=";")
-dfTest = dfTest[:-800] # remove last 800 rows
-dfTest = dfTest.iloc[:,:-3] # remove last 800 rows
-dfTest['text'] = dfTest['rawContent'].apply(CleanTweets.preprocess_text)
+#dfTest = dfTest[:-900] # remove last 800 rows
+#dfTest = dfTest.iloc[:,:-3] # remove last 800 rows
+dfTest['text'] = dfTest['rawContent'].apply(CleanTweets.preprocess_roberta)
dfTest.drop(columns=['rawContent'], inplace=True)
@@ -82,12 +95,13 @@ dfTest['tweet_proc_length'] = [len(text.split(' ')) for text in dfTest['text']]
dfTest['tweet_proc_length'].value_counts()
dfTest = dfTest[dfTest['tweet_proc_length']>3]
dfTest = dfTest.drop_duplicates(subset=['text'])
+dfTest = dfTest.drop(columns=['date', 'Unnamed: 0'])
# Create datasets for each classification
dfCovClass = dfTest
-dfCovClass = dfCovClass.drop(columns=['fake', 'date', 'Unnamed: 0']) # fake column not neeeded in covid topic classification data
dfFakeClass = dfTest
-dfFakeClass = dfFakeClass.drop(columns=['topicCovid', 'date', 'Unnamed: 0']) # topicCovid column not neeeded in covid topic classification data
+dfCovClass = dfCovClass.drop(columns=['fake']) # fake column not neeeded in covid topic classification data
+dfFakeClass = dfFakeClass[dfFakeClass['topicCovid']=='True'].drop(columns=['topicCovid']) # topicCovid column not neeeded in covid topic classification data
#type_map = {'Covid tweet': 'covid tweets', 'Noncovid tweet': 'noncovid tweet'}
dfCovClass.rename(index = str, columns={'topicCovid': 'labels', 'tid': 'id'}, inplace = True)
@@ -97,10 +111,12 @@ dfCovClass.labels = dfCovClass.labels.replace({"True": 'Covid', "False": 'NonCov
dfFakeClass.rename(index = str, columns={'fake': 'labels', 'tid': 'id'}, inplace = True)
dfFakeClass.labels = dfFakeClass.labels.replace({"True": 'Fake', "False": 'True'})
-##
+#%%
# Tokenize tweets
-#dfCovClass['input_ids'] = dfCovClass['text'].apply(lambda x: tokenizer(x, max_length=max_length, padding="max_length",)['input_ids'])
-#dfFakeClass['input_ids'] = dfFakeClass['text'].apply(lambda x: tokenizer(x, max_length=max_length, padding="max_length",)['input_ids'])
+dfCovClass = dfCovClass[dfCovClass['labels'].notna()]
+dfFakeClass = dfFakeClass[dfFakeClass['labels'].notna()]
+dfCovClass['input_ids'] = dfCovClass['text'].apply(lambda x: tokenizer(x, max_length=max_length, padding="max_length",)['input_ids'])
+dfFakeClass['input_ids'] = dfFakeClass['text'].apply(lambda x: tokenizer(x, max_length=max_length, padding="max_length",)['input_ids'])
def encode_labels(label):
if label == 'Covid':
@@ -115,45 +131,80 @@ def encode_labels(label):
dfCovClass['labels_encoded'] = dfCovClass['labels'].apply(encode_labels)
dfFakeClass['labels_encoded'] = dfFakeClass['labels'].apply(encode_labels)
+# get n of classes
+print("# of Non-Covid tweets (coded 0):")
+print(dfCovClass.groupby('labels_encoded', group_keys=False)['id'].nunique())
+# 62 non-covid tweets, disproportionate sample for training has to be 124 tweets
-#save dfs as csvs
-dfCovClass = dfCovClass.drop(columns=['tweet_proc_length'])
-dfCovClass[200:1000].reset_index(drop=True).to_csv(twtCSVtrainCovClassPathTrain, encoding='utf-8', sep=";")
-dfCovClass[200:1000].reset_index(drop=True).to_csv(twtTSVtrainCovClassPathTrain, encoding='utf-8', sep="\t")
-dfCovClass[0:199].reset_index(drop=True).to_csv(twtCSVtrainCovClassPath, encoding='utf-8', sep=";")
-dfCovClass[0:199].reset_index(drop=True).to_csv(twtTSVtrainCovClassPathEval, encoding='utf-8', sep="\t")
-dfFakeClass = dfFakeClass.drop(columns=['tweet_proc_length'])
-dfFakeClass[200:1000].reset_index(drop=True).to_csv(twtCSVtrainFakeClassPathTrain, encoding='utf-8', sep=";")
-dfFakeClass[200:1000].reset_index(drop=True).to_csv(twtTSVtrainFakeClassPathTrain, encoding='utf-8', sep="\t")
-dfFakeClass[0:199].reset_index(drop=True).to_csv(twtCSVtrainFakeClassPath, encoding='utf-8', sep=";")
-dfFakeClass[0:199].reset_index(drop=True).to_csv(twtTSVtrainFakeClassPathEval, encoding='utf-8', sep="\t")
-##
+print("# of Fake-news tweets (coded 1):")
+print(dfFakeClass.groupby('labels_encoded', group_keys=False)['id'].nunique())
+# create disproportionate sample - 50/50 of both
+#dfCovClass.groupby('labels_encoded', group_keys=False)['id'].nunique()
+#dfCovClass = dfCovClass.groupby('labels_encoded', group_keys=False).apply(lambda x: x.sample(164, random_state=seed))
+# after a lot of tests, it seems that a sample in which non-fake news tweets are overrepresented leads to better results.
+# because of this, performance limitations and time constraints, group 1 (covid topic) will be overrepresented (twice as many), which still doesn't reflect the real preoportions ~10/1
+'''dfCovClassa = dfCovClass.groupby('labels_encoded', group_keys=False).get_group(1).sample(frac=1, replace=True).reset_index()
+dfCovClassb = dfCovClass.groupby('labels_encoded', group_keys=False).get_group(0).sample(frac=1, replace=True).reset_index()
+dfCovClassab= pd.concat([dfCovClassa,dfCovClassb])
+dfCovClassab.reset_index(inplace=True)
+dfCovClass_train, dfCovClass_test = train_test_split(dfCovClassab, test_size=0.1, random_state=seed, stratify=dfCovClassab['labels_encoded'])
+'''
+# create training and validation samples
+dfCovClass_train, dfCovClass_test = train_test_split(dfCovClass, test_size=0.1, random_state=seed, stratify=dfCovClass['labels_encoded'])
+# reset index and drop unnecessary columns
+dfCovClass_train.reset_index(drop=True, inplace=True)
+dfCovClass_train.drop(inplace=True, columns=['tweet_proc_length'])
+dfCovClass_train.groupby('labels_encoded', group_keys=False)['id'].nunique()
+dfCovClass_test.reset_index(drop=True, inplace=True)
+dfCovClass_test.drop(inplace=True, columns=['tweet_proc_length'])
+dfCovClass_test.groupby('labels_encoded', group_keys=False)['id'].nunique()
+# save dfs as csvs and tsvs, for training and validation
+# covid classification datafiles
+# rows 0-41 = noncovid, 42-81 covid, therfore:
+#dfCovClass = dfCovClass.drop(columns=['tweet_proc_length'])
+#dfCovClass.reset_index(inplace=True, drop=True)
+#dfCovClass.loc[np.r_[0:31, 42:71], :].reset_index(drop=True).to_csv(twtCSVtrainCovClassPathTrain, encoding='utf-8', sep=";")
+#dfCovClass.loc[np.r_[0:31, 42:72], :].reset_index(drop=True).to_csv(twtTSVtrainCovClassPathTrain, encoding='utf-8', sep="\t")
+#dfCovClass.loc[np.r_[31:41, 72:81], :].reset_index(drop=True).to_csv(twtCSVtrainCovClassPath, encoding='utf-8', sep=";")
+#dfCovClass.loc[np.r_[31:41, 72:81], :].reset_index(drop=True).to_csv(twtTSVtrainCovClassPathEval, encoding='utf-8', sep="\t")
+# fake news classification datafiles
+#dfFakeClass = dfFakeClass.drop(columns=['tweet_proc_length'])
+#dfFakeClass[200:1000].reset_index(drop=True).to_csv(twtCSVtrainFakeClassPathTrain, encoding='utf-8', sep=";")
+#dfFakeClass[200:1000].reset_index(drop=True).to_csv(twtTSVtrainFakeClassPathTrain, encoding='utf-8', sep="\t")
+#dfFakeClass[0:199].reset_index(drop=True).to_csv(twtCSVtrainFakeClassPath, encoding='utf-8', sep=";")
+#dfFakeClass[0:199].reset_index(drop=True).to_csv(twtTSVtrainFakeClassPathEval, encoding='utf-8', sep="\t")
+#%%
# Prepare trainer
-from transformers import TrainingArguments
-training_args = TrainingArguments(
+#from transformers import TrainingArguments
+#training_args = TrainingArguments(
# report_to = 'wandb',
-output_dir=wd+'results', # output directory
-overwrite_output_dir = True,
-num_train_epochs=3, # total number of training epochs
-per_device_train_batch_size=8, # batch size per device during training
-per_device_eval_batch_size=16, # batch size for evaluation
-learning_rate=2e-5,
-warmup_steps=1000, # number of warmup steps for learning rate scheduler
-weight_decay=0.01, # strength of weight decay
-logging_dir='./logs3', # directory for storing logs
-logging_steps=1000,
-evaluation_strategy="epoch",
-save_strategy="epoch",
-load_best_model_at_end=True
-)
+# output_dir=wd+'results', # output directory/
+# overwrite_output_dir = True,
+# num_train_epochs=6, # total number of training epochs
+# per_device_train_batch_size=8, # batch size per device during training
+# per_device_eval_batch_size=16, # batch size for evaluation
+# learning_rate=2e-5,
+# warmup_steps=1000, # number of warmup steps for learning rate scheduler
+# weight_decay=0.01, # strength of weight decay
+# logging_dir='./logs3', # directory for storing logs
+# logging_steps=1000,
+# evaluation_strategy="epoch",
+# save_strategy="epoch",
+# load_best_model_at_end=True
+#)
tokenizer = AutoTokenizer.from_pretrained(model_name)
-from transformers import BertForSequenceClassification, AdamW, BertConfig
-from torch.utils.data import TensorDataset, random_split
+from transformers import BertForSequenceClassification, AdamW#, BertConfig
+#from torch.utils.data import TensorDataset, random_split
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
"""
@@ -162,7 +213,7 @@ train_dataset = train_dataset['train']
eval_dataset = load_dataset('csv', data_files={'test': twtCSVtrainCovClassPath}, encoding = "utf-8")
eval_dataset = eval_dataset['test']
"""
-batch_size = 16
+batch_size = 1
from torch.utils.data import Dataset
@@ -191,17 +242,14 @@ class PandasDataset(Dataset):
}
-df = pd.read_csv(twtCSVtrainCovClassPathTrain, delimiter=";")
-train_dataset = PandasDataset(df, tokenizer, max_length)
+train_dataset = PandasDataset(dfCovClass_train, tokenizer, max_length)
train_dataloader = DataLoader(
train_dataset,
sampler=RandomSampler(train_dataset),
batch_size=batch_size
)
-df = pd.read_csv(twtCSVtrainCovClassPath, delimiter=";")
-eval_dataset = PandasDataset(df, tokenizer, max_length)
+eval_dataset = PandasDataset(dfCovClass_test, tokenizer, max_length)
validation_dataloader = DataLoader(
eval_dataset,
sampler=SequentialSampler(eval_dataset),
@@ -215,7 +263,7 @@ for idx, batch in enumerate(train_dataloader):
break
model = BertForSequenceClassification.from_pretrained(
-"digitalepidemiologylab/covid-twitter-bert-v2", # Use the 12-layer BERT model, with an uncased vocab.
+model_name,
num_labels = 2, # The number of output labels--2 for binary classification.
# You can increase this for multi-class tasks.
output_attentions = False, # Whether the model returns attentions weights.
@@ -240,9 +288,8 @@ optimizer = AdamW(model.parameters(),
from transformers import get_linear_schedule_with_warmup
# Number of training epochs. The BERT authors recommend between 2 and 4.
-# We chose to run for 4, but we'll see later that this may be over-fitting the
-# training data.
-epochs = 4
+# We chose to run for 6
+epochs = 6
# Total number of training steps is [number of batches] x [number of epochs].
# (Note that this is not the same as the number of training samples).
@@ -253,10 +300,6 @@ scheduler = get_linear_schedule_with_warmup(optimizer,
num_warmup_steps = 0, # Default value in run_glue.py
num_training_steps = total_steps)
-import numpy as np
# Function to calculate the accuracy of our predictions vs labels
def flat_accuracy(preds, labels):
pred_flat = np.argmax(preds, axis=1).flatten()
@@ -277,7 +320,6 @@ def format_time(elapsed):
return str(datetime.timedelta(seconds=elapsed_rounded))
import random
-import numpy as np
# This training code is based on the `run_glue.py` script here:
# https://github.com/huggingface/transformers/blob/5bfcd0485ece086ebcbed2d008813037968a9e58/examples/run_glue.py#L128
@@ -307,6 +349,8 @@ np.random.seed(seed_val)
torch.manual_seed(seed_val)
torch.cuda.manual_seed_all(seed_val)
+#%%
+# Start training
# We'll store a number of quantities such as training and validation loss,
# validation accuracy, and timings.
training_stats = []
@@ -324,14 +368,14 @@ for epoch_i in range(0, epochs):
print("")
print('======== Epoch {:} / {:} ========'.format(epoch_i + 1, epochs))
+print('{:>5,} steps per batch will be calculated.'.format(len(train_dataloader)))
print('Training...')
# Measure how long the training epoch takes.
t0 = time.time()
+model.to(device)
# Reset the total loss for this epoch.
total_train_loss = 0
# Put the model into training mode. Don't be mislead--the call to
# `train` just changes the *mode*, it doesn't *perform* the training.
# `dropout` and `batchnorm` layers behave differently during training
@@ -341,8 +385,8 @@ for epoch_i in range(0, epochs):
# For each batch of training data...
for step, batch in enumerate(train_dataloader):
-# Progress update every 40 batches.
-if step % 40 == 0 and not step == 0:
+# Progress update every 10 batches.
+if step % 10 == 0 and not step == 0:
# Calculate elapsed time in minutes.
elapsed = format_time(time.time() - t0)
@@ -527,8 +571,12 @@ for p in params[-4:]:
import os
# Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
+from datetime import datetime as dt
-output_dir = wd + 'model_save/'
+fTimeFormat = "%Y-%m-%d_%H-%M-%S"
+now = dt.now().strftime(fTimeFormat)
+output_dir = modCovClassPath + now + "/"
# Create output directory if needed
if not os.path.exists(output_dir):
@@ -548,16 +596,18 @@ tokenizer.save_pretrained(output_dir)
import pandas as pd
# Display floats with two decimal places.
-pd.set_option('precision', 2)
+pd.set_option('display.precision', 2)
# Create a DataFrame from our training statistics.
df_stats = pd.DataFrame(data=training_stats)
-# Use the 'epoch' as the row index.
+# Use the 'epoch' as the row index.# Good practice: save your training arguments together with the trained model
df_stats = df_stats.set_index('epoch')
# A hack to force the column headers to wrap.
#df = df.style.set_table_styles([dict(selector="th",props=[('max-width', '70px')])])
# Display the table.
df_stats
+df_stats.to_csv(output_dir + now + ".csv")
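As the comment in the saving block notes, a model written with save_pretrained() can be reloaded with from_pretrained(). A minimal inference sketch for the topic classifier saved by train.py; the timestamped directory name is an assumption, and the label mapping follows encode_labels() (1 = Covid, 0 = NonCovid):

import torch
from transformers import AutoTokenizer, BertForSequenceClassification

model_dir = "models/CovClass/2023-08-15_12-00-00/"  # hypothetical timestamped folder under modCovClassPath
tokenizer = AutoTokenizer.from_pretrained(model_dir)
model = BertForSequenceClassification.from_pretrained(model_dir)
model.eval()

enc = tokenizer("Get your COVID-19 booster shot today.",
                max_length=64, padding="max_length", truncation=True, return_tensors="pt")
with torch.no_grad():
    pred = model(**enc).logits.argmax(dim=1).item()
print("Covid topic" if pred == 1 else "Non-Covid topic")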

trainFake.py (new file, 615 lines)

@@ -0,0 +1,615 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 12 12:25:18 2023
@author: michael
"""
#from datasets import load_dataset
#from transformers import Trainer
#from transformers import AutoModelForSequenceClassification
from transformers import AutoTokenizer
import torch
import numpy as np
from sklearn.model_selection import train_test_split # pip install scikit-learn
import pandas as pd
## Follow these two guides:
# best one https://mccormickml.com/2019/07/22/BERT-fine-tuning/
# https://xiangyutang2.github.io/tweet-classification/
# https://medium.com/mlearning-ai/fine-tuning-bert-for-tweets-classification-ft-hugging-face-8afebadd5dbf
###################
# Setup directories
# WD Michael
wd = "/home/michael/Documents/PS/Data/collectTweets/"
# WD Server
# wd = '/home/yunohost.multimedia/polsoc/Politics & Society/TweetCollection/'
import sys
funs = wd+"funs"
sys.path.insert(1, funs)
import CleanTweets
# datafile input directory
di = "data/IN/"
# Tweet-datafile output directory
ud = "data/OUT/"
# Training CSV dataset
twtCSV = "SenatorsTweets-Training_WORKING-COPY-correct2"
twtCSVtrainCovClass = "SenatorsTweets-train-CovClassification"
twtCSVtrainFakeClass = "SenatorsTweets-train-FakeClassification"
statsTrainingTopicClass = "statsTopicClassification-"
# don't change this one
twtCSVPath = wd + ud + twtCSV + ".csv"
twtCSVtrainCovClassPath = wd + ud + twtCSVtrainCovClass + ".csv"
twtCSVtrainFakeClassPath = wd + ud + twtCSVtrainFakeClass + ".csv"
statsTrainingTopicClassPath = wd + ud + statsTrainingTopicClass
twtCSVtrainCovClassPathTrain = wd + ud + twtCSVtrainCovClass + "TRAIN.csv"
twtCSVtrainFakeClassPathTrain = wd + ud + twtCSVtrainFakeClass + "TRAIN.csv"
twtTSVtrainCovClassPathTrain = wd + ud + "cov-train.tsv"
twtTSVtrainFakeClassPathTrain = wd + ud + "fake-train.tsv"
twtTSVtrainCovClassPathEval = wd + ud + "cov-eval.tsv"
twtTSVtrainFakeClassPathEval = wd + ud + "fake-eval.tsv"
seed = 12355
# Model paths
modCovClassPath = wd + "models/CovClass/"
modFakeClassPath = wd + "models/FakeClass/"
model_name = 'digitalepidemiologylab/covid-twitter-bert-v2' # accuracy 69
#model_name = 'justinqbui/bertweet-covid19-base-uncased-pretraining-covid-vaccine-tweets' #48
#model_name = "cardiffnlp/tweet-topic-latest-multi"
model_name = "bvrau/covid-twitter-bert-v2-struth"
#model_name = "cardiffnlp/roberta-base-tweet-topic-single-all"
model_fake_name = 'bvrau/covid-twitter-bert-v2-struth'
# More models for fake detection:
# https://huggingface.co/justinqbui/bertweet-covid-vaccine-tweets-finetuned
tokenizer = AutoTokenizer.from_pretrained(model_name)
max_length = 64 # max token sentence length
#%%
# Create training and testing dataset
dfTest = pd.read_csv(twtCSVPath, dtype=(object), delimiter=";")
#dfTest = dfTest[:-900] # remove last 800 rows
#dfTest = dfTest.iloc[:,:-3] # remove last 800 rows
dfTest['text'] = dfTest['rawContent'].apply(CleanTweets.preprocess_roberta)
dfTest.drop(columns=['rawContent'], inplace=True)
# Only keep tweets that are longer than 3 words
dfTest['tweet_proc_length'] = [len(text.split(' ')) for text in dfTest['text']]
dfTest['tweet_proc_length'].value_counts()
dfTest = dfTest[dfTest['tweet_proc_length']>3]
dfTest = dfTest.drop_duplicates(subset=['text'])
dfTest = dfTest.drop(columns=['date', 'Unnamed: 0'])
# Create datasets for each classification
dfCovClass = dfTest
dfFakeClass = dfTest
dfCovClass = dfCovClass.drop(columns=['fake']) # fake column not neeeded in covid topic classification data
dfFakeClass = dfFakeClass[dfFakeClass['topicCovid']=='True'].drop(columns=['topicCovid']) # topicCovid column not neeeded in covid topic classification data
#type_map = {'Covid tweet': 'covid tweets', 'Noncovid tweet': 'noncovid tweet'}
dfCovClass.rename(index = str, columns={'topicCovid': 'labels', 'tid': 'id'}, inplace = True)
dfCovClass.labels = dfCovClass.labels.replace({"True": 'Covid', "False": 'NonCovid'})
#type_map = {'fake news tweet': 'fake news tweet', 'non-fake-news-tweet': 'non-fake-news-tweet'}
dfFakeClass.rename(index = str, columns={'fake': 'labels', 'tid': 'id'}, inplace = True)
#%%
# Tokenize tweets
dfCovClass = dfCovClass[dfCovClass['labels'].notna()]
dfFakeClass['labels'].replace({'Check': '','check': '', 'FALSE':''}, inplace=True)
dfFakeClass = dfFakeClass[dfFakeClass['labels'].notna()]
dfCovClass['input_ids'] = dfCovClass['text'].apply(lambda x: tokenizer(x, max_length=max_length, padding="max_length",)['input_ids'])
dfFakeClass['input_ids'] = dfFakeClass['text'].apply(lambda x: tokenizer(x, max_length=max_length, padding="max_length",)['input_ids'])
def encode_labels(label):
if label == 'Covid':
return 1
elif label == 'NonCovid':
return 0
elif label == 'False':
return 1
elif label == 'True':
return 0
return 0
dfCovClass['labels_encoded'] = dfCovClass['labels'].apply(encode_labels)
dfFakeClass['labels_encoded'] = dfFakeClass['labels'].apply(encode_labels)
dfFakeClass = dfFakeClass[dfFakeClass['labels']!=""]
#dfFakeClass = dfFakeClass[(dfFakeClass['labels']=="Fake") | (dfFakeClass['labels']=="True")]
# get n of classes
print("# of Non-Covid tweets (coded 0):")
print(dfCovClass.groupby('labels_encoded', group_keys=False)['id'].nunique())
# 62 non-covid tweets, disproportionate sample for training has to be 124 tweets
print("# of Fake-news tweets (coded 1):")
print(dfFakeClass.groupby('labels_encoded', group_keys=False)['id'].nunique())
# create disproportionate sample - 50/50 of both
#dfCovClass.groupby('labels_encoded', group_keys=False)['id'].nunique()
#dfCovClass = dfCovClass.groupby('labels_encoded', group_keys=False).apply(lambda x: x.sample(164, random_state=seed))
# after a lot of tests, it seems that a sample in which non-fake news tweets are overrepresented leads to better results.
# because of this, performance limitations and time constraints, group 1 (covid topic) will be overrepresented (twice as many), which still doesn't reflect the real preoportions ~10/1
'''dfCovClassa = dfCovClass.groupby('labels_encoded', group_keys=False).get_group(1).sample(frac=1, replace=True).reset_index()
dfCovClassb = dfCovClass.groupby('labels_encoded', group_keys=False).get_group(0).sample(frac=1, replace=True).reset_index()
dfCovClassab= pd.concat([dfCovClassa,dfCovClassb])
dfCovClassab.reset_index(inplace=True)
dfCovClass_train, dfCovClass_test = train_test_split(dfCovClassab, test_size=0.1, random_state=seed, stratify=dfCovClassab['labels_encoded'])
'''
# create training and validation samples
dfFakeClass_train, dfFakeClass_test = train_test_split(dfFakeClass, test_size=0.1, random_state=seed, stratify=dfFakeClass['labels_encoded'])
# reset index and drop unnecessary columns
dfFakeClass_train.reset_index(drop=True, inplace=True)
dfFakeClass_train.drop(inplace=True, columns=['tweet_proc_length'])
dfFakeClass_train.groupby('labels_encoded', group_keys=False)['id'].nunique()
dfFakeClass_test.reset_index(drop=True, inplace=True)
dfFakeClass_test.drop(inplace=True, columns=['tweet_proc_length'])
dfFakeClass_test.groupby('labels_encoded', group_keys=False)['id'].nunique()
# save dfs as csvs and tsvs, for training and validation
# covid classification datafiles
# rows 0-41 = noncovid, 42-81 covid, therfore:
#dfCovClass = dfCovClass.drop(columns=['tweet_proc_length'])
#dfCovClass.reset_index(inplace=True, drop=True)
#dfCovClass.loc[np.r_[0:31, 42:71], :].reset_index(drop=True).to_csv(twtCSVtrainCovClassPathTrain, encoding='utf-8', sep=";")
#dfCovClass.loc[np.r_[0:31, 42:72], :].reset_index(drop=True).to_csv(twtTSVtrainCovClassPathTrain, encoding='utf-8', sep="\t")
#dfCovClass.loc[np.r_[31:41, 72:81], :].reset_index(drop=True).to_csv(twtCSVtrainCovClassPath, encoding='utf-8', sep=";")
#dfCovClass.loc[np.r_[31:41, 72:81], :].reset_index(drop=True).to_csv(twtTSVtrainCovClassPathEval, encoding='utf-8', sep="\t")
# fake news classification datafiles
#dfFakeClass = dfFakeClass.drop(columns=['tweet_proc_length'])
#dfFakeClass[200:1000].reset_index(drop=True).to_csv(twtCSVtrainFakeClassPathTrain, encoding='utf-8', sep=";")
#dfFakeClass[200:1000].reset_index(drop=True).to_csv(twtTSVtrainFakeClassPathTrain, encoding='utf-8', sep="\t")
#dfFakeClass[0:199].reset_index(drop=True).to_csv(twtCSVtrainFakeClassPath, encoding='utf-8', sep=";")
#dfFakeClass[0:199].reset_index(drop=True).to_csv(twtTSVtrainFakeClassPathEval, encoding='utf-8', sep="\t")
#%%
# Prepare trainer
#from transformers import TrainingArguments
#training_args = TrainingArguments(
# report_to = 'wandb',
# output_dir=wd+'results', # output directory/
# overwrite_output_dir = True,
# num_train_epochs=6, # total number of training epochs
# per_device_train_batch_size=8, # batch size per device during training
# per_device_eval_batch_size=16, # batch size for evaluation
# learning_rate=2e-5,
# warmup_steps=1000, # number of warmup steps for learning rate scheduler
# weight_decay=0.01, # strength of weight decay
# logging_dir='./logs3', # directory for storing logs
# logging_steps=1000,
# evaluation_strategy="epoch",
# save_strategy="epoch",
# load_best_model_at_end=True
#)
tokenizer = AutoTokenizer.from_pretrained(model_name)
from transformers import BertForSequenceClassification, AdamW#, BertConfig
#from torch.utils.data import TensorDataset, random_split
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
"""
train_dataset = load_dataset('csv', data_files={'train': twtCSVtrainCovClassPathTrain}, encoding = "utf-8")
train_dataset = train_dataset['train']
eval_dataset = load_dataset('csv', data_files={'test': twtCSVtrainCovClassPath}, encoding = "utf-8")
eval_dataset = eval_dataset['test']
"""
batch_size = 1
from torch.utils.data import Dataset
class PandasDataset(Dataset):
def __init__(self, dataframe, tokenizer, max_length):
self.dataframe = dataframe
self.tokenizer = tokenizer
self.max_length = max_length
def __len__(self):
return len(self.dataframe)
def __getitem__(self, index):
row = self.dataframe.iloc[index]
text = row['text']
labels = row['labels_encoded']
encoded = self.tokenizer(text, max_length=self.max_length, padding="max_length", truncation=True)
input_ids = torch.tensor(encoded['input_ids'])
attention_mask = torch.tensor(encoded['attention_mask'])
return {
'input_ids': input_ids,
'attention_mask': attention_mask,
'labels': torch.tensor(labels) # Assuming labels are already encoded
}
train_dataset = PandasDataset(dfFakeClass_train, tokenizer, max_length)
train_dataloader = DataLoader(
train_dataset,
sampler=RandomSampler(train_dataset),
batch_size=batch_size
)
eval_dataset = PandasDataset(dfFakeClass_test, tokenizer, max_length)
validation_dataloader = DataLoader(
eval_dataset,
sampler=SequentialSampler(eval_dataset),
batch_size=batch_size
)
for idx, batch in enumerate(train_dataloader):
print('Batch index: ', idx)
print('Batch size: ', batch['input_ids'].size()) # Access 'input_ids' field
print('Batch label: ', batch['labels']) # Access 'labels' field
break
model = BertForSequenceClassification.from_pretrained(
model_name,
num_labels = 2, # The number of output labels--2 for binary classification.
# You can increase this for multi-class tasks.
output_attentions = False, # Whether the model returns attentions weights.
output_hidden_states = False, # Whether the model returns all hidden-states.
)
#trainer = Trainer(
# model=model, # the instantiated 🤗 Transformers model to be trained
# args=training_args, # training arguments, defined above
# train_dataset=train_dataset, # training dataset
# eval_dataset=eval_dataset # evaluation dataset
#)
# Note: AdamW is a class from the huggingface library (as opposed to pytorch)
# I believe the 'W' stands for 'Weight Decay fix"
optimizer = AdamW(model.parameters(),
lr = 2e-5, # args.learning_rate - default is 5e-5, our notebook had 2e-5
eps = 1e-8 # args.adam_epsilon - default is 1e-8.
)
from transformers import get_linear_schedule_with_warmup
# Number of training epochs. The BERT authors recommend between 2 and 4.
# We chose to run for 6
epochs = 6
# Total number of training steps is [number of batches] x [number of epochs].
# (Note that this is not the same as the number of training samples).
total_steps = len(train_dataloader) * epochs
# Create the learning rate scheduler.
scheduler = get_linear_schedule_with_warmup(optimizer,
num_warmup_steps = 0, # Default value in run_glue.py
num_training_steps = total_steps)
# Function to calculate the accuracy of our predictions vs labels
def flat_accuracy(preds, labels):
pred_flat = np.argmax(preds, axis=1).flatten()
labels_flat = labels.flatten()
return np.sum(pred_flat == labels_flat) / len(labels_flat)
import time
import datetime
def format_time(elapsed):
'''
Takes a time in seconds and returns a string hh:mm:ss
'''
# Round to the nearest second.
elapsed_rounded = int(round((elapsed)))
# Format as hh:mm:ss
return str(datetime.timedelta(seconds=elapsed_rounded))
import random
# This training code is based on the `run_glue.py` script here:
# https://github.com/huggingface/transformers/blob/5bfcd0485ece086ebcbed2d008813037968a9e58/examples/run_glue.py#L128
# Set the seed value all over the place to make this reproducible.
seed_val = 12355
# If there's a GPU available...
if torch.cuda.is_available():
# Tell PyTorch to use the GPU.
device = torch.device("cuda")
print('There are %d GPU(s) available.' % torch.cuda.device_count())
print('We will use the GPU:', torch.cuda.get_device_name(0))
#model.cuda()
# If not...
else:
print('No GPU available, using the CPU instead.')
device = torch.device("cpu")
device = torch.device("cpu")
random.seed(seed_val)
np.random.seed(seed_val)
torch.manual_seed(seed_val)
torch.cuda.manual_seed_all(seed_val)
#%%
# Start training
# We'll store a number of quantities such as training and validation loss,
# validation accuracy, and timings.
training_stats = []
# Measure the total training time for the whole run.
total_t0 = time.time()
# For each epoch...
for epoch_i in range(0, epochs):
# ========================================
# Training
# ========================================
# Perform one full pass over the training set.
print("")
print('======== Epoch {:} / {:} ========'.format(epoch_i + 1, epochs))
print('{:>5,} steps per batch will be calculated.'.format(len(train_dataloader)))
print('Training...')
# Measure how long the training epoch takes.
t0 = time.time()
model.to(device)
# Reset the total loss for this epoch.
total_train_loss = 0
# Put the model into training mode. Don't be mislead--the call to
# `train` just changes the *mode*, it doesn't *perform* the training.
# `dropout` and `batchnorm` layers behave differently during training
# vs. test (source: https://stackoverflow.com/questions/51433378/what-does-model-train-do-in-pytorch)
model.train()
# For each batch of training data...
for step, batch in enumerate(train_dataloader):
# Progress update every 10 batches.
if step % 10 == 0 and not step == 0:
# Calculate elapsed time in minutes.
elapsed = format_time(time.time() - t0)
# Report progress.
print(' Batch {:>5,} of {:>5,}. Elapsed: {:}.'.format(step, len(train_dataloader), elapsed))
# Unpack this training batch from our dataloader.
#
# As we unpack the batch, we'll also copy each tensor to the GPU using the
# `to` method.
#
# `batch` contains three pytorch tensors:
# [0]: input ids
# [1]: attention masks
# [2]: labels
print("Batch keys:", batch.keys())
b_input_ids = batch['input_ids'].to(device)
b_input_mask = batch['attention_mask'].to(device)
b_labels = batch['labels'].to(device)
# Always clear any previously calculated gradients before performing a
# backward pass. PyTorch doesn't do this automatically because
# accumulating the gradients is "convenient while training RNNs".
# (source: https://stackoverflow.com/questions/48001598/why-do-we-need-to-call-zero-grad-in-pytorch)
model.zero_grad()
# Perform a forward pass (evaluate the model on this training batch).
# The documentation for this `model` function is here:
# https://huggingface.co/transformers/v2.2.0/model_doc/bert.html#transformers.BertForSequenceClassification
# It returns different numbers of parameters depending on what arguments
# arge given and what flags are set. For our useage here, it returns
# the loss (because we provided labels) and the "logits"--the model
# outputs prior to activation.
output = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask, labels=b_labels)
loss = output[0]
logits = output[1]
# Accumulate the training loss over all of the batches so that we can
# calculate the average loss at the end. `loss` is a Tensor containing a
# single value; the `.item()` function just returns the Python value
# from the tensor.
total_train_loss += loss.item()
# Perform a backward pass to calculate the gradients.
loss.backward()
# Clip the norm of the gradients to 1.0.
# This is to help prevent the "exploding gradients" problem.
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
# Update parameters and take a step using the computed gradient.
# The optimizer dictates the "update rule"--how the parameters are
# modified based on their gradients, the learning rate, etc.
optimizer.step()
# Update the learning rate.
scheduler.step()
# Calculate the average loss over all of the batches.
avg_train_loss = total_train_loss / len(train_dataloader)
# Measure how long this epoch took.
training_time = format_time(time.time() - t0)
print("")
print(" Average training loss: {0:.2f}".format(avg_train_loss))
print(" Training epcoh took: {:}".format(training_time))
# ========================================
# Validation
# ========================================
# After the completion of each training epoch, measure our performance on
# our validation set.
print("")
print("Running Validation...")
t0 = time.time()
# Put the model in evaluation mode--the dropout layers behave differently
# during evaluation.
model.eval()
# Tracking variables
total_eval_accuracy = 0
total_eval_loss = 0
nb_eval_steps = 0
# Evaluate data for one epoch
for batch in validation_dataloader:
# Unpack this training batch from our dataloader.
#
# As we unpack the batch, we'll also copy each tensor to the GPU using
# the `to` method.
#
# `batch` contains three pytorch tensors:
# [0]: input ids
# [1]: attention masks
# [2]: labels
b_input_ids = batch['input_ids'].to(device)
b_input_mask = batch['attention_mask'].to(device)
b_labels = batch['labels'].to(device)
# Tell pytorch not to bother with constructing the compute graph during
# the forward pass, since this is only needed for backprop (training).
with torch.no_grad():
# Forward pass, calculate logit predictions.
# token_type_ids is the same as the "segment ids", which
# differentiates sentence 1 and 2 in 2-sentence tasks.
# The documentation for this `model` function is here:
# https://huggingface.co/transformers/v2.2.0/model_doc/bert.html#transformers.BertForSequenceClassification
# Get the "logits" output by the model. The "logits" are the output
# values prior to applying an activation function like the softmax.
output = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask, labels=b_labels)
loss = output[0]
logits = output[1]
# Accumulate the validation loss.
total_eval_loss += loss.item()
# Move logits and labels to CPU
logits = logits.detach().cpu().numpy()
label_ids = b_labels.to('cpu').numpy()
# Calculate the accuracy for this batch of test sentences, and
# accumulate it over all batches.
total_eval_accuracy += flat_accuracy(logits, label_ids)
# Report the final accuracy for this validation run.
avg_val_accuracy = total_eval_accuracy / len(validation_dataloader)
print(" Accuracy: {0:.2f}".format(avg_val_accuracy))
# Calculate the average loss over all of the batches.
avg_val_loss = total_eval_loss / len(validation_dataloader)
# Measure how long the validation run took.
validation_time = format_time(time.time() - t0)
print(" Validation Loss: {0:.2f}".format(avg_val_loss))
print(" Validation took: {:}".format(validation_time))
# Record all statistics from this epoch.
training_stats.append(
{
'epoch': epoch_i + 1,
'Training Loss': avg_train_loss,
'Valid. Loss': avg_val_loss,
'Valid. Accur.': avg_val_accuracy,
'Training Time': training_time,
'Validation Time': validation_time
}
)
print("")
print("Training complete!")
print("Total training took {:} (h:mm:ss)".format(format_time(time.time()-total_t0)))
params = list(model.named_parameters())
print('The BERT model has {:} different named parameters.\n'.format(len(params)))
print('==== Embedding Layer ====\n')
for p in params[0:5]:
print("{:<55} {:>12}".format(p[0], str(tuple(p[1].size()))))
print('\n==== First Transformer ====\n')
for p in params[5:21]:
print("{:<55} {:>12}".format(p[0], str(tuple(p[1].size()))))
print('\n==== Output Layer ====\n')
for p in params[-4:]:
print("{:<55} {:>12}".format(p[0], str(tuple(p[1].size()))))
import os
# Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
from datetime import datetime as dt
fTimeFormat = "%Y-%m-%d_%H-%M-%S"
now = dt.now().strftime(fTimeFormat)
output_dir = modFakeClassPath + now + "/"
# Create output directory if needed
if not os.path.exists(output_dir):
os.makedirs(output_dir)
print("Saving model to %s" % output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
# Good practice: save your training arguments together with the trained model
# torch.save(args, os.path.join(output_dir, 'training_args.bin'))
import pandas as pd
# Display floats with two decimal places.
pd.set_option('display.precision', 2)
# Create a DataFrame from our training statistics.
df_stats = pd.DataFrame(data=training_stats)
# Use the 'epoch' as the row index.# Good practice: save your training arguments together with the trained model
df_stats = df_stats.set_index('epoch')
# A hack to force the column headers to wrap.
#df = df.style.set_table_styles([dict(selector="th",props=[('max-width', '70px')])])
# Display the table.
df_stats
df_stats.to_csv(output_dir + now + ".csv")
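Since trainFake.py keeps only tweets whose topicCovid label is 'True' before training, the two saved models would naturally be chained at inference time: classify the topic first, then apply the fake-news model only to tweets flagged as Covid-related. A minimal sketch of that chain, assuming both models were saved by the scripts above; the directory names are hypothetical and the integer codings come from the scripts' encode_labels() functions:

import torch
from transformers import AutoTokenizer, BertForSequenceClassification

def load(model_dir):
    tok = AutoTokenizer.from_pretrained(model_dir)
    mod = BertForSequenceClassification.from_pretrained(model_dir)
    mod.eval()
    return tok, mod

def predict(tok, mod, text, max_length=64):
    enc = tok(text, max_length=max_length, padding="max_length", truncation=True, return_tensors="pt")
    with torch.no_grad():
        return mod(**enc).logits.argmax(dim=1).item()

# hypothetical timestamped output folders created by train.py and trainFake.py
cov_tok, cov_mod = load("models/CovClass/2023-08-15_12-00-00/")
fake_tok, fake_mod = load("models/FakeClass/2023-08-15_13-00-00/")

tweet = "Some example tweet text"
if predict(cov_tok, cov_mod, tweet) == 1:  # 1 = Covid topic per encode_labels()
    # 0/1 coding for this model follows encode_labels() in trainFake.py
    print("fake-news label:", predict(fake_tok, fake_mod, tweet))
else:
    print("not a Covid tweet; fake-news model not applied")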