#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 12 12:25:18 2023

@author: michael
"""

#from datasets import load_dataset
#from transformers import Trainer
#from transformers import AutoModelForSequenceClassification
from transformers import AutoTokenizer
import torch
import numpy as np
from sklearn.model_selection import train_test_split  # pip install scikit-learn

import pandas as pd

## Uses snippets from this guide:
# https://mccormickml.com/2019/07/22/BERT-fine-tuning/
###################
# Setup directories

# WD Michael
wd = "/home/michael/Documents/PS/Data/collectTweets/"
# WD Server
# wd = '/home/yunohost.multimedia/polsoc/Politics & Society/TweetCollection/'

import sys
funs = wd + "funs"
sys.path.insert(1, funs)
import CleanTweets

# datafile input directory
di = "data/IN/"

# Tweet-datafile output directory
ud = "data/OUT/"

# Training CSV dataset
twtCSV = "SenatorsTweets-Training_WORKING-COPY-correct2"
twtCSVtrainCovClass = "SenatorsTweets-train-CovClassification"
twtCSVtrainFakeClass = "SenatorsTweets-train-FakeClassification"
statsTrainingTopicClass = "statsTopicClassification-"

# don't change these
twtCSVPath = wd + ud + twtCSV + ".csv"
twtCSVtrainCovClassPath = wd + ud + twtCSVtrainCovClass + ".csv"
twtCSVtrainFakeClassPath = wd + ud + twtCSVtrainFakeClass + ".csv"

statsTrainingTopicClassPath = wd + ud + statsTrainingTopicClass

twtCSVtrainCovClassPathTrain = wd + ud + twtCSVtrainCovClass + "TRAIN.csv"
twtCSVtrainFakeClassPathTrain = wd + ud + twtCSVtrainFakeClass + "TRAIN.csv"
twtTSVtrainCovClassPathTrain = wd + ud + "cov-train.tsv"
twtTSVtrainFakeClassPathTrain = wd + ud + "fake-train.tsv"

twtTSVtrainCovClassPathEval = wd + ud + "cov-eval.tsv"
twtTSVtrainFakeClassPathEval = wd + ud + "fake-eval.tsv"

seed = 12355

# Model paths
modCovClassPath = wd + "models/CovClass/"
modFakeClassPath = wd + "models/FakeClass/"

model_name = "bvrau/covid-twitter-bert-v2-struth"
model_fake_name = "bvrau/covid-twitter-bert-v2-struth"

# More models for fake detection:
# https://huggingface.co/justinqbui/bertweet-covid-vaccine-tweets-finetuned

tokenizer = AutoTokenizer.from_pretrained(model_name)

max_length = 64  # max token sentence length
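# Optional sanity check (a minimal sketch, not needed by the pipeline below): shows what the
# tokenizer returns for one made-up example string at max_length; real tweets behave the same way.
example_enc = tokenizer("Example tweet about covid vaccines",
                        max_length=max_length, padding="max_length", truncation=True)
print("Sanity check - input_ids length:", len(example_enc['input_ids']))  # padded/truncated to max_length
print("Sanity check - attention_mask:", example_enc['attention_mask'])    # 1 = real token, 0 = padding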
#%%
# Create training and testing dataset
dfTest = pd.read_csv(twtCSVPath, dtype=object, delimiter=";")

#dfTest = dfTest[:-900] # remove last 900 rows
#dfTest = dfTest.iloc[:,:-3] # remove last 3 columns

dfTest['text'] = dfTest['rawContent'].apply(CleanTweets.preprocess_roberta)

dfTest.drop(columns=['rawContent'], inplace=True)

# Only keep tweets that are longer than 3 words
dfTest['tweet_proc_length'] = [len(text.split(' ')) for text in dfTest['text']]
dfTest['tweet_proc_length'].value_counts()
dfTest = dfTest[dfTest['tweet_proc_length'] > 3]
dfTest = dfTest.drop_duplicates(subset=['text'])
dfTest = dfTest.drop(columns=['date', 'Unnamed: 0'])

# Create datasets for each classification
dfCovClass = dfTest
dfFakeClass = dfTest
dfCovClass = dfCovClass.drop(columns=['fake'])  # fake column not needed in covid topic classification data
dfFakeClass = dfFakeClass[dfFakeClass['topicCovid'] == 'True'].drop(columns=['topicCovid'])  # topicCovid column not needed in fake news classification data

#type_map = {'Covid tweet': 'covid tweets', 'Noncovid tweet': 'noncovid tweet'}
dfCovClass.rename(index=str, columns={'topicCovid': 'labels', 'tid': 'id'}, inplace=True)
dfCovClass.labels = dfCovClass.labels.replace({"True": 'Covid', "False": 'NonCovid'})

#type_map = {'fake news tweet': 'fake news tweet', 'non-fake-news-tweet': 'non-fake-news-tweet'}
dfFakeClass.rename(index=str, columns={'fake': 'labels', 'tid': 'id'}, inplace=True)
dfFakeClass.labels = dfFakeClass.labels.replace({"True": 'Fake', "False": 'True'})
#%%
# Tokenize tweets
dfCovClass = dfCovClass[dfCovClass['labels'].notna()]
dfFakeClass = dfFakeClass[dfFakeClass['labels'].notna()]
dfCovClass['input_ids'] = dfCovClass['text'].apply(lambda x: tokenizer(x, max_length=max_length, padding="max_length")['input_ids'])
dfFakeClass['input_ids'] = dfFakeClass['text'].apply(lambda x: tokenizer(x, max_length=max_length, padding="max_length")['input_ids'])

def encode_labels(label):
    if label == 'Covid':
        return 1
    elif label == 'NonCovid':
        return 0
    elif label == 'Fake':
        return 1
    elif label == 'True':
        return 0
    return 0

dfCovClass['labels_encoded'] = dfCovClass['labels'].apply(encode_labels)
dfFakeClass['labels_encoded'] = dfFakeClass['labels'].apply(encode_labels)

# get n of classes
print("# of Non-Covid tweets (coded 0):")
print(dfCovClass.groupby('labels_encoded', group_keys=False)['id'].nunique())
# 62 non-covid tweets, so a 50/50 disproportionate training sample would have to be 124 tweets

print("# of Fake-news tweets (coded 1):")
print(dfFakeClass.groupby('labels_encoded', group_keys=False)['id'].nunique())

# create disproportionate sample - 50/50 of both
#dfCovClass.groupby('labels_encoded', group_keys=False)['id'].nunique()
#dfCovClass = dfCovClass.groupby('labels_encoded', group_keys=False).apply(lambda x: x.sample(164, random_state=seed))
# After a lot of tests, it seems that a sample in which non-fake-news tweets are overrepresented leads to better results.
# Because of this, and because of performance limitations and time constraints, group 1 (covid topic) will be
# overrepresented (twice as many), which still doesn't reflect the real proportions of roughly 10:1.
# A further commented-out sketch of such a 2:1 sample follows the quoted-out block below.

'''dfCovClassa = dfCovClass.groupby('labels_encoded', group_keys=False).get_group(1).sample(frac=1, replace=True).reset_index()
dfCovClassb = dfCovClass.groupby('labels_encoded', group_keys=False).get_group(0).sample(frac=1, replace=True).reset_index()
dfCovClassab = pd.concat([dfCovClassa, dfCovClassb])
dfCovClassab.reset_index(inplace=True)
dfCovClass_train, dfCovClass_test = train_test_split(dfCovClassab, test_size=0.1, random_state=seed, stratify=dfCovClassab['labels_encoded'])
'''
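# A minimal sketch (not used below, kept commented out) of how a 2:1 disproportionate
# sample could be drawn instead; the 2:1 ratio follows the comment above, and
# `dfCovClassBalanced` is a hypothetical name introduced only for this illustration.
#n_noncovid = (dfCovClass['labels_encoded'] == 0).sum()
#dfNonCovid = dfCovClass[dfCovClass['labels_encoded'] == 0]
#dfCovid = dfCovClass[dfCovClass['labels_encoded'] == 1].sample(n=2 * n_noncovid, random_state=seed)
#dfCovClassBalanced = pd.concat([dfNonCovid, dfCovid]).sample(frac=1, random_state=seed).reset_index(drop=True)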
# create training and validation samples
dfCovClass_train, dfCovClass_test = train_test_split(dfCovClass, test_size=0.1, random_state=seed, stratify=dfCovClass['labels_encoded'])

# reset index and drop unnecessary columns
dfCovClass_train.reset_index(drop=True, inplace=True)
dfCovClass_train.drop(inplace=True, columns=['tweet_proc_length'])
dfCovClass_train.groupby('labels_encoded', group_keys=False)['id'].nunique()

dfCovClass_test.reset_index(drop=True, inplace=True)
dfCovClass_test.drop(inplace=True, columns=['tweet_proc_length'])
dfCovClass_test.groupby('labels_encoded', group_keys=False)['id'].nunique()

# save dfs as csvs and tsvs, for training and validation
# covid classification datafiles
# rows 0-41 = noncovid, 42-81 covid, therefore:
#dfCovClass = dfCovClass.drop(columns=['tweet_proc_length'])
#dfCovClass.reset_index(inplace=True, drop=True)
#dfCovClass.loc[np.r_[0:31, 42:71], :].reset_index(drop=True).to_csv(twtCSVtrainCovClassPathTrain, encoding='utf-8', sep=";")
#dfCovClass.loc[np.r_[0:31, 42:72], :].reset_index(drop=True).to_csv(twtTSVtrainCovClassPathTrain, encoding='utf-8', sep="\t")
#dfCovClass.loc[np.r_[31:41, 72:81], :].reset_index(drop=True).to_csv(twtCSVtrainCovClassPath, encoding='utf-8', sep=";")
#dfCovClass.loc[np.r_[31:41, 72:81], :].reset_index(drop=True).to_csv(twtTSVtrainCovClassPathEval, encoding='utf-8', sep="\t")

# fake news classification datafiles
#dfFakeClass = dfFakeClass.drop(columns=['tweet_proc_length'])
#dfFakeClass[200:1000].reset_index(drop=True).to_csv(twtCSVtrainFakeClassPathTrain, encoding='utf-8', sep=";")
#dfFakeClass[200:1000].reset_index(drop=True).to_csv(twtTSVtrainFakeClassPathTrain, encoding='utf-8', sep="\t")
#dfFakeClass[0:199].reset_index(drop=True).to_csv(twtCSVtrainFakeClassPath, encoding='utf-8', sep=";")
#dfFakeClass[0:199].reset_index(drop=True).to_csv(twtTSVtrainFakeClassPathEval, encoding='utf-8', sep="\t")
#%%
# Prepare trainer
#from transformers import TrainingArguments

#training_args = TrainingArguments(
#    report_to = 'wandb',
#    output_dir=wd+'results',          # output directory
#    overwrite_output_dir = True,
#    num_train_epochs=6,               # total number of training epochs
#    per_device_train_batch_size=8,    # batch size per device during training
#    per_device_eval_batch_size=16,    # batch size for evaluation
#    learning_rate=2e-5,
#    warmup_steps=1000,                # number of warmup steps for learning rate scheduler
#    weight_decay=0.01,                # strength of weight decay
#    logging_dir='./logs3',            # directory for storing logs
#    logging_steps=1000,
#    evaluation_strategy="epoch",
#    save_strategy="epoch",
#    load_best_model_at_end=True
#)

tokenizer = AutoTokenizer.from_pretrained(model_name)
from transformers import BertForSequenceClassification, AdamW  #, BertConfig
#from torch.utils.data import TensorDataset, random_split
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler

"""
train_dataset = load_dataset('csv', data_files={'train': twtCSVtrainCovClassPathTrain}, encoding = "utf-8")
train_dataset = train_dataset['train']
eval_dataset = load_dataset('csv', data_files={'test': twtCSVtrainCovClassPath}, encoding = "utf-8")
eval_dataset = eval_dataset['test']
"""
batch_size = 1

from torch.utils.data import Dataset
class PandasDataset(Dataset):
    def __init__(self, dataframe, tokenizer, max_length):
        self.dataframe = dataframe
        self.tokenizer = tokenizer
        self.max_length = max_length

    def __len__(self):
        return len(self.dataframe)

    def __getitem__(self, index):
        row = self.dataframe.iloc[index]
        text = row['text']
        labels = row['labels_encoded']

        encoded = self.tokenizer(text, max_length=self.max_length, padding="max_length", truncation=True)
        input_ids = torch.tensor(encoded['input_ids'])
        attention_mask = torch.tensor(encoded['attention_mask'])

        return {
            'input_ids': input_ids,
            'attention_mask': attention_mask,
            'labels': torch.tensor(labels)  # Assuming labels are already encoded
        }


train_dataset = PandasDataset(dfCovClass_train, tokenizer, max_length)
train_dataloader = DataLoader(
    train_dataset,
    sampler=RandomSampler(train_dataset),
    batch_size=batch_size
)

eval_dataset = PandasDataset(dfCovClass_test, tokenizer, max_length)
validation_dataloader = DataLoader(
    eval_dataset,
    sampler=SequentialSampler(eval_dataset),
    batch_size=batch_size
)
for idx, batch in enumerate(train_dataloader):
    print('Batch index: ', idx)
    print('Batch size: ', batch['input_ids'].size())  # Access 'input_ids' field
    print('Batch label: ', batch['labels'])           # Access 'labels' field
    break
model = BertForSequenceClassification.from_pretrained(
    model_name,
    num_labels = 2,               # The number of output labels--2 for binary classification.
                                  # You can increase this for multi-class tasks.
    output_attentions = False,    # Whether the model returns attentions weights.
    output_hidden_states = False, # Whether the model returns all hidden-states.
)
#trainer = Trainer(
#    model=model,                 # the instantiated 🤗 Transformers model to be trained
#    args=training_args,          # training arguments, defined above
#    train_dataset=train_dataset, # training dataset
#    eval_dataset=eval_dataset    # evaluation dataset
#)

# Note: AdamW is a class from the huggingface library (as opposed to pytorch)
# I believe the 'W' stands for 'Weight Decay fix'
optimizer = AdamW(model.parameters(),
                  lr = 2e-5,   # args.learning_rate - default is 5e-5, our notebook had 2e-5
                  eps = 1e-8   # args.adam_epsilon - default is 1e-8.
                  )
from transformers import get_linear_schedule_with_warmup

# Number of training epochs. The BERT authors recommend between 2 and 4.
# We chose to run for 6.
epochs = 6

# Total number of training steps is [number of batches] x [number of epochs].
# (Note that this is not the same as the number of training samples).
total_steps = len(train_dataloader) * epochs

# Create the learning rate scheduler.
scheduler = get_linear_schedule_with_warmup(optimizer,
                                            num_warmup_steps = 0,  # Default value in run_glue.py
                                            num_training_steps = total_steps)

# Function to calculate the accuracy of our predictions vs labels
def flat_accuracy(preds, labels):
    pred_flat = np.argmax(preds, axis=1).flatten()
    labels_flat = labels.flatten()
    return np.sum(pred_flat == labels_flat) / len(labels_flat)
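# A minimal illustration (not part of the pipeline) of what flat_accuracy expects:
# a 2D array of per-class logits and a 1D array of integer labels.
# The numbers here are made up for the example.
_example_preds = np.array([[0.1, 0.9], [2.0, -1.0]])   # predicts class 1, then class 0
_example_labels = np.array([1, 0])
print("flat_accuracy example:", flat_accuracy(_example_preds, _example_labels))  # -> 1.0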
import time
import datetime

def format_time(elapsed):
    '''
    Takes a time in seconds and returns a string hh:mm:ss
    '''
    # Round to the nearest second.
    elapsed_rounded = int(round(elapsed))

    # Format as hh:mm:ss
    return str(datetime.timedelta(seconds=elapsed_rounded))
import random

# This training code is based on the `run_glue.py` script here:
# https://github.com/huggingface/transformers/blob/5bfcd0485ece086ebcbed2d008813037968a9e58/examples/run_glue.py#L128

# Set the seed value all over the place to make this reproducible.
seed_val = 12355

# If there's a GPU available...
if torch.cuda.is_available():
    # Tell PyTorch to use the GPU.
    device = torch.device("cuda")
    print('There are %d GPU(s) available.' % torch.cuda.device_count())
    print('We will use the GPU:', torch.cuda.get_device_name(0))
    #model.cuda()
# If not...
else:
    print('No GPU available, using the CPU instead.')
    device = torch.device("cpu")

# Force CPU for this run (overrides the GPU selection above).
device = torch.device("cpu")

random.seed(seed_val)
np.random.seed(seed_val)
torch.manual_seed(seed_val)
torch.cuda.manual_seed_all(seed_val)
#%%
# Start training

# We'll store a number of quantities such as training and validation loss,
# validation accuracy, and timings.
training_stats = []

# Measure the total training time for the whole run.
total_t0 = time.time()

# For each epoch...
for epoch_i in range(0, epochs):

    # ========================================
    #               Training
    # ========================================

    # Perform one full pass over the training set.

    print("")
    print('======== Epoch {:} / {:} ========'.format(epoch_i + 1, epochs))
    print('{:>5,} batches will be processed in this epoch.'.format(len(train_dataloader)))
    print('Training...')

    # Measure how long the training epoch takes.
    t0 = time.time()
    model.to(device)

    # Reset the total loss for this epoch.
    total_train_loss = 0

    # Put the model into training mode. Don't be misled--the call to
    # `train` just changes the *mode*, it doesn't *perform* the training.
    # `dropout` and `batchnorm` layers behave differently during training
    # vs. test (source: https://stackoverflow.com/questions/51433378/what-does-model-train-do-in-pytorch)
    model.train()
    # For each batch of training data...
    for step, batch in enumerate(train_dataloader):

        # Progress update every 10 batches.
        if step % 10 == 0 and not step == 0:
            # Calculate elapsed time in minutes.
            elapsed = format_time(time.time() - t0)

            # Report progress.
            print('  Batch {:>5,} of {:>5,}. Elapsed: {:}.'.format(step, len(train_dataloader), elapsed))

        # Unpack this training batch from our dataloader.
        #
        # As we unpack the batch, we'll also copy each tensor to the device using the
        # `to` method.
        #
        # `batch` is a dict with three pytorch tensors:
        #   'input_ids', 'attention_mask' and 'labels'
        print("Batch keys:", batch.keys())
        b_input_ids = batch['input_ids'].to(device)
        b_input_mask = batch['attention_mask'].to(device)
        b_labels = batch['labels'].to(device)

        # Always clear any previously calculated gradients before performing a
        # backward pass. PyTorch doesn't do this automatically because
        # accumulating the gradients is "convenient while training RNNs".
        # (source: https://stackoverflow.com/questions/48001598/why-do-we-need-to-call-zero-grad-in-pytorch)
        model.zero_grad()

        # Perform a forward pass (evaluate the model on this training batch).
        # The documentation for this `model` function is here:
        # https://huggingface.co/transformers/v2.2.0/model_doc/bert.html#transformers.BertForSequenceClassification
        # It returns different numbers of values depending on what arguments
        # are given and what flags are set. For our usage here, it returns
        # the loss (because we provided labels) and the "logits"--the model
        # outputs prior to activation.
        output = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask, labels=b_labels)
        loss = output[0]
        logits = output[1]

        # Accumulate the training loss over all of the batches so that we can
        # calculate the average loss at the end. `loss` is a Tensor containing a
        # single value; the `.item()` function just returns the Python value
        # from the tensor.
        total_train_loss += loss.item()

        # Perform a backward pass to calculate the gradients.
        loss.backward()

        # Clip the norm of the gradients to 1.0.
        # This is to help prevent the "exploding gradients" problem.
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)

        # Update parameters and take a step using the computed gradient.
        # The optimizer dictates the "update rule"--how the parameters are
        # modified based on their gradients, the learning rate, etc.
        optimizer.step()

        # Update the learning rate.
        scheduler.step()

    # Calculate the average loss over all of the batches.
    avg_train_loss = total_train_loss / len(train_dataloader)

    # Measure how long this epoch took.
    training_time = format_time(time.time() - t0)

    print("")
    print("  Average training loss: {0:.2f}".format(avg_train_loss))
    print("  Training epoch took: {:}".format(training_time))
    # ========================================
    #               Validation
    # ========================================
    # After the completion of each training epoch, measure our performance on
    # our validation set.

    print("")
    print("Running Validation...")

    t0 = time.time()

    # Put the model in evaluation mode--the dropout layers behave differently
    # during evaluation.
    model.eval()

    # Tracking variables
    total_eval_accuracy = 0
    total_eval_loss = 0
    nb_eval_steps = 0

    # Evaluate data for one epoch
    for batch in validation_dataloader:

        # Unpack this validation batch from our dataloader.
        #
        # As we unpack the batch, we'll also copy each tensor to the device using
        # the `to` method.
        #
        # `batch` is a dict with three pytorch tensors:
        #   'input_ids', 'attention_mask' and 'labels'
        b_input_ids = batch['input_ids'].to(device)
        b_input_mask = batch['attention_mask'].to(device)
        b_labels = batch['labels'].to(device)

        # Tell pytorch not to bother with constructing the compute graph during
        # the forward pass, since this is only needed for backprop (training).
        with torch.no_grad():

            # Forward pass, calculate logit predictions.
            # token_type_ids is the same as the "segment ids", which
            # differentiates sentence 1 and 2 in 2-sentence tasks.
            # The documentation for this `model` function is here:
            # https://huggingface.co/transformers/v2.2.0/model_doc/bert.html#transformers.BertForSequenceClassification
            # Get the "logits" output by the model. The "logits" are the output
            # values prior to applying an activation function like the softmax.
            output = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask, labels=b_labels)
            loss = output[0]
            logits = output[1]

        # Accumulate the validation loss.
        total_eval_loss += loss.item()

        # Move logits and labels to CPU
        logits = logits.detach().cpu().numpy()
        label_ids = b_labels.to('cpu').numpy()

        # Calculate the accuracy for this batch of test sentences, and
        # accumulate it over all batches.
        total_eval_accuracy += flat_accuracy(logits, label_ids)

    # Report the final accuracy for this validation run.
    avg_val_accuracy = total_eval_accuracy / len(validation_dataloader)
    print("  Accuracy: {0:.2f}".format(avg_val_accuracy))

    # Calculate the average loss over all of the batches.
    avg_val_loss = total_eval_loss / len(validation_dataloader)

    # Measure how long the validation run took.
    validation_time = format_time(time.time() - t0)

    print("  Validation Loss: {0:.2f}".format(avg_val_loss))
    print("  Validation took: {:}".format(validation_time))

    # Record all statistics from this epoch.
    training_stats.append(
        {
            'epoch': epoch_i + 1,
            'Training Loss': avg_train_loss,
            'Valid. Loss': avg_val_loss,
            'Valid. Accur.': avg_val_accuracy,
            'Training Time': training_time,
            'Validation Time': validation_time
        }
    )
print("")
|
|
print("Training complete!")
|
|
|
|
print("Total training took {:} (h:mm:ss)".format(format_time(time.time()-total_t0)))
|
|
|
|
params = list(model.named_parameters())
|
|
|
|
print('The BERT model has {:} different named parameters.\n'.format(len(params)))
|
|
|
|
print('==== Embedding Layer ====\n')
|
|
|
|
for p in params[0:5]:
|
|
print("{:<55} {:>12}".format(p[0], str(tuple(p[1].size()))))
|
|
|
|
print('\n==== First Transformer ====\n')
|
|
|
|
for p in params[5:21]:
|
|
print("{:<55} {:>12}".format(p[0], str(tuple(p[1].size()))))
|
|
|
|
print('\n==== Output Layer ====\n')
|
|
|
|
for p in params[-4:]:
|
|
print("{:<55} {:>12}".format(p[0], str(tuple(p[1].size()))))
|
|
|
|
|
|
import os

# Saving best practices: if you use default names for the model, you can reload it using from_pretrained()
from datetime import datetime as dt

fTimeFormat = "%Y-%m-%d_%H-%M-%S"
now = dt.now().strftime(fTimeFormat)

output_dir = modCovClassPath + now + "/"

# Create output directory if needed
if not os.path.exists(output_dir):
    os.makedirs(output_dir)

print("Saving model to %s" % output_dir)

# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = model.module if hasattr(model, 'module') else model  # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)

# Good practice: save your training arguments together with the trained model
# torch.save(args, os.path.join(output_dir, 'training_args.bin'))
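# A minimal sketch (kept commented out so the script doesn't load the model a second time)
# of how the saved model and tokenizer could later be reloaded from `output_dir`:
#reloaded_model = BertForSequenceClassification.from_pretrained(output_dir)
#reloaded_tokenizer = AutoTokenizer.from_pretrained(output_dir)
#reloaded_model.to(device)
#reloaded_model.eval()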
import pandas as pd

# Display floats with two decimal places.
pd.set_option('display.precision', 2)

# Create a DataFrame from our training statistics.
df_stats = pd.DataFrame(data=training_stats)

# Use the 'epoch' as the row index.
df_stats = df_stats.set_index('epoch')

# A hack to force the column headers to wrap.
#df = df.style.set_table_styles([dict(selector="th",props=[('max-width', '70px')])])

# Display the table.
df_stats
df_stats.to_csv(output_dir + now + ".csv")