import time
from datetime import datetime

import pandas as pd
import snscrape.modules.twitter as sntwitter


def scrapeTweets(handle, keywords, td, tweetDFColumns, ts_beg, ts_end, suffix, maxTweets=5000):
    """Scrapes tweets from a specific account in a specific time span using snscrape.modules.twitter.

    Args:
        handle (str): Twitter handle of the account to be scraped
        keywords (list): list of keyword strings the tweets shall be searched for
        td (str): tweet file output path
        tweetDFColumns (list): columns for the tweet dataframe; attribute names of snscrape.modules.twitter.Tweet
        ts_beg (str): scrape from this timestamp, formatted YYYY-MM-DDTHH:MM:SSZ, i.e. datetime format %Y-%m-%dT%H:%M:%SZ (https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes)
        ts_end (str): scrape until this timestamp, same format as ts_beg
        suffix (str): suffix appended to the filename after the handle, e.g. suffix "-slice1" with handle "handle" produces the file "Tweets-handle-slice1.csv"
        maxTweets (int, optional): maximum number of tweets to be scraped. Defaults to 5000.
    """
    i = 0
    currentTime = datetime.now()
    tweetDataFilePath = td + f"Tweets-{handle}{suffix}.csv"

    # empty tweet list that will be filled with the tweets of the current senator
    TweetList = []

    # status message
    print(f'{currentTime:<30} Fetching: {handle:>15}{suffix:<7} - from {ts_beg} to {ts_end}')

    # snscrape query, e.g. 'from:handle since:2020-01-01T00:00:00Z until:2020-12-31T23:59:59Z'
    query = f'from:{handle} since:{ts_beg} until:{ts_end}'
    for i, tweet in enumerate(sntwitter.TwitterSearchScraper(query).get_items()):
        if i >= maxTweets:
            break
        # Collect the requested tweet attributes (tweetDFColumns) for this tweet
        # and append them to TweetList, which holds all tweets of the current slice.
        # getattr() replaces the original eval(f'tweet.{col}'): same result for
        # plain attribute names, without evaluating arbitrary strings.
        singleTweetList = [getattr(tweet, col) for col in tweetDFColumns]
        TweetList.append(singleTweetList)

    # # Check whether no tweets were fetched for the current time slice. If there
    # # are none, write an empty file and return, skipping to the next time slice:
    # if not TweetList:
    #     open(tweetDataFilePath, 'a').close()
    #     print(f'return empty in {handle}{suffix} - from {ts_beg} to {ts_end}')
    #     return

    print(f'{i:<6} tweets scraped for: {handle:>15}{suffix:<7}')

    # convert to dataframe
    tweet_df = pd.DataFrame(TweetList, columns=tweetDFColumns)

    # Check whether the tweet text contains any of the keywords. Note that
    # '|'.join(keywords) is interpreted as a regex alternation, so keywords
    # containing regex metacharacters would need re.escape().
    tweet_df['contains_keyword'] = (
        tweet_df['rawContent'].str.findall('|'.join(keywords)).str.join(',').replace('', 'none')
    )
    # return tweet_df

    # save the dataset as csv
    tweet_df.to_csv(tweetDataFilePath, encoding='utf-8')

    # sleep briefly to not get blocked because of excessive requests
    time.sleep(0.5)
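
# Example call (a minimal sketch; handle, keywords, path, column names, and
# timestamps below are illustrative assumptions, not values from this file --
# note that 'rawContent' must be among tweetDFColumns for the keyword check):
#
# scrapeTweets(
#     handle='example_handle',
#     keywords=['budget', 'healthcare'],
#     td='data/tweets/',
#     tweetDFColumns=['id', 'date', 'rawContent'],
#     ts_beg='2021-01-01T00:00:00Z',
#     ts_end='2021-01-31T23:59:59Z',
#     suffix='-slice1',
# )
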
def getHandles(di):
    """Grabs accounts from senators-raw.csv.

    Args:
        di (str): path to the directory containing senators-raw.csv

    Returns:
        list: list containing str of senator account handles
    """
    senators = pd.read_csv(f"{di}senators-raw.csv")  # read the file once instead of twice
    accounts = senators["twitter_handle"].tolist()
    alt_accounts = senators["alt_handle"].tolist()
    alt_accounts = [x for x in alt_accounts if str(x) != 'nan']  # remove empty alt_handle fields
    accounts.extend(alt_accounts)
    return accounts
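
# Example (a sketch; 'data/' is an assumed directory that would contain a
# senators-raw.csv with 'twitter_handle' and 'alt_handle' columns):
#
# accounts = getHandles('data/')
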
def printHandles(accounts):
    """Returns a string listing all accounts in a readable way.

    Args:
        accounts (list): list of str with handles

    Returns:
        str: text that can be written to a txt file
    """
    txt = ["Accounts to be scraped:\n"]
    for i, acc in enumerate(accounts):  # print 5 accounts per line
        txt.append(f"{acc:^17}")  # twitter handle max length = 15 chars
        if i % 5 == 4:
            txt.append(" \n")
    txt.append(f"\n{len(accounts)} accounts in total.")  # len() instead of i: i is the last index, not the count
    return ''.join(txt)
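
# Example (a sketch with made-up handles):
#
# overview = printHandles(['SenExampleA', 'SenExampleB', 'SenExampleC'])
# with open('accounts.txt', 'w') as f:
#     f.write(overview)
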
def scrapeUsers(handle, userDFColumns, maxTweets=1):
    """Scrapes user data for an account from its most recent tweet(s).

    Args:
        handle (str): twitter handle of the account
        userDFColumns (list): attribute names of snscrape.modules.twitter.User
        maxTweets (int, optional): number of tweets to inspect. Defaults to 1.

    Returns:
        list: user attribute values, ordered like userDFColumns
    """
    currentTime = datetime.now()
    userList = []
    print(f'{currentTime:<30} Fetching: {handle:>15}')
    query = f'from:{handle}'

    for i, tweet in enumerate(sntwitter.TwitterSearchScraper(query).get_items()):
        if i >= maxTweets:  # was 'i > maxTweets', which inspected one tweet too many
            break
        # Get the requested user attributes from the tweet's author. Every tweet
        # of the account carries the same user object, so the values from the
        # last inspected tweet are kept.
        userList = [getattr(tweet.user, col) for col in userDFColumns]

    # A dataframe could be created via pd.DataFrame([userList], columns=userDFColumns).
    return userList
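
# Example call (a minimal sketch; the handle is made up and the columns are
# attributes that snscrape's User objects expose):
#
# user_row = scrapeUsers('example_handle', ['username', 'id', 'followersCount'])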