from datetime import datetime
import time

import pandas as pd
import snscrape.modules.twitter as sntwitter


def scrapeTweets(handle, keywords, td, tweetDFColumns, ts_beg, ts_end, suffix, maxTweets=5000):
    """Scrapes tweets from a specific account in a specific time span using snscrape.modules.twitter.

    Args:
        handle (str): Twitter handle of the account to be scraped.
        keywords (list): List of strings containing the keywords that the tweets shall be searched for.
        td (str): Tweet file output path.
        tweetDFColumns (list): Columns for the tweet dataframe; attribute names of snscrape.modules.twitter.Tweet.
        ts_beg (str): Scrape from ... YYYY-MM-DDTHH:MM:SSZ, i.e. datetime format %Y-%m-%dT%H:%M:%SZ
            (https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes).
        ts_end (str): Scrape until ... YYYY-MM-DDTHH:MM:SSZ, i.e. datetime format %Y-%m-%dT%H:%M:%SZ
            (https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes).
        suffix (str): Suffix added to the filename after the handle. Example: suffix "-slice1" with
            handle "handle" will produce the file "Tweets-handle-slice1.csv".
        maxTweets (int, optional): Maximum number of tweets to be scraped. Defaults to 5000.
    """
    i = 0
    currentTime = datetime.now()
    tweetDataFilePath = td + f"Tweets-{handle}{suffix}.csv"

    # create empty tweet list that will be filled with the tweets of the current slice
    TweetList = []

    # status message
    print(f'{currentTime:<30} Fetching: {handle:>15}{suffix:<7} - from {ts_beg} to {ts_end}')

    # snscrape query
    query = f'from:{handle} since:{ts_beg} until:{ts_end}'
    for i, tweet in enumerate(sntwitter.TwitterSearchScraper(query).get_items()):
        if i >= maxTweets:
            break
        # Get the tweet attributes listed in tweetDFColumns and append them to singleTweetList,
        # which is then appended to TweetList. TweetList contains all tweets of the current slice.
        # (getattr is used instead of eval for safety and speed.)
        singleTweetList = [getattr(tweet, col) for col in tweetDFColumns]
        TweetList.append(singleTweetList)

    # # Check whether no tweets were fetched for the current time slice.
    # # If there are no tweets, skip to the next time_slices loop iteration.
    # if not TweetList:
    #     open(tweetDataFilePath, 'a').close()
    #     print(f'return empty in {handle}{suffix} - from {ts_beg} to {ts_end}')
    #     return

    print(f'{i:<6} tweets scraped for: {handle:>15}{suffix:<7}')

    # convert to dataframe
    tweet_df = pd.DataFrame(TweetList, columns=tweetDFColumns)

    # Check whether the tweet text contains any of the keywords; store all matches
    # as a comma-separated string, or 'none' if no keyword was found.
    tweet_df['contains_keyword'] = (
        tweet_df['rawContent'].str.findall('|'.join(keywords)).str.join(',').replace('', 'none')
    )

    # save csv
    tweet_df.to_csv(tweetDataFilePath, encoding='utf-8')

    # sleep 0.5 seconds to avoid getting blocked because of excessive requests
    time.sleep(0.5)
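
# A minimal usage sketch (an assumption, not part of the original module). The handle,
# keywords, output directory, and time bounds below are hypothetical placeholders.
# Note: td must end with a path separator (the filename is appended by string
# concatenation), and tweetDFColumns must include 'rawContent', since the keyword
# check reads that column; all entries must be attribute names of
# snscrape.modules.twitter.Tweet.
if __name__ == "__main__":
    scrapeTweets(
        handle="exampleHandle",          # hypothetical account
        keywords=["climate", "energy"],  # hypothetical keywords
        td="data/tweets/",               # hypothetical output directory (must exist)
        tweetDFColumns=["id", "date", "rawContent", "user"],
        ts_beg="2022-01-01T00:00:00Z",
        ts_end="2022-06-30T23:59:59Z",
        suffix="-slice1",                # yields data/tweets/Tweets-exampleHandle-slice1.csv
        maxTweets=1000,
    )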