diff --git a/collect.py b/collect.py
index dee5f0b..85d0d63 100644
--- a/collect.py
+++ b/collect.py
@@ -188,22 +188,22 @@
 print(timeStartScrape.strftime(fTimeFormat))
 print("---")
 # Iterate over each Twitter account using multiprocessing
-with concurrent.futures.ThreadPoolExecutor() as executor:
+# Iterate over each Twitter account using multiprocessing
+with concurrent.futures.ProcessPoolExecutor() as executor:
     # List to store the scraping tasks
     tasks = []
-
     for handle in accounts:
         # Iterate over each time slice
         for slice_data in time_slices:
-            # ... code to prepare the slice_data ...
-
+            # ... Code to prepare the slice_data ...
             # Schedule the scraping task
-            task = executor.submit(scrapeTweets, handle, slice_data, keywords, td, tweetDFColumns)
+            task = executor.submit(
+                scrapeTweets, handle, slice_data, keywords, td, tweetDFColumns
+            )
             tasks.append(task)
-
     # Wait for all tasks to complete
     concurrent.futures.wait(tasks)
-
+
 timeEndScrape = datetime.now()
 print("---")
 print("End of scraping at:")
@@ -227,7 +227,7 @@ with open(f"{logfile}missing-"+timeStartScrape.strftime(fTimeFormat)+".txt", "w"
 if file_alltweets in tweetfiles:
     tweetfiles.remove(file_alltweets)
 # Go through all csv files and merge them into file_alltweets
-if len(tweetfiles) > 0:
+if tweetfiles:
     with open(file_alltweets, "wb") as fout:
         # first file (because of the header):
         with open(tweetfiles[0], "rb") as f:
diff --git a/funs/Scrape.py b/funs/Scrape.py
index 8891b62..51cc743 100644
--- a/funs/Scrape.py
+++ b/funs/Scrape.py
@@ -1,7 +1,9 @@
+from datetime import datetime, time
+import pandas as pd
+import snscrape.modules.twitter as sntwitter
+
 def scrapeTweets(handle, slice_data, keywords, td, tweetDFColumns, maxTweets = 5000):
-    from datetime import datetime, time
-    import pandas as pd
-    import snscrape.modules.twitter as sntwitter
+
     i = 0
     currentTime = datetime.now
     ts_beg = slice_data['beg_time']
@@ -18,17 +20,19 @@ def scrapeTweets(handle, slice_data, keywords, td, tweetDFColumns, maxTweets = 5
     # Snscrape query:
     query = f'from:{handle} since:{ts_beg} until:{ts_end}'
     for i,tweet in enumerate(sntwitter.TwitterSearchScraper(query).get_items()):
-        if i>maxTweets:
+        if i >= maxTweets:
            break
         # get tweet vars from tweetDFColumns and append to singleTweetList
         # which will then be appended to TweetList. TweetList contains all tweets of the current slice.
         singleTweetList = [singleTweetList.append(eval(f'tweet.{col}')) for col in tweetDFColumns]
         TweetList.append(singleTweetList)
+
     # Check if no tweets fetched for the current time slice. If there are no tweets, skip to next time_slices loop iteration
-    if TweetList:
+    if not TweetList:
         open(tweetDataFilePath, 'a').close()
         print(f'return empty in {handle}{suffix} - from {ts_beg} to {ts_end}')
         return
+
     print(f'{i:<6} tweets scraped for: {handle:>15}{suffix:<7}')

     # convert to dataframe
@@ -36,7 +40,9 @@ def scrapeTweets(handle, slice_data, keywords, td, tweetDFColumns, maxTweets = 5

     ## Check if tweet-text contains keyword
     tweet_df['contains_keyword'] = ''
-    tweet_df['contains_keyword'] = (tweet_df['rawContent'].str.findall('|'.join(keywords)).str.join(',').replace('', 'none'))
+    tweet_df['contains_keyword'] = (
+        tweet_df['rawContent'].str.findall('|'.join(keywords)).str.join(',').replace('', 'none')
+    )
     ## Save two versions of the dataset, one with all fields and one without dict fields
     # define filepaths
     csv_path = tweetDataFilePath