adds multiprocessing to scrape tweets.

This commit is contained in:
Michael Beck
2023-06-23 16:41:20 +02:00
parent c675db9d00
commit 5d0c41407e
3 changed files with 71 additions and 52 deletions


@@ -6,6 +6,12 @@ Created on Wed Jun 21 13:58:42 2023
@author: michael
'''
def deDupe(inFile, outFile):
    """Reads a file line by line, removes duplicate lines, and saves the deduplicated lines to another file.

    Args:
        inFile (string): Path to the file to deduplicate.
        outFile (string): Path to the output file.
    """
    from collections import Counter
    with open(inFile) as f:
        lines = f.readlines()
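The hunk ends here, before the deduplication itself. A minimal sketch of how the function might continue, assuming the Counter import above is used to report how many duplicates were dropped; the actual continuation is not shown in this diff:

    # hypothetical continuation (not part of the shown hunk)
    counts = Counter(lines)
    duplicates = sum(n - 1 for n in counts.values() if n > 1)
    with open(outFile, 'w') as out:
        # dict.fromkeys keeps the first occurrence of each line, in order
        out.writelines(dict.fromkeys(lines))
    print(f'{duplicates} duplicate lines removed, result written to {outFile}')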

funs/Scrape.py Normal file

@@ -0,0 +1,44 @@
def scrapeTweets(handle, slice_data, keywords, td, maxTweets=5000):
    from datetime import datetime
    import snscrape.modules.twitter as sntwitter
    import pandas as pd  # needed for the DataFrame conversion below
    # tweetDFColumns (list of tweet attribute names) is expected to be defined at module level
    currentTime = str(datetime.now())  # datetime.now is a function; call it and stringify for formatting
    ts_beg = slice_data['beg_time']
    ts_end = slice_data['end_time']
    suffix = slice_data['suffix']
    tweetDataFilePath = td + f"Tweets-{handle}{suffix}.csv"  # f-string prefix was missing
    # create empty tweet list that will be filled with the tweets of the current handle
    TweetList = []
    # status message
    print(f'{currentTime:<30} Fetching: {handle:>15}{suffix:<7} - from {ts_beg} to {ts_end}')
    # snscrape query:
    query = f'from:{handle} since:{ts_beg} until:{ts_end}'
    for i, tweet in enumerate(sntwitter.TwitterSearchScraper(query).get_items()):
        if i > maxTweets:
            break
        # collect the tweet attributes named in tweetDFColumns into singleTweetList,
        # then append it to TweetList, which holds all tweets of the current slice
        singleTweetList = [getattr(tweet, col) for col in tweetDFColumns]  # getattr instead of eval/append
        TweetList.append(singleTweetList)
    # If no tweets were fetched for the current time slice, create an empty marker file and return early
    if not TweetList:
        open(tweetDataFilePath, 'a').close()
        print(f'return empty in {handle}{suffix} - from {ts_beg} to {ts_end}')
        return
    print(f'{i:<6} tweets scraped for: {handle:>15}{suffix:<7}')
    # convert to dataframe
    tweet_df = pd.DataFrame(TweetList, columns=tweetDFColumns)
    ## Check whether the tweet text contains any of the keywords
    tweet_df['contains_keyword'] = (tweet_df['rawContent'].str.findall('|'.join(keywords))
                                    .str.join(',').replace('', 'none'))
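    # e.g. with keywords = ['climate', 'energy'] (hypothetical values), a tweet mentioning both
    # gets 'climate,energy', a tweet mentioning neither gets 'none'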
    ## Save the dataset to csv
    # define filepath
    csv_path = tweetDataFilePath
    # save csv
    tweet_df.to_csv(csv_path, encoding='utf-8')
    # optionally sleep briefly to avoid getting blocked for excessive requests
    # time.sleep(0.5)
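The commit message mentions multiprocessing, but the calling code is not part of the hunks shown here. A minimal sketch of how scrapeTweets could be fanned out over worker processes with multiprocessing.Pool; the handles, time slices, keywords, and output directory below are placeholder values, not taken from the repository:

from itertools import product
from multiprocessing import Pool

from funs.Scrape import scrapeTweets

if __name__ == '__main__':
    # placeholder inputs, purely illustrative
    handles = ['exampleHandleA', 'exampleHandleB']
    time_slices = [
        {'beg_time': '2020-01-01', 'end_time': '2020-06-30', 'suffix': '-slice1'},
        {'beg_time': '2020-07-01', 'end_time': '2020-12-31', 'suffix': '-slice2'},
    ]
    keywords = ['keyword1', 'keyword2']
    td = 'data/tweets/'
    # one task per (handle, time slice) pair; Pool distributes them over worker processes
    tasks = [(handle, slice_data, keywords, td) for handle, slice_data in product(handles, time_slices)]
    with Pool(processes=4) as pool:
        pool.starmap(scrapeTweets, tasks)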