corrects some mistakes
parent 1b43b295ce
commit b00f75e9fe
collect.py | 16
@@ -188,22 +188,22 @@ print(timeStartScrape.strftime(fTimeFormat))
 print("---")
 
 # Iterate over each Twitter account using multiprocessing
-with concurrent.futures.ThreadPoolExecutor() as executor:
+with concurrent.futures.ProcessPoolExecutor() as executor:
     # List to store the scraping tasks
     tasks = []
 
     for handle in accounts:
         # Iterate over each time slice
         for slice_data in time_slices:
-            # ... code to prepare the slice_data ...
+            # ... Code to prepare the slice_data ...
 
             # Schedule the scraping task
-            task = executor.submit(scrapeTweets, handle, slice_data, keywords, td, tweetDFColumns)
+            task = executor.submit(
+                scrapeTweets, handle, slice_data, keywords, td, tweetDFColumns
+            )
             tasks.append(task)
 
     # Wait for all tasks to complete
     concurrent.futures.wait(tasks)
 
 timeEndScrape = datetime.now()
 print("---")
 print("End of scraping at:")
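
Note on the executor change: with ProcessPoolExecutor the submitted work actually runs in separate worker processes (matching the "multiprocessing" comment), so the called function and its arguments must be picklable, i.e. defined at module level. A minimal, self-contained sketch of the same submit-and-wait pattern, with a placeholder worker standing in for scrapeTweets and made-up handles and time slices:

import concurrent.futures

# Placeholder standing in for scrapeTweets; it must live at module level
# so ProcessPoolExecutor can pickle a reference to it for the workers.
def scrape_stub(handle, slice_data):
    return f"{handle}: {slice_data['beg_time']} -> {slice_data['end_time']}"

if __name__ == "__main__":
    accounts = ["user_a", "user_b"]                                        # hypothetical handles
    time_slices = [{"beg_time": "2021-01-01", "end_time": "2021-06-30"}]   # hypothetical slice

    with concurrent.futures.ProcessPoolExecutor() as executor:
        tasks = [
            executor.submit(scrape_stub, handle, slice_data)
            for handle in accounts
            for slice_data in time_slices
        ]
        concurrent.futures.wait(tasks)
        for task in tasks:
            print(task.result())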
@@ -227,7 +227,7 @@ with open(f"{logfile}missing-"+timeStartScrape.strftime(fTimeFormat)+".txt", "w"
 if file_alltweets in tweetfiles:
     tweetfiles.remove(file_alltweets)
 # Go through all csv files and merge them into file_alltweets
-if len(tweetfiles) > 0:
+if tweetfiles:
     with open(file_alltweets, "wb") as fout:
         # first file (because of the header):
         with open(tweetfiles[0], "rb") as f:
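
`if tweetfiles:` is the idiomatic truthiness check for a non-empty list and behaves the same as `len(tweetfiles) > 0`. The hunk only shows the first CSV being copied whole (it supplies the header); the usual way to finish such a merge, given here purely as an assumed sketch of the pattern with hypothetical file names, is to append the remaining files with their header line dropped:

import shutil

tweetfiles = ["a.csv", "b.csv", "c.csv"]   # hypothetical per-slice CSVs
file_alltweets = "all-tweets.csv"          # hypothetical merged output

if tweetfiles:
    with open(file_alltweets, "wb") as fout:
        with open(tweetfiles[0], "rb") as f:
            shutil.copyfileobj(f, fout)      # first file, header included
        for path in tweetfiles[1:]:
            with open(path, "rb") as f:
                f.readline()                 # skip this file's header line
                shutil.copyfileobj(f, fout)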
@@ -1,7 +1,9 @@
+from datetime import datetime, time
+import pandas as pd
+import snscrape.modules.twitter as sntwitter
+
 def scrapeTweets(handle, slice_data, keywords, td, tweetDFColumns, maxTweets = 5000):
-    from datetime import datetime, time
-    import pandas as pd
-    import snscrape.modules.twitter as sntwitter
+    i = 0
 
     currentTime = datetime.now
     ts_beg = slice_data['beg_time']
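
Two small notes on this hunk: the imports move from the function body to module level (their usual placement), and `i = 0` guards the later progress print, because enumerate never binds the loop variable when the scraper yields nothing. A minimal illustration of the second point:

# Why `i = 0` matters: enumerate over an empty iterable never assigns i.
i = 0
for i, tweet in enumerate([]):       # stand-in for a query with no results
    pass
print(f"{i:<6} tweets scraped")      # prints 0 instead of raising NameError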
@@ -18,17 +20,19 @@ def scrapeTweets(handle, slice_data, keywords, td, tweetDFColumns, maxTweets = 5
     # Snscrape query:
     query = f'from:{handle} since:{ts_beg} until:{ts_end}'
     for i,tweet in enumerate(sntwitter.TwitterSearchScraper(query).get_items()):
-        if i>maxTweets:
+        if i >= maxTweets:
             break
         # get tweet vars from tweetDFColumns and append to singleTweetList
         # which will then be appended to TweetList. TweetList contains all tweets of the current slice.
         singleTweetList = [singleTweetList.append(eval(f'tweet.{col}')) for col in tweetDFColumns]
         TweetList.append(singleTweetList)
 
     # Check if no tweets fetched for the current time slice. If there are no tweets, skip to next time_slices loop iteration
-    if TweetList:
+    if not TweetList:
         open(tweetDataFilePath, 'a').close()
         print(f'return empty in {handle}{suffix} - from {ts_beg} to {ts_end}')
         return
 
     print(f'{i:<6} tweets scraped for: {handle:>15}{suffix:<7}')
 
     # convert to dataframe
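
The two flipped conditions are the actual bug fixes here: with enumerate counting from 0, breaking at `i >= maxTweets` caps the slice at exactly maxTweets tweets (the old `i > maxTweets` let one extra through), and the empty-slice early return should fire when TweetList is empty, hence `if not TweetList:`. A small stand-alone check of the cap logic:

maxTweets = 5                            # small cap just for the illustration
collected = []
for i, tweet in enumerate(range(100)):   # stand-in for the scraper's item stream
    if i >= maxTweets:
        break
    collected.append(tweet)
print(len(collected))                    # 5 -- with `i > maxTweets` this would be 6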
@@ -36,7 +40,9 @@ def scrapeTweets(handle, slice_data, keywords, td, tweetDFColumns, maxTweets = 5
 
     ## Check if tweet-text contains keyword
     tweet_df['contains_keyword'] = ''
-    tweet_df['contains_keyword'] = (tweet_df['rawContent'].str.findall('|'.join(keywords)).str.join(',').replace('', 'none'))
+    tweet_df['contains_keyword'] = (
+        tweet_df['rawContent'].str.findall('|'.join(keywords)).str.join(',').replace('', 'none')
+    )
     ## Save two versions of the dataset, one with all fields and one without dict fields
     # define filepaths
     csv_path = tweetDataFilePath
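
Wrapping the contains_keyword assignment only changes the layout; the expression itself is easier to follow with a tiny example of what the findall / join / replace chain produces (hypothetical keywords and texts):

import pandas as pd

keywords = ["climate", "energy"]                  # hypothetical keyword list
raw = pd.Series([
    "new climate bill passed",
    "nothing relevant here",
    "energy prices and climate policy",
])

contains_keyword = (
    raw.str.findall("|".join(keywords)).str.join(",").replace("", "none")
)
print(contains_keyword.tolist())
# -> ['climate', 'none', 'energy,climate']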