corrects some mistakes

Michael Beck
2023-06-23 18:09:09 +02:00
parent 1b43b295ce
commit b00f75e9fe
2 changed files with 20 additions and 14 deletions

@@ -188,22 +188,22 @@ print(timeStartScrape.strftime(fTimeFormat))
 print("---")
 # Iterate over each Twitter account using multiprocessing
-with concurrent.futures.ThreadPoolExecutor() as executor:
+with concurrent.futures.ProcessPoolExecutor() as executor:
     # List to store the scraping tasks
     tasks = []
     for handle in accounts:
         # Iterate over each time slice
         for slice_data in time_slices:
-            # ... code to prepare the slice_data ...
+            # ... Code to prepare the slice_data ...
             # Schedule the scraping task
-            task = executor.submit(scrapeTweets, handle, slice_data, keywords, td, tweetDFColumns)
+            task = executor.submit(
+                scrapeTweets, handle, slice_data, keywords, td, tweetDFColumns
+            )
             tasks.append(task)
     # Wait for all tasks to complete
     concurrent.futures.wait(tasks)
 timeEndScrape = datetime.now()
 print("---")
 print("End of scraping at:")
@@ -227,7 +227,7 @@ with open(f"{logfile}missing-"+timeStartScrape.strftime(fTimeFormat)+".txt", "w"
 if file_alltweets in tweetfiles:
     tweetfiles.remove(file_alltweets)
 # Go through all csv files and merge them into file_alltweets
-if len(tweetfiles) > 0:
+if tweetfiles:
     with open(file_alltweets, "wb") as fout:
         # first file (because of the header):
         with open(tweetfiles[0], "rb") as f:
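
The hunk is cut off at the first input file; judging by the comment, the merge copies the first CSV whole (it supplies the header) and then, presumably, appends the remaining files with their header lines skipped. The replacement of len(tweetfiles) > 0 with the truthiness test if tweetfiles: is behavior-preserving and idiomatic. A sketch of the complete merge under that assumption, with hypothetical file names (the real script builds tweetfiles and file_alltweets earlier):

import shutil

tweetfiles = ["handleA.csv", "handleB.csv"]   # hypothetical per-account files
file_alltweets = "ALL-TWEETS.csv"             # hypothetical merged output

if tweetfiles:
    with open(file_alltweets, "wb") as fout:
        # First file: copy everything, including the header row.
        with open(tweetfiles[0], "rb") as f:
            shutil.copyfileobj(f, fout)
        # Remaining files: skip the duplicate header, append the rest.
        for tweetfile in tweetfiles[1:]:
            with open(tweetfile, "rb") as f:
                f.readline()  # drop this file's header line
                shutil.copyfileobj(f, fout)
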