# main.py
"""
Main entry point for the Upwork Scraper application.
This script coordinates the authentication, scraping, and data storage components.
"""
import os
import sys
import asyncio
import argparse
import warnings
from datetime import datetime

from dotenv import load_dotenv

# Add the parent directory to sys.path to enable absolute imports
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Import components
from upwork_scraper.auth.login import UpworkAuthenticator
from upwork_scraper.scrapers import JobsScraperCrawl4ai, JobsScraperSelenium
from upwork_scraper.data.baserow import BaserowService
from upwork_scraper.utils.helpers import setup_logging, parse_relative_time
# Filter out specific selenium_driverless warning
warnings.filterwarnings("ignore", message="got execution_context_id and unique_context=True.*")
# Set up logging
logger = setup_logging(log_level="INFO")
# Load environment variables
load_dotenv()
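# BaserowService() is constructed below with no arguments, so the Baserow
# credentials are presumably supplied via the environment loaded here
# (the exact variable names live in upwork_scraper.data.baserow).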
# Constants
COOKIES_FILE = "upwork_cookies_selenium.json"
OUTPUT_DIR = "jobs"
DEFAULT_QUERIES = ["n8n tutor", "n8n tutoring", "n8n teacher", "n8n coach", "n8n guide", "n8n expert", "n8n mentor", "n8n training", "n8n consulting", "n8n instructor"] # n8n tutoring jobs
# DEFAULT_QUERIES = ["web scraping", "data scraping", "scraping", "ai development", "elevenlabs", "scraping", "ai automation", "n8n", "angularjs", "angular"] # List of default queries
DEFAULT_MAX_PAGES = 2
DEFAULT_DAYS_TO_KEEP = 365
async def process_jobs_for_baserow(jobs, search_query: str = ""):
    """Process job data for Baserow storage.

    Args:
        jobs: List of job data dictionaries.
        search_query: The search query used to find these jobs.

    Returns:
        List of processed job dictionaries.
    """
    processed_jobs = []
    for job in jobs:
        # Parse the relative time string into a UTC datetime object
        posted_time_str = job["posted_time"]
        posted_time, success = parse_relative_time(posted_time_str)
        if not success:
            logger.warning(f"Failed to parse posted time: {posted_time_str}, using current UTC time")

        try:
            job_uid = int(job["job_uid"])
        except (ValueError, TypeError) as e:
            logger.warning(f"Failed to convert job_uid to integer: {job['job_uid']}. Error: {e}")
            job_uid = 0  # Alternatively, skip this job entirely

        # Rating arrives as a string such as "4.92" or "4.54"; normalize to a
        # float rounded to two decimal places
        rating = round(float(job.get("client_info", {}).get("rating", 0)), 2)
        status = "scraped"
        if 0 < rating < 4:
            status = "low_rating"

        processed_job = {
            "job_uid": job_uid,
            "job_title": job["job_title"],
            "job_url": job["job_url"],
            "posted_time": job["posted_time"],
            "posted_time_date": posted_time.isoformat(),  # Already in UTC from parse_relative_time
            "description": job.get("description", ""),
            "location": job.get("client_info", {}).get("location", ""),
            "rating": rating,
            "total_feedback": job.get("client_info", {}).get("total_feedback", ""),
            "spent": job.get("client_info", {}).get("spent", ""),
            "budget": job.get("job_details", {}).get("budget", ""),
            "job_type": job.get("job_details", {}).get("job_type", ""),
            "client_info": str(job.get("client_info", {})),
            "job_details": str(job.get("job_details", {})),
            "skills": str(job.get("skills", [])),
            "proposals": job.get("proposals", ""),
            "status": status,
            "search_query": search_query,
            "my_proposal": "",
            "score": 0,
        }
        processed_jobs.append(processed_job)

    return processed_jobs
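# Illustrative input/output for a single job (hypothetical values):
#   in:  {"job_uid": "123456789", "posted_time": "2 hours ago",
#         "client_info": {"rating": "4.92", ...}, ...}
#   out: {"job_uid": 123456789, "rating": 4.92, "status": "scraped", ...}
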
def parse_arguments():
    """Parse command line arguments.

    Returns:
        Parsed arguments namespace.
    """
    parser = argparse.ArgumentParser(description="Upwork Scraper")
    parser.add_argument(
        "--queries",
        type=str,
        nargs="+",  # Accept one or more arguments
        default=DEFAULT_QUERIES,
        help=f"Search queries for jobs (default: {DEFAULT_QUERIES})"
    )
    parser.add_argument(
        "--scraper",
        type=str,
        choices=["crawl4ai", "selenium"],
        default="selenium",
        help="Scraper implementation to use (crawl4ai or selenium)"
    )
    parser.add_argument(
        "--max-pages",
        type=int,
        default=DEFAULT_MAX_PAGES,
        help=f"Maximum number of pages to scrape per query (default: {DEFAULT_MAX_PAGES})"
    )
    parser.add_argument(
        "--headless",
        action="store_true",
        default=False,
        help="Run browser in headless mode (default: False)"
    )
    parser.add_argument(
        "--no-headless",
        action="store_false",
        dest="headless",
        help="Run browser in non-headless mode"
    )
    parser.add_argument(
        "--days-to-keep",
        type=int,
        default=DEFAULT_DAYS_TO_KEEP,
        help=f"Number of days to keep jobs in Baserow (default: {DEFAULT_DAYS_TO_KEEP})"
    )
    return parser.parse_args()

async def main():
    """Main function that orchestrates the scraping process."""
    # Parse command line arguments
    args = parse_arguments()

    logger.info("Starting Upwork Scraper")
    logger.info(f"Using {args.scraper} scraper")
    logger.info(f"Search queries: {args.queries}")
    logger.info(f"Max pages per query: {args.max_pages}")
    logger.info(f"Headless mode: {args.headless}")

    # Initialized before the try block so the error handler below can
    # reference it even when an exception is raised before any upload
    total_uploaded = 0

    try:
        # Initialize authenticator and perform conditional login
        logger.info("Initializing authenticator")
        authenticator = UpworkAuthenticator(headless=args.headless)

        login_performed = await authenticator.login_if_needed()
        if login_performed:
            logger.info("Login was performed because cookies were invalid or expired")
        else:
            logger.info("Using existing valid cookies")

        cookies = authenticator.get_cookies()
        logger.info(f"Using {len(cookies)} cookies for requests")

        # Initialize Baserow service
        logger.info("Initializing Baserow service")
        baserow_service = BaserowService()

        # Fetch existing rows from Baserow
        rows = await baserow_service.get_all_rows()
        logger.info(f"Found {len(rows)} existing rows in Baserow")

        # Initialize job scraper based on the selected implementation
        logger.info(f"Initializing {args.scraper} job scraper")
        if args.scraper == "crawl4ai":
            scraper = JobsScraperCrawl4ai(
                cookies_file=COOKIES_FILE,
                output_dir=OUTPUT_DIR,
                headless=args.headless,
                verbose=True
            )
        else:  # selenium
            scraper = JobsScraperSelenium(
                cookies_file=COOKIES_FILE,
                output_dir=OUTPUT_DIR,
                headless=args.headless,
                verbose=True
            )

        # Process each query
        all_jobs = []
        for query in args.queries:
            logger.info(f"Scraping {args.max_pages} page(s) for query: {query}")
            jobs = await scraper.scrape_jobs(query=query, max_pages=args.max_pages)
            if jobs:
                logger.info(f"Found {len(jobs)} jobs for query: {query}")
                # Process and upload jobs to Baserow
                all_jobs.extend(jobs)
                processed_jobs = await process_jobs_for_baserow(jobs, search_query=query)
                created_rows = await baserow_service.upload_multiple_rows(
                    processed_jobs,
                    deduplicate=True,
                    deduplication_field="job_uid"
                )
                uploaded_count = len(created_rows)
                total_uploaded += uploaded_count
                logger.info(f"Successfully uploaded {uploaded_count} new jobs to Baserow")
            else:
                logger.warning(f"No jobs found for query: {query}")

        if all_jobs:
            # Clean up old rows
            deleted_count = await baserow_service.clean_up_old_rows(days=args.days_to_keep)
            logger.info(f"Cleaned up {deleted_count} old rows from Baserow")
        else:
            logger.warning("No jobs found for any query")

        # Find similar jobs and update status
        await baserow_service.find_similar_jobs(update_status=True)

        logger.info("Upwork Scraper completed successfully")

        # Append a run summary to scraper.log
        try:
            date_str = datetime.now().astimezone().isoformat()
            with open("scraper.log", "a") as log_file:
                log_file.write(f"{date_str}: Scraper run completed - {total_uploaded} rows uploaded\n")
        except Exception as e:
            logger.warning(f"Failed to write to scraper.log: {e}")
    except Exception as e:
        logger.error(f"An error occurred: {e}")
        # Still try to log completion even on error
        try:
            date_str = datetime.now().astimezone().isoformat()
            with open("scraper.log", "a") as log_file:
                log_file.write(f"{date_str}: Scraper run completed with error - {total_uploaded} rows uploaded\n")
        except Exception:
            pass
        return 1

    return 0

if __name__ == "__main__":
    exit_code = asyncio.run(main())
    sys.exit(exit_code)