mhg
parent c769a3b232
commit 5712d4cf8b
@@ -94,11 +94,11 @@ def run_india_scraper():
     search_pattern = "search_result_india_*.csv"
     last_file = find_second_latest_file(folder_path, search_pattern)
     fresh_output = f"india_data/daily_process_folder/new_jobs_on_{today_date}.csv"
-    expired_output = f"india_data/daily_upload_folder/expired_Compete_1_India_{today_date}.csv"
+    expired_output = f"india_data/daily_upload_folder/Compete_1_India_Archieve_{today_date}.csv"
     common_output = f"india_data/daily_common_folder/common_data_on_{today_date}.csv"
     do_the_difference(india_search_output_file, last_file, 'jdURL',
                       fresh_output, expired_output, common_output)
-    india_detail_file = f"india_data/daily_upload_folder/Compete_1_India_{today_date}.csv"
+    india_detail_file = f"india_data/daily_upload_folder/Compete_1_India_Active_{today_date}.csv"
     india_detail_error_file = f"india_data/daily_error_folder/error_on_India_detail_{today_date}.txt"
     start_time = time.time()
     scraper = NaukriJobDetailScraper(fresh_output, india_detail_file, india_detail_error_file)
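The renamed Archieve and Active files above are produced by do_the_difference(), which splits today's search results against the previous run on the 'jdURL' column. The helper itself is not part of this commit; the following is only a minimal sketch of what it might look like, assuming pandas and the argument order used in the call above.

# Hypothetical sketch of do_the_difference (not shown in this commit).
import pandas as pd

def do_the_difference(new_file, old_file, key_column, fresh_output, expired_output, common_output):
    new_df = pd.read_csv(new_file)
    old_df = pd.read_csv(old_file)

    # Keys only in the new scrape -> freshly posted jobs.
    fresh = new_df[~new_df[key_column].isin(old_df[key_column])]
    # Keys only in the old scrape -> expired / archived jobs.
    expired = old_df[~old_df[key_column].isin(new_df[key_column])]
    # Keys present in both scrapes -> still-active common jobs.
    common = new_df[new_df[key_column].isin(old_df[key_column])]

    fresh.to_csv(fresh_output, index=False)
    expired.to_csv(expired_output, index=False)
    common.to_csv(common_output, index=False)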
@@ -110,8 +110,8 @@ def run_india_scraper():
         stat.write(f"Jobdata program took {duration_hours:.2f} hours to run.\n")
     current_date = datetime.now()
     today_date = current_date.strftime('%d-%m-%Y')
-    upload_file_to_bucket(expired_output, f"expired_Compete_1_India_{today_date}.csv" )
-    upload_file_to_bucket(india_detail_file, f"Compete_1_India_{today_date}.csv" )
+    upload_file_to_bucket(expired_output, f"Compete_1_India_Archieve_{today_date}.csv" )
+    upload_file_to_bucket(india_detail_file, f"Compete_1_India_Active_{today_date}.csv" )
 
 def run_gulf_scraper():
     gulfSearch()
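upload_file_to_bucket() is defined elsewhere in the repo, and the storage backend is not visible in this diff. A minimal sketch assuming an S3-style bucket via boto3, with BUCKET_NAME as a placeholder:

# Hypothetical sketch; the real bucket provider and name are assumptions.
import boto3

BUCKET_NAME = "example-jobdata-bucket"  # placeholder, not from this commit

def upload_file_to_bucket(local_path, object_name):
    # Uploads a local CSV under the given object name,
    # e.g. f"Compete_1_India_Active_{today_date}.csv".
    s3 = boto3.client("s3")
    s3.upload_file(local_path, BUCKET_NAME, object_name)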
@@ -121,12 +121,12 @@ def run_gulf_scraper():
     current_date = datetime.now()
     today_date = current_date.strftime('%d-%m-%Y')
     fresh_output = f"gulf_data/daily_process_folder/new_jobs_on_{today_date}.csv"
-    expired_output = f"gulf_data/daily_upload_folder/expired_Compete_1_gulf_{today_date}.csv"
+    expired_output = f"gulf_data/daily_upload_folder/Compete_1_Gulf_Archieve_{today_date}.csv"
     common_output = f"gulf_data/daily_common_folder/common_data_on_{today_date}.csv"
     do_the_difference(gulf_search_file, last_file, "jdURL", fresh_output, expired_output, common_output)
-    upload_file_to_bucket(expired_output, f"expired_Compete_1_Gulf_{today_date}.csv" )
+    upload_file_to_bucket(expired_output, f"Compete_1_Gulf_Archieve_{today_date}.csv" )
     start_time = time.time()
-    gulf_detail_file = f"gulf_data/daily_upload_folder/Compete_1_gulf_{today_date}.csv"
+    gulf_detail_file = f"gulf_data/daily_upload_folder/Compete_1_Gulf_Active_{today_date}.csv"
     gulf_detail_error_file = f"india_data/daily_error_folder/error_on_India_detail_{today_date}.txt"
     scraper = NaukriGulfJobDetailScraper(fresh_output, gulf_detail_file, gulf_detail_error_file)
     scraper.scrape()
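Both scrapers diff against last_file, which comes from find_second_latest_file(); that helper is likewise not shown here. A hypothetical sketch, assuming it picks the previous run's CSV by modification time:

# Hypothetical sketch of find_second_latest_file (not part of this commit).
import glob
import os

def find_second_latest_file(folder_path, search_pattern):
    files = glob.glob(os.path.join(folder_path, search_pattern))
    files.sort(key=os.path.getmtime, reverse=True)
    # files[0] would be today's dump; files[1] the previous run's output.
    return files[1] if len(files) > 1 else None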
@@ -135,7 +135,7 @@ def run_gulf_scraper():
     print(f"Jobdata program took {duration_hours:.2f} hours to run.")
     with open(f'gulf_data/daily_stats_folder/stats_file_of_{today_date}.txt', "a") as stat:
         stat.write(f"Jobdata program took {duration_hours:.2f} hours to run.\n")
-    upload_file_to_bucket(gulf_detail_file, f"Compete_1_Gulf_{today_date}.csv" )
+    upload_file_to_bucket(gulf_detail_file, f"Compete_1_Gulf_Active_{today_date}.csv" )
 
 
 if __name__ == "__main__":