How to scrape rental data from Zumper.com

Post date: May 7, 2018 6:21:21 AM

Zumper.com has lots of rental data, but the website is designed to make life difficult for scrapers. This post shows how to scrape listing data from the site.
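
Zumper serves two kinds of listing detail pages: plain HTML, and pages whose data is embedded as a window.__PRELOADED_STATE__ JSON blob inside a script tag (with obfuscated, possibly changing class names). The script below detects which kind it got and parses accordingly, roughly like this minimal sketch (detect_page_type is illustrative and not part of the script; html is assumed to hold a detail page's source, which the full script obtains via Selenium):

import json
from bs4 import BeautifulSoup

def detect_page_type(html):
    soup = BeautifulSoup(html, 'html.parser')
    if "window.__PRELOADED_STATE__" in soup.text:
        # the data lives in a JSON blob inside a <script> tag
        blob = [s.text for s in soup.select("script") if "window.__PRELOADED_STATE__" in s.text][0]
        state = json.loads(blob.strip().replace("window.__PRELOADED_STATE__ = ", ""))
        return "json", state
    # otherwise fall back to parsing the rendered HTML directly
    return "html", None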

How to use the program:

Save the code below as scrape_rental_zumper_v1.py in your working folder, then run it from the command line. For example, the following command crawls condo and house listings in San Francisco:

$ python scrape_rental_zumper_v1.py -mode crawl_from_a_url -urls 'https://www.zumper.com/apartments-for-rent/san-francisco-ca?property-categories=condo,house&page=10' -o zumper_rental_data_v1_san-francisco-ca.json -err zumper_error_urls_san-francisco-ca.txt -w w
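
If you already have specific listing URLs, the script also supports a crawl_all_urls mode that scrapes those detail pages directly. The listing URLs below are placeholders; substitute real detail-page URLs:

$ python scrape_rental_zumper_v1.py -mode crawl_all_urls -urls 'https://www.zumper.com/apartments-for-rent/<listing-1>' 'https://www.zumper.com/apartments-for-rent/<listing-2>' -o zumper_rental_data_v1_selected.json -err zumper_error_urls_selected.txt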

scrape_rental_zumper_v1.py

# -*- coding: utf-8 -*-
import requests
import re
import time
import random
import pandas as pd
import datetime
import json
import traceback
from bs4 import BeautifulSoup
import logging

#################################
### logfile configuration
#################################
TMP_LOGFILE = './scrape_rental_zumper_v1.log'
logging.basicConfig(filename=TMP_LOGFILE, level=logging.INFO, format='%(asctime)s %(message)s')

#################################
### Use Selenium to mimic human browsing
#################################
import os
from selenium import webdriver
from selenium.webdriver.common.keys import Keys

### use Google Chrome as the browser driver
chromedriver = "/usr/local/bin/chromedriver"
os.environ["webdriver.chrome.driver"] = chromedriver
driver = webdriver.Chrome(chromedriver)

#################################
### User-defined parameters
#################################
headers = {
    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'accept-encoding': 'gzip, deflate, br',
    'accept-language': 'en-US,en;q=0.8',
    'upgrade-insecure-requests': '1',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'
}
SLEEP_MIN_MS_BETWEEN_DETAIL_PAGE = 300
SLEEP_MAX_MS_BETWEEN_DETAIL_PAGE = 1000
SLEEP_MIN_MS_BETWEEN_SEARCH_PAGE = 1000
SLEEP_MAX_MS_BETWEEN_SEARCH_PAGE = 5000
SCROLL_PAUSE_TIME = 5        # sec
SCROLL_PAUSE_TIME_DP = 0.5   # sec
PAGE_NUM_LIMIT = 100         # pages

#################################
### helper functions
#################################
def parse_number_only(s, num_type=float, pattern='[-]?[0-9.,]+'):
    ### coerce to str so numeric inputs (e.g. lat/lng from the ld+json block) do not raise
    number_part = re.findall(pattern=pattern, string=str(s))
    if (number_part is not None) and (len(number_part) > 0):
        number_part = number_part[0]
        number_part = re.sub(pattern='[^0-9.-]+', repl='', string=number_part)
        number_part = re.sub(pattern='[.]+', repl='.', string=number_part)
        if (number_part is not None) and (len(number_part) > 0):
            try:
                return num_type(float(number_part))
            except ValueError:
                logging.warning("cannot convert input into number: '{0}'".format(s))
                return None
    return None

def traverse_json(x, path):
    '''
    e.g. traverse_json(json_info, ['entity','deposits',0,"min"])
    '''
    for p in path:
        if x is None:
            return None
        try:
            x = x[p]
        except:
            traceback.print_exc()
            logging.warning("index/element {0} (from path {1}) does not exist in the json".format(p, str(path)))
            return None
    return x

#################################
### Parsing-related functions
#################################
def detect_property_type_from_text(text):
    if text is None:
        return None
    if "studio" in text.lower():
        return "studio"
    elif ("townhome" in text.lower()) or ("townhouse" in text.lower()):
        return "townhome"
    elif "condo" in text.lower():
        return "condo"
    elif "single family" in text.lower():
        return "single family home"
    else:
        return "single family home"

def parse_home_detail_page(soup_dp):
    '''
    Parse information from a property detail page when the format is html.
    '''
    detail_page_info = {}
    ### location
    location_json = json.loads(soup_dp.find("script", {"type": "application/ld+json"}).text)
    detail_page_info['city'] = location_json['address']['addressLocality']
    detail_page_info['state_code'] = location_json['address']['addressRegion']
    detail_page_info['zip_code'] = location_json['address']['postalCode']
    detail_page_info['street_name'] = location_json['address']['streetAddress']
    ### address
    address_part = location_json['name'].split(' at ')[-1]
    if detail_page_info['zip_code'] not in address_part:
        detail_page_info['full_address'] = location_json['name'].split(' at ')[-1] + ' ' + detail_page_info['zip_code']
    else:
        detail_page_info['full_address'] = location_json['name'].split(' at ')[-1]
    detail_page_info['property_status'] = location_json['name'].split(' at ')[0]
    detail_page_info['latitude'] = parse_number_only(location_json['geo']['latitude'], float)
    detail_page_info['longitude'] = parse_number_only(location_json['geo']['longitude'], float)
    detail_page_info['listing_id'] = None
    detail_page_info['street'] = None
    ### house number
    if detail_page_info['street_name']:
        detail_page_info['house_number'] = detail_page_info['street_name'].split(' ')[0]
    else:
        detail_page_info['house_number'] = None
    ### unit
    unit_match = re.search("#([0-9a-zA-Z]+)", detail_page_info['street_name'] or '')
    if unit_match:
        detail_page_info['unit'] = unit_match.groups()[0]
    else:
        detail_page_info['unit'] = None
    detail_page_info['neighborhood_id'] = None
    neighborhood = soup_dp.find("span", {"ng-bind": "linkData.neighborhood"})
    if neighborhood:
        detail_page_info['neighborhood'] = neighborhood.text
    else:
        detail_page_info['neighborhood'] = None
    detail_page_info['building_id'] = None
    detail_page_info['timezone'] = None
    description = soup_dp.select_one("p.description")
    if description:
        detail_page_info['description'] = description.text
    else:
        detail_page_info['description'] = None
    ### number of images in the carousel
    try:
        num_photo_collage = len(soup_dp.select_one("div.image-collage-section").select("div.side-image"))
        num_photo_more = parse_number_only(soup_dp.select_one("div.image-collage-section").select("div.side-image")[-1].text, int)
        detail_page_info['num_photos'] = num_photo_more + num_photo_collage
    except:
        detail_page_info['num_photos'] = 0
    ### parse the basic attributes
    for s in soup_dp.find_all("div", {"class": "details-meta-col"}):
        class_name = s.select_one("img")["class"][0].strip().lower()
        raw_value = s.text.strip().lower()
        if 'bed' in class_name:
            detail_page_info['num_bedrooms'] = parse_number_only(raw_value, int)
            detail_page_info['is_studio'] = 1 if (detail_page_info['num_bedrooms'] == 0) and ('studio' in (detail_page_info['description'] or '').lower()) else 0
        elif 'bath' in class_name:
            detail_page_info['num_bathrooms'] = parse_number_only(raw_value, float)
        elif 'sqft' in class_name:
            detail_page_info['living_area'] = parse_number_only(raw_value, int)
        elif 'age' in class_name:
            detail_page_info['listing_age'] = parse_number_only(raw_value, int)
            detail_page_info['listing_age_unit'] = raw_value.split(' ')[-1]
        elif 'dog' in class_name:
            detail_page_info['dog_ok'] = 1 if 'yes' in raw_value else 0
        elif 'cat' in class_name:
            detail_page_info['cat_ok'] = 1 if 'yes' in raw_value else 0
        else:
            detail_page_info[class_name] = raw_value
    detail_page_info['pets'] = None
    detail_page_info['pet_policy'] = None
    detail_page_info['listing_rent_price'] = parse_number_only(soup_dp.find("span", {"ng-bind": "::entity.priceText"}).text, int)
    detail_page_info['listing_rent_prices'] = None
    detail_page_info['deposit'] = None
    detail_page_info['lease_length'] = None
    ### available date
    available_date = soup_dp.find("span", {"ng-bind": "entity.dateAvailableText()"}).text.strip().lower()
    if available_date == 'now':
        detail_page_info['available_date'] = datetime.datetime.now().strftime('%Y-%m-%d')
    else:
        detail_page_info['available_date'] = available_date
    detail_page_info['listing_status'] = None
    ### listing date
    try:
        if detail_page_info['listing_age_unit'] == 'minutes':
            listing_date = datetime.datetime.now() - datetime.timedelta(minutes=detail_page_info['listing_age'])
        elif detail_page_info['listing_age_unit'] == 'hours':
            listing_date = datetime.datetime.now() - datetime.timedelta(hours=detail_page_info['listing_age'])
        elif detail_page_info['listing_age_unit'] == 'days':
            listing_date = datetime.datetime.now() - datetime.timedelta(days=detail_page_info['listing_age'])
        elif detail_page_info['listing_age_unit'] == 'weeks':
            listing_date = datetime.datetime.now() - datetime.timedelta(weeks=detail_page_info['listing_age'])
        elif detail_page_info['listing_age_unit'] == 'months':
            ### timedelta has no 'months' argument; approximate a month as 30 days
            listing_date = datetime.datetime.now() - datetime.timedelta(days=30*detail_page_info['listing_age'])
        detail_page_info['listing_date'] = parse_number_only(listing_date.strftime('%s'), int)
    except:
        detail_page_info['listing_date'] = None
    ### amenities
    detail_page_info['unit_amenities'] = [s.text.strip().lower() for s in soup_dp.find_all("div", {"ng-repeat": "amenity in entity.amenities"})]
    detail_page_info['building_amenities'] = [s.text.strip().lower() for s in soup_dp.find_all("div", {"ng-repeat": "amenity in entity.building_amenities"})]
    ### type of this response
    detail_page_info['response_type'] = 'html'
    detail_page_info['feed_name'] = None
    return detail_page_info

def parse_preloaded_json(json_info):
    '''
    Zumper keeps its data in JSON format on this kind of page, so we just scrape it from there.
    '''
    detail_page_info = {}
    detail_page_info['listing_id'] = traverse_json(json_info, ['entity', 'listing_id'])
    detail_page_info['street_name'] = traverse_json(json_info, ['entity', 'address'])
    detail_page_info['house_number'] = traverse_json(json_info, ['entity', 'listing_location', 'house'])
    detail_page_info['street'] = traverse_json(json_info, ['entity', 'listing_location', 'street'])
    detail_page_info['unit'] = traverse_json(json_info, ['entity', 'listing_location', 'unit'])
    detail_page_info['city'] = traverse_json(json_info, ['entity', 'listing_location', 'city'])
    detail_page_info['state_code'] = traverse_json(json_info, ['entity', 'listing_location', 'state'])
    detail_page_info['zip_code'] = traverse_json(json_info, ['entity', 'listing_location', 'zipcode'])
    detail_page_info['full_address'] = ' '.join([str(s) for s in [detail_page_info['street_name'], detail_page_info['city'], detail_page_info['state_code'], detail_page_info['zip_code']] if (s is not None) and len(str(s)) > 0])
    detail_page_info['neighborhood_id'] = traverse_json(json_info, ['entity', 'listing_location', 'neighborhood_id'])
    detail_page_info['neighborhood'] = traverse_json(json_info, ['entity', 'neighborhood', 'name'])
    detail_page_info['building_id'] = traverse_json(json_info, ['entity', 'building_id'])
    detail_page_info['latitude'] = traverse_json(json_info, ['entity', 'listing_location', 'lat'])
    detail_page_info['longitude'] = traverse_json(json_info, ['entity', 'listing_location', 'lng'])
    detail_page_info['timezone'] = traverse_json(json_info, ['entity', 'listing_location', 'tz'])
    detail_page_info['description'] = traverse_json(json_info, ['entity', 'description'])
    ### number of images in the carousel
    photos = traverse_json(json_info, ['entity', 'media'])
    if (photos is not None) and isinstance(photos, list):
        detail_page_info['num_photos'] = len(photos)
    else:
        detail_page_info['num_photos'] = 0
    ### bedrooms
    detail_page_info['num_bedrooms'] = traverse_json(json_info, ['entity', 'bedrooms'])
    detail_page_info['is_studio'] = 1 if ((detail_page_info['num_bedrooms'] is None) or (detail_page_info['num_bedrooms'] == 0)) and ('studio' in (detail_page_info['description'] or '').lower()) else 0
    detail_page_info['property_type_id'] = traverse_json(json_info, ['entity', 'property_type'])  ## need to check the mapping
    ### bathrooms
    num_full_bathrooms = traverse_json(json_info, ['entity', 'bathrooms'])
    num_full_bathrooms = int(num_full_bathrooms) if (num_full_bathrooms is not None) else 0
    num_half_bathrooms = traverse_json(json_info, ['entity', 'half_bathrooms'])
    num_half_bathrooms = int(num_half_bathrooms) if (num_half_bathrooms is not None) else 0
    detail_page_info['num_bathrooms'] = num_full_bathrooms + 0.5*num_half_bathrooms
    detail_page_info['listing_rent_price'] = traverse_json(json_info, ['entity', 'price'])
    detail_page_info['listing_rent_prices'] = traverse_json(json_info, ['entity', 'prices', 0])
    detail_page_info['deposit'] = traverse_json(json_info, ['entity', 'deposits', 0, "min"])
    detail_page_info['living_area'] = traverse_json(json_info, ['entity', 'square_feet'])
    detail_page_info['lease_length'] = traverse_json(json_info, ['entity', 'lease_terms'])
    ### available date
    date_available = traverse_json(json_info, ['entity', 'date_available'])
    if date_available:
        detail_page_info['available_date'] = re.sub("/", "-", date_available)
    else:
        detail_page_info['available_date'] = None
    detail_page_info['listing_status'] = traverse_json(json_info, ['entity', 'listing_type'])
    detail_page_info['listing_date'] = traverse_json(json_info, ['entity', 'listed_on'])
    detail_page_info['listing_age'] = None
    detail_page_info['listing_age_unit'] = None
    detail_page_info['pets'] = traverse_json(json_info, ['entity', 'pets'])
    detail_page_info['dog_ok'] = 1 if (detail_page_info['pets'] is not None) and (1 in detail_page_info['pets']) else 0
    detail_page_info['cat_ok'] = 1 if (detail_page_info['pets'] is not None) and (2 in detail_page_info['pets']) else 0
    detail_page_info['pet_policy'] = traverse_json(json_info, ['entity', 'pet_policy'])
    ### amenities
    detail_page_info['unit_amenities_id'] = traverse_json(json_info, ['entity', 'amenities'])
    detail_page_info['building_amenities_id'] = traverse_json(json_info, ['entity', 'building_amenities'])
    ### type of this response
    detail_page_info['response_type'] = 'json'
    detail_page_info['feed_name'] = traverse_json(json_info, ['entity', 'feed_name'])
    return detail_page_info

def parse_preloaded_basic_attributes_simple(soup_dp):
    elems = soup_dp.select_one("div.icon-sqft-dims").parent.parent.contents
    detail_page_info = {}
    for elem in elems:
        text = elem.text.strip().lower()
        if 'icon-bed-dims' in elem.next["class"]:
            detail_page_info["num_bedrooms"] = parse_number_only(text, int)
        elif 'icon-bath-dims' in elem.next["class"]:
            if 'half bath' in text:
                num_full_bath, num_half_bath = re.search("([0-9]).+([0-9]+)", text).groups()
                num_full_bath = int(num_full_bath) if num_full_bath else 0
                num_half_bath = int(num_half_bath) if num_half_bath else 0
                detail_page_info["num_bathrooms"] = num_full_bath + 0.5*num_half_bath
            elif 'bathroom' in text:
                num_full_bath = re.search("([0-9]).+", text).groups()[0]
                num_full_bath = float(num_full_bath) if num_full_bath else 0
                detail_page_info["num_bathrooms"] = num_full_bath
        elif 'icon-sqft-dims' in elem.next["class"]:
            detail_page_info["living_area"] = parse_number_only(text, int)
        elif 'icon-pet-dims' in elem.next["class"]:
            if text == 'dogs & cats ok':
                detail_page_info["dog_ok"] = 1
                detail_page_info["cat_ok"] = 1
            else:
                detail_page_info["dog_ok"] = 0
                detail_page_info["cat_ok"] = 0
        elif 'icon-clock-dims' in elem.next["class"]:
            if 'minute' in text:
                listing_date = datetime.datetime.now() - datetime.timedelta(minutes=parse_number_only(text))
            elif 'hour' in text:
                listing_date = datetime.datetime.now() - datetime.timedelta(hours=parse_number_only(text))
            elif 'day' in text:
                listing_date = datetime.datetime.now() - datetime.timedelta(days=parse_number_only(text))
            detail_page_info["listing_date"] = parse_number_only(listing_date.strftime('%s'), int)
    return detail_page_info

def parse_preloaded_basic_attributes_complex(soup_dp):
    elems = soup_dp.select_one("div.icon-info-sqft-dims").parent.parent.parent.contents
    detail_page_info = {}
    for elem in elems:
        text = elem.text.strip().lower()
        if 'icon-info-bed-dims' in elem.next.next["class"]:
            if "studio" in text:
                detail_page_info["is_studio"] = 1
                beds = 0
            else:
                beds = re.search("([0-9]+) bed", text).groups()[0]
            baths = re.search("([0-9]+) bath", text).groups()[0]
            detail_page_info["num_bedrooms"] = parse_number_only(beds, int) if beds else 0
            detail_page_info["num_bathrooms"] = parse_number_only(baths, float) if baths else 0
        elif 'icon-info-pet-dims' in elem.next.next["class"]:
            detail_page_info["dog_ok"] = 0 if 'no' in text else 1
            detail_page_info["cat_ok"] = 0 if 'no' in text else 1
        elif 'icon-info-address-dims' in elem.next.next["class"]:
            detail_page_info["full_address"] = text
        elif 'icon-info-sqft-dims' in elem.next.next["class"]:
            detail_page_info["living_area"] = parse_number_only(text, int)
        elif 'icon-info-age-dims' in elem.next.next["class"]:
            if 'minute' in text:
                listing_date = datetime.datetime.now() - datetime.timedelta(minutes=parse_number_only(text))
            elif 'hour' in text:
                listing_date = datetime.datetime.now() - datetime.timedelta(hours=parse_number_only(text))
            elif 'day' in text:
                listing_date = datetime.datetime.now() - datetime.timedelta(days=parse_number_only(text))
            detail_page_info["listing_date"] = parse_number_only(listing_date.strftime('%s'), int)
    return detail_page_info

def parse_detail_page_info_not_preloaded(driver):
    sleep_time = 1
    sleep_time_step = 2
    cnt_max = 5
    cnt = 1
    while cnt <= cnt_max:
        logging.info("{0} reloading the url...".format(str(cnt)))
        response_html = driver.page_source.encode('utf-8')
        soup_dp = BeautifulSoup(response_html, 'html.parser')
        if soup_dp.find("span", {"ng-bind": "::entity.priceText"}) is not None:
            logging.info("alright, the tag has been found, proceed with parsing!!")
            break
        time.sleep(sleep_time)
        sleep_time += sleep_time_step
        cnt += 1
    detail_page_info = parse_home_detail_page(soup_dp)
    return detail_page_info

def parse_listing_detail(soup_dp, driver):
    '''
    Zumper has a very interesting approach to making scraping difficult.
    There are two types of detail pages that we can scrape:
      1) normal html
      2) weird (and probably time-varying) class names, but they provide the data as JSON
    So, this function detects which type it is and runs the parsing routine accordingly.
    '''
    detail_page_info = {}
    if "window.__PRELOADED_STATE__" in soup_dp.text:
        ### JSON blob
        json_info = [s.text.strip() for s in soup_dp.select("script") if "window.__PRELOADED_STATE__" in s.text]
        json_info = json_info[0].replace("window.__PRELOADED_STATE__ = ", "")
        json_info = json.loads(json_info)
        detail_page_json_info = parse_preloaded_json(json_info)
        ### parse unit amenities
        if soup_dp.find(text="UNIT"):
            amenities_info = [s.text.strip().lower() for s in soup_dp.find(text="UNIT").next.contents]
        else:
            amenities_info = []
        ### parse building amenities
        if soup_dp.find(text="BUILDING"):
            building_amenities_info = [s.text.strip().lower() for s in soup_dp.find(text="BUILDING").next.contents]
        else:
            building_amenities_info = []
        ### parse the simple basic-attributes bar
        if soup_dp.select_one("div.icon-sqft-dims"):
            detail_page_basic_info_simple = parse_preloaded_basic_attributes_simple(soup_dp)
        else:
            detail_page_basic_info_simple = {}
        ### parse the complex basic-attributes bar
        if soup_dp.select_one("div.icon-info-sqft-dims"):
            detail_page_basic_info_complex = parse_preloaded_basic_attributes_complex(soup_dp)
        else:
            detail_page_basic_info_complex = {}
        ### merge information  TODO: merge logics
        detail_page_info.update(detail_page_json_info)
        detail_page_info.update(detail_page_basic_info_simple)
        detail_page_info.update(detail_page_basic_info_complex)
        detail_page_info["unit_amenities"] = amenities_info
        detail_page_info["building_amenities"] = building_amenities_info
    else:
        detail_page_json_info = parse_detail_page_info_not_preloaded(driver)
        detail_page_info.update(detail_page_json_info)
    ### property type
    detail_page_info['property_type'] = detect_property_type_from_text(detail_page_info['description'])
    detail_page_info['scrape_date'] = int(datetime.datetime.now().strftime('%s'))
    return detail_page_info

#############################################
## Selenium stuff
#############################################
def load_page_and_scroll(driver, request_url, scroll_pause_time=SCROLL_PAUSE_TIME):
    # load the page
    driver.get(request_url)
    # scroll to the bottom of the page
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
    # wait for the page to load
    time.sleep(scroll_pause_time)
    # when loading has completed, get the html
    response_html = driver.page_source.encode('utf-8')
    return response_html, driver

def get_next_page_url(soup, page_number, base_url=None, verbose=False):
    try:
        ### first, try to get the next page url from the page itself
        next_url = soup.find("li", {"ng-show": "nextPageUrl"}).find("a")["href"]
        if verbose:
            logging.info("next page: {0}".format(next_url))
    except:
        ### if we can't find the next page url, synthesize it ourselves
        url_wo_page = re.sub("&.+", '', base_url)
        next_url = url_wo_page + "&page=" + str(page_number + 1)
        if verbose:
            logging.info("synthesize next_url: {0}".format(next_url))
    return next_url

def join_url(url, domain_url):
    regex_search = re.search("((https://)?www.[a-zA-Z0-9]+.[a-z]{3,4})/", url)
    if regex_search and len(regex_search.groups()) > 0:
        return url
    else:
        if (domain_url[-1] == "/") and (url[0] == "/"):
            return domain_url[:-1] + url
        else:
            return domain_url + url

#############################################
## Main program
#############################################
def start_crawler_from_url(request_url, write_mode='a'):
    ### parse the domain
    domain_url = re.search("((https://)?www.[a-zA-Z0-9]+.[a-z]{3,4})/", request_url).groups()[0]
    ### start the output files
    with open(OUTPUT_FILEPATH, write_mode) as f:
        f.writelines('[\n')
    with open(ERROR_FILEPATH, write_mode) as f:
        f.write('===error urls===\n')
    ### loop until there is no more next page
    cnt_page = 1
    while request_url and (cnt_page <= PAGE_NUM_LIMIT):
        logging.info("main page: {0}".format(request_url))
        ### extract the page number
        page_number_regex = re.search(".+page=([0-9]+)", request_url)
        if page_number_regex:
            page_number = int(page_number_regex.groups()[0])
        else:
            page_number = 1
        response_main_page, _ = load_page_and_scroll(driver, request_url, SCROLL_PAUSE_TIME)
        soup = BeautifulSoup(response_main_page, 'html.parser')
        ### this is the stopping criterion
        if len(soup.select("div.listingFeed-item")) < 1:
            logging.info("We've hit an empty page at page={0}. I guess it's time to stop".format(str(page_number)))
            break
        ### next page url
        next_url = get_next_page_url(soup, page_number, base_url=request_url, verbose=True)
        ### get photocard urls from the main page
        urls = []
        for soup_photo_card in soup.select("div.listingFeed-item"):
            dp_url = soup_photo_card.select_one("h3 a")["href"]
            ### only keep apartments-for-rent listings, not apartment buildings
            if "apartments-for-rent" in dp_url:
                dp_url = join_url(dp_url, domain_url)
                urls.append(dp_url)
        ### scrape all urls found on the main page
        data, error = scrape_from_urls(urls, domain_url)
        ### write to file when each page is completed
        with open(OUTPUT_FILEPATH, "a") as f:
            for d in data:
                f.writelines(json.dumps(d) + ',\n')
        with open(ERROR_FILEPATH, "a") as f:
            for e in error:
                f.writelines('"{0}",\n'.format(e))
        ### update the next url
        cnt_page += 1
        request_url = next_url
        time.sleep(1.0/1000 * random.randint(SLEEP_MIN_MS_BETWEEN_SEARCH_PAGE, SLEEP_MAX_MS_BETWEEN_SEARCH_PAGE))
    ### close the output file
    with open(OUTPUT_FILEPATH, "a") as f:
        f.writelines(']')

def scrape_from_urls(urls, domain_url):
    data = []
    error = []
    for dp_url in urls:
        dp_url = join_url(dp_url, domain_url)
        logging.info("detail page: {0}".format(dp_url))
        response_dp, driver_cur = load_page_and_scroll(driver, dp_url, SCROLL_PAUSE_TIME_DP)
        soup_dp = BeautifulSoup(response_dp, 'html.parser')
        try:
            detail_page_info = parse_listing_detail(soup_dp, driver_cur)
            detail_page_info["id_foreign"] = parse_number_only(dp_url.split('/')[-2], int)
            detail_page_info["dp_url"] = dp_url
            data.append(detail_page_info)
        except:
            logging.warning("Error and not saved: {0}".format(dp_url))
            traceback.print_exc()
            error.append(dp_url)
        ### wait between requests
        time.sleep(1.0/1000 * random.randint(SLEEP_MIN_MS_BETWEEN_DETAIL_PAGE, SLEEP_MAX_MS_BETWEEN_DETAIL_PAGE))
    return data, error

if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("-mode", dest="operating_mode", type=str, choices=["crawl_from_a_url", "crawl_all_urls"])
    parser.add_argument("-urls", dest="request_urls", nargs='*', type=str, help="url(s) you want to start your crawler from")
    parser.add_argument("-domain", dest="domain_url", type=str, help="domain url of the website", default="https://www.zumper.com")
    parser.add_argument("-o", dest="output", type=str, help="path to the output file")
    parser.add_argument("-err", dest="error", type=str, help="path to the error logfile")
    parser.add_argument("-log", dest="logfile", type=str, help="path to the final log file")
    parser.add_argument("-w", dest="write_mode", type=str, help='"w" for overwrite, "a" for append', default="a", choices=["a", "w"])
    ### example:
    # python scrape_rental_zumper_v1.py -mode crawl_from_a_url -urls 'https://www.zumper.com/apartments-for-rent/san-diego-ca?property-categories=condo,house' -o zumper_rental_data_v1_San-Diego-CA_2.json -err zumper_error_urls_San-Diego-CA_2.txt -w a
    args = parser.parse_args()
    logging.info(str(args))
    request_urls = args.request_urls
    OUTPUT_FILEPATH = args.output
    ERROR_FILEPATH = args.error
    write_mode = args.write_mode
    operating_mode = args.operating_mode
    domain_url = args.domain_url
    if args.logfile:
        LOG_FILEPATH = args.logfile
    else:
        LOG_FILEPATH = './' + re.sub(".json", ".log", OUTPUT_FILEPATH)
    print("TMP_LOGFILE: {0}".format(TMP_LOGFILE))
    print("LOG_FILEPATH: {0}".format(LOG_FILEPATH))
    #########################################
    ### use case 1: scrape from a single url
    #########################################
    if operating_mode == 'crawl_from_a_url':
        try:
            start_crawler_from_url(request_urls[0], write_mode=write_mode)
        finally:
            os.rename(TMP_LOGFILE, LOG_FILEPATH)
    #########################################
    ### use case 2: scrape from a list of urls
    #########################################
    elif operating_mode == 'crawl_all_urls':
        try:
            data, error = scrape_from_urls(request_urls, domain_url)
            ### write the error file
            with open(ERROR_FILEPATH, "a") as f:
                f.write('===error urls===\n')
                for e in error:
                    f.writelines('"{0}",\n'.format(e))
            ### write the output file when the run is completed
            with open(OUTPUT_FILEPATH, "a") as f:
                f.writelines('[\n')
                for d in data:
                    f.writelines(json.dumps(d) + ',\n')
                f.writelines(']')
        finally:
            os.rename(TMP_LOGFILE, LOG_FILEPATH)
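
The output file holds one JSON object per line between '[' and ']', with a trailing comma after each record, so it is not strictly valid JSON. Here is a small sketch (load_scraped_listings is illustrative and not part of the script) for loading it into a pandas DataFrame by reading it line by line; the column names follow the field names the scraper emits:

import json
import pandas as pd

def load_scraped_listings(path):
    # read the scraper's output line by line, skipping the bracket lines
    # and stripping the trailing comma after each JSON record
    records = []
    with open(path) as f:
        for line in f:
            line = line.strip().rstrip(',')
            if line in ('', '[', ']'):
                continue
            records.append(json.loads(line))
    return pd.DataFrame(records)

df = load_scraped_listings('zumper_rental_data_v1_san-francisco-ca.json')
print(df[['full_address', 'listing_rent_price', 'num_bedrooms']].head())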