Dec-29-2016, 07:19 AM
I use urllib.request, which has worked well for me for binary, text, and HTML files.
Here's the class I use:
# GetUrl - Fetch files or web pages from the internet
#
# Author: Larz60+
import urllib.request as ur
import os
from time import sleep


class GetUrl:
    """Download a URL to a file, or return its contents as UTF-8 text.

    Uses urllib.request throughout so non-HTTP schemes (ftp://, file://)
    work too.  The previous version passed the ftp URL to requests.get(),
    which raises InvalidSchema (requests supports HTTP/HTTPS only), and it
    wrote the Response object itself to the file instead of its payload.
    """

    def __init__(self, returndata=False):
        # Kept for backward compatibility with existing callers;
        # not consulted by get_url().
        self.returndata = returndata

    def get_url(self, url, tofile=None, bin=False):
        """Fetch *url*.

        Parameters:
            url:    the URL to fetch (any scheme urllib supports).
            tofile: optional destination path.  When given, the payload is
                    saved there and None is returned.
            bin:    when saving, write raw bytes ('wb') if True, otherwise
                    decode as UTF-8 and write text.  (Name shadows the
                    builtin, but it is part of the public keyword interface,
                    so it is kept.)

        Returns:
            The payload decoded as UTF-8 text when *tofile* is None,
            otherwise None.

        Errors are printed rather than raised (best-effort behaviour
        preserved from the original).
        """
        try:
            if tofile:
                # Start from a clean slate so a partial previous download
                # is never left behind under the same name.
                if os.path.exists(tofile):
                    os.remove(tofile)
                payload = ur.urlopen(url).read()  # bytes
                if bin:
                    with open(tofile, 'wb') as f:
                        f.write(payload)
                else:
                    with open(tofile, 'w') as f:
                        f.write(payload.decode('utf8'))
                # Brief pause, preserved from the original -- presumably to
                # be polite to the server when called in a loop.
                sleep(.5)
            else:
                return ur.urlopen(url).read().decode('utf8')
        except Exception as e:
            # Best-effort: report and continue, as the original did.
            print(str(e))


if __name__ == '__main__':
    url = 'ftp://ftp.nasdaqtrader.com/symboldirectory/phlxListedStrikesWithOptionIds.zip'
    # Raw string: the original literal relied on '\p', '\s', etc. not being
    # escape sequences, which breaks silently for components like '\n'.
    tofile = r'G:\python\stock_market\symbols\data\DailyFiles\USA\phlxListedStrikesWithOptionIds.zip'
    p = GetUrl()
    p.get_url(url, tofile, bin=True)