May-21-2018, 12:34 PM
(This post was last modified: May-21-2018, 12:37 PM by eddywinch82.)
Error:Exception in thread Thread-1:
Traceback (most recent call last):
File "C:\Python34\Lib\threading.py", line 920, in _bootstrap_inner
self.run()
File "C:\Python34\Lib\threading.py", line 868, in run
self._target(*self._args, **self._kwargs)
File "C:\Python34\Lib\concurrent\futures\process.py", line 251, in _queue_management_worker
shutdown_worker()
File "C:\Python34\Lib\concurrent\futures\process.py", line 209, in shutdown_worker
call_queue.put_nowait(None)
File "C:\Python34\Lib\multiprocessing\queues.py", line 131, in put_nowait
return self.put(obj, False)
File "C:\Python34\Lib\multiprocessing\queues.py", line 82, in put
raise Full
queue.Full
I get that traceback, and then the .zip files start downloading at the normal speed, not quicker. What does the traceback text mean? Eddie

Also, for one of the website links, I have the following code:
from bs4 import BeautifulSoup
import requests, zipfile, io, concurrent.futures

def download(number_id):
    a_zip = 'http://web.archive.org/web/20050301025710//http://www.projectai.com:80/packages/download_model.php?eula=1&fileid={}'.format(number_id)
    with open('{}.zip'.format(number_id), 'wb') as f:
        f.write(requests.get(a_zip).content)

if __name__ == '__main__':
    file_id = list(range(1, 50))
    with concurrent.futures.ProcessPoolExecutor(max_workers=10) as executor:
        for number_id in file_id:
            executor.submit(download, number_id)

def get_zips(link_root, zips_suffix):
    # 'http://web.archive.org/web/20050315112710/http://www.projectai.com:80/libraries/repaints.php?ac=89&cat=6'
    zips_page = link_root + zips_suffix
    # print zips_page
    zips_source = requests.get(zips_page).text
    zip_soup = BeautifulSoup(zips_source, "html.parser")
    for zip_file in zip_soup.select("a[href*=download.php?fileid=]"):
        zip_url = link_root + zip_file['href']
        print('downloading', zip_file.text, '...',)
        r = requests.get(zip_url)
        with open(zip_file.text, 'wb') as zipFile:
            zipFile.write(r.content)

def download_links(root, cat):
    url = ''.join([root, cat])
    source_code = requests.get(url)
    plain_text = source_code.text
    soup = BeautifulSoup(plain_text, "html.parser")

link_root = 'http://web.archive.org/web/20050301025710/http://www.projectai.com:80/packages/'
category = 'fde.php'
download_links(link_root, category)

But the .zip files are not being saved with the proper .zip file name, such as 49.zip, 50.zip, 51.zip, etc., and they say 0 bytes. Or is that because they haven't finished downloading? Eddie
Also, for one of the website links, I have the following code:
from bs4 import BeautifulSoup
import requests, zipfile, io, concurrent.futures

def download(number_id):
    a_zip = 'http://web.archive.org/web/20050301025710//http://www.projectai.com:80/packages/download_model.php?eula=1&fileid={}'.format(number_id)
    with open('{}.zip'.format(number_id), 'wb') as f:
        f.write(requests.get(a_zip).content)

if __name__ == '__main__':
    file_id = list(range(1, 50))
    with concurrent.futures.ProcessPoolExecutor(max_workers=10) as executor:
        for number_id in file_id:
            executor.submit(download, number_id)

def get_zips(link_root, zips_suffix):
    # 'http://web.archive.org/web/20050315112710/http://www.projectai.com:80/libraries/repaints.php?ac=89&cat=6'
    zips_page = link_root + zips_suffix
    # print zips_page
    zips_source = requests.get(zips_page).text
    zip_soup = BeautifulSoup(zips_source, "html.parser")
    for zip_file in zip_soup.select("a[href*=download.php?fileid=]"):
        zip_url = link_root + zip_file['href']
        print('downloading', zip_file.text, '...',)
        r = requests.get(zip_url)
        with open(zip_file.text, 'wb') as zipFile:
            zipFile.write(r.content)

def download_links(root, cat):
    url = ''.join([root, cat])
    source_code = requests.get(url)
    plain_text = source_code.text
    soup = BeautifulSoup(plain_text, "html.parser")

link_root = 'http://web.archive.org/web/20050301025710/http://www.projectai.com:80/packages/'
category = 'fde.php'
download_links(link_root, category)

But the .zip files are not being saved with the proper .zip file name — they are being saved as 49.zip, 50.zip, 51.zip, etc., and they say 0 bytes. Or is that because they haven't finished downloading? Eddie
Sorry — they are being saved as 49.zip, 50.zip, 51.zip, etc.