Python Forum
Many errors
#1
Larz60+ crafted the first version of this. When I run his version, it runs fine, so I don't know what I did to mess mine up. I didn't change much, but I've learned it doesn't take much to turn it right on its head! Let me know if you want to see his version to compare. I am downloading different reports, but from the same website.

As always - any help is most appreciated!


import requests
from bs4 import BeautifulSoup
from pathlib import Path
import CheckInternet
import sys


class GetCompletions:
    def __init__(self, infile):
        self.check_network = CheckInternet.CheckInternet()
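        # data folders live one level up from the working directory, under ../data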
        self.homepath = Path('.')
        self.rootpath = self.homepath / '..'
        self.datapath = self.rootpath / 'data'
        self.commandpath = self.datapath / 'command_files'
        self.wellgeopath = self.datapath / 'wellgeo'
        self.htmlpath = self.datapath / 'html'
        self.reportspath = self.datapath / 'reports'

        if self.check_network.check_availability():
            # use: Api_May_27_2018.txt for testing
            # self.infilename = 'Api_May_27_2018.txt'
            self.infilename = input('Please enter api filename: ')

            self.infile = self.commandpath / self.infilename
            self.api = []

            with self.infile.open() as f:
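                # one API number per line, e.g. 49005273890000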
                for line in f:
                    self.api.append(line.strip())

            self.fields = ['API Number', 'Field', 'Formation', 'Well', 'location']
            self.get_all_pages()
            self.parse_and_save(getpdfs=True)
        else:
            print('Internet access required, and not found.')
            print('Please make Internet available and try again')

    def get_url(self):
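        # yield (api_number, url) pairs; entry[3:10] is the 7-digit well number used in the query string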
        for entry in self.api:
            print("http://http://wogcc.state.wy.us/coreapi.cfm?API={}".format(entry[3:10]))
            yield (entry, "http://http://wogcc.state.wy.us/coreapi.cfm?API={}".format(entry[3:10]))

    def get_all_pages(self):
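        # download each well's core-API page and save the html under ../data/html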
        for entry, url in self.get_url():
            print('Fetching main page for entry: {}'.format(entry))
            response = requests.get(url)
            if response.status_code == 200:
                filename = self.htmlpath / 'api_{}.html'.format(entry)
                with filename.open('w') as f:
                    f.write(response.text)
            else:
                print('error downloading {}'.format(entry))

    def parse_and_save(self, getpdfs=False):
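        # walk the saved html files, optionally download the linked pdfs, then write a field summary per well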
        filelist = [file for file in self.htmlpath.iterdir() if file.is_file()]
        for file in filelist:
            with file.open('r') as f:
                soup = BeautifulSoup(f.read(), 'lxml')
            if getpdfs:
                links = soup.find_all('a')
                for link in links:
                    url = link['href']
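                    # skip links that point to other sites (href contains 'www')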
                    if 'www' in url:
                        continue
                    print('downloading pdf at: {}'.format(url))
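                    # index of '=' in the query string, used to build a fallback filename below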
                    p = url.index('=')
                    response = requests.get(url, stream=True, allow_redirects=False)
                    if response.status_code == 200:
                        try:
                            header_info = response.headers['Content-Disposition']
                            idx = header_info.index('filename')
                            filename = self.wellgeopath / header_info[idx + 9:]
                        except ValueError:
                            filename = self.wellgeopath / 'comp{}.pdf'.format(url[p + 1:])
                            print("couldn't locate filename for {} will use: {}".format(file, filename))
                        except KeyError:
                            filename = self.wellgeopath / 'comp{}.pdf'.format(url[p + 1:])
                            print('got KeyError on {}, response.headers = {}'.format(file, response.headers))
                            print('will use name: {}'.format(filename))
                            print(response.headers)
                        with filename.open('wb') as f:
                            f.write(response.content)
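            # summary file is named after the 7-digit well number embedded in the html filename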
            sfname = self.reportspath / 'summary_{}.txt'.format((file.name.split('_'))[1].split('.')[0][3:10])
            tds = soup.find_all('td')
            with sfname.open('w') as f:
                for td in tds:
                    if td.text:
                        if any(field in td.text for field in self.fields):
                            f.write('{}\n'.format(td.text))
            # Delete html file when finished
            file.unlink()


if __name__ == '__main__':
    GetCompletions('apis.txt')
Error:
C:\Python365\python.exe "O:/Python/WellInfo - GEO/src/FetchCompletions.py"
Please enter api filename: Api_May_27_2018.txt
http://http://wogcc.state.wy.us/coreapi.cfm?API=0527389
Fetching main page for entry: 49005273890000
Traceback (most recent call last):
  File "C:\Python365\lib\site-packages\urllib3\connection.py", line 141, in _new_conn
    (self.host, self.port), self.timeout, **extra_kw)
  File "C:\Python365\lib\site-packages\urllib3\util\connection.py", line 60, in create_connection
    for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
  File "C:\Python365\lib\socket.py", line 745, in getaddrinfo
    for res in _socket.getaddrinfo(host, port, family, type, proto, flags):
socket.gaierror: [Errno 11004] getaddrinfo failed

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "C:\Python365\lib\site-packages\urllib3\connectionpool.py", line 601, in urlopen
    chunked=chunked)
  File "C:\Python365\lib\site-packages\urllib3\connectionpool.py", line 357, in _make_request
    conn.request(method, url, **httplib_request_kw)
  File "C:\Python365\lib\http\client.py", line 1239, in request
    self._send_request(method, url, body, headers, encode_chunked)
  File "C:\Python365\lib\http\client.py", line 1285, in _send_request
    self.endheaders(body, encode_chunked=encode_chunked)
  File "C:\Python365\lib\http\client.py", line 1234, in endheaders
    self._send_output(message_body, encode_chunked=encode_chunked)
  File "C:\Python365\lib\http\client.py", line 1026, in _send_output
    self.send(msg)
  File "C:\Python365\lib\http\client.py", line 964, in send
    self.connect()
  File "C:\Python365\lib\site-packages\urllib3\connection.py", line 166, in connect
    conn = self._new_conn()
  File "C:\Python365\lib\site-packages\urllib3\connection.py", line 150, in _new_conn
    self, "Failed to establish a new connection: %s" % e)
urllib3.exceptions.NewConnectionError: <urllib3.connection.HTTPConnection object at 0x000000000372ECC0>: Failed to establish a new connection: [Errno 11004] getaddrinfo failed

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "C:\Python365\lib\site-packages\requests\adapters.py", line 440, in send
    timeout=timeout
  File "C:\Python365\lib\site-packages\urllib3\connectionpool.py", line 639, in urlopen
    _stacktrace=sys.exc_info()[2])
  File "C:\Python365\lib\site-packages\urllib3\util\retry.py", line 388, in increment
    raise MaxRetryError(_pool, url, error or ResponseError(cause))
urllib3.exceptions.MaxRetryError: HTTPConnectionPool(host='http', port=80): Max retries exceeded with url: //wogcc.state.wy.us/coreapi.cfm?API=0527389 (Caused by NewConnectionError('<urllib3.connection.HTTPConnection object at 0x000000000372ECC0>: Failed to establish a new connection: [Errno 11004] getaddrinfo failed',))

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "O:/Python/WellInfo - GEO/src/FetchCompletions.py", line 95, in <module>
    GetCompletions('apis.txt')
  File "O:/Python/WellInfo - GEO/src/FetchCompletions.py", line 32, in __init__
    self.get_all_pages()
  File "O:/Python/WellInfo - GEO/src/FetchCompletions.py", line 46, in get_all_pages
    response = requests.get(url)
  File "C:\Python365\lib\site-packages\requests\api.py", line 72, in get
    return request('get', url, params=params, **kwargs)
  File "C:\Python365\lib\site-packages\requests\api.py", line 58, in request
    return session.request(method=method, url=url, **kwargs)
  File "C:\Python365\lib\site-packages\requests\sessions.py", line 508, in request
    resp = self.send(prep, **send_kwargs)
  File "C:\Python365\lib\site-packages\requests\sessions.py", line 618, in send
    r = adapter.send(request, **kwargs)
  File "C:\Python365\lib\site-packages\requests\adapters.py", line 508, in send
    raise ConnectionError(e, request=request)
requests.exceptions.ConnectionError: HTTPConnectionPool(host='http', port=80): Max retries exceeded with url: //wogcc.state.wy.us/coreapi.cfm?API=0527389 (Caused by NewConnectionError('<urllib3.connection.HTTPConnection object at 0x000000000372ECC0>: Failed to establish a new connection: [Errno 11004] getaddrinfo failed',))

Process finished with exit code 1
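
For what it's worth, the final exception shows requests trying to resolve a host literally named http (HTTPConnectionPool(host='http', port=80)), which matches the doubled scheme in the URLs that get_url prints ("http://http://wogcc.state.wy.us/..."). A minimal sketch of that generator with a single "http://", assuming the wogcc.state.wy.us endpoint and the entry[3:10] slice are otherwise right and everything else is left unchanged:


    def get_url(self):
        # build one URL per API number; entry[3:10] is the 7-digit well number
        base = "http://wogcc.state.wy.us/coreapi.cfm?API={}"
        for entry in self.api:
            url = base.format(entry[3:10])
            print(url)
            yield entry, url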