Sep-01-2018, 01:02 PM
(This post was last modified: Sep-01-2018, 01:02 PM by eddywinch82.)
I have had the Login Problem sorted, I paid a company for that to help me, but now when I run the modified code :-
I get an unexpected Indent Error, on page 64
Here is the latest Python Code :-
Which lines of the Code need Identing ? Or does more Code, need altering ?
Eddie
I get an unexpected Indent Error, on page 64
Here is the latest Python Code :-
import sys import getpass import hashlib import requests import os from bs4 import BeautifulSoup BASE_URL = 'https://www.flightsim.com/' LOGIN_PAGE = 'https://www.flightsim.com/vbfs/login.php?do=login' def do_login(credentials): session = requests.Session() session.get(BASE_URL) req = session.post(LOGIN_PAGE, params={'do': 'login'}, data=credentials) if req.status_code != 200: print('Login not successful') sys.exit(1) # session is now logged in return session def get_credentials(): username = input('Username: ') password = getpass.getpass() password_md5 = hashlib.md5(password.encode()).hexdigest() return { 'cookieuser': 1, 'do': 'login', 's': '', 'securitytoken': 'guest', 'vb_login_md5_password': password_md5, 'vb_login_md5_password_utf': password_md5, 'vb_login_password': '', 'vb_login_password_hint': 'Password', 'vb_login_username': username, } credentials = get_credentials() session = do_login(credentials) class ScrapeUrlList: def __init__(self): self.fpath = os.fspath self.ziplinks = [] def get_url(self, url): page = None response = requests.get(url) if response.status_code == 200: page = response.content else: print(f'Cannot load URL: {url}') # pass return page def get_catalog(self): base_url = 'https://www.flightsim.com/vbfs' with self.fpath.links.open('w') as fp: 'C:\Python37\Lib' baseurl = self.fpath.base_catalog_url for pageno in range(1, 254): url = f'https://www.flightsim.com/vbfs/fslib.php?searchid=65893537&page={pageno}' print(f'url: {url}') page = self.get_url(self.fpath.base_catalog_url) if page: soup = BeautifulSoup(page, 'lxml') zip_links = soup.find_all('div', class_="fsc_details") for link in zip_links: fp.write(f"{link.find('a').text}, {base_url}/{link.find('a').get('href')}") input() else: print(f'No page: {url}') def main(): sul = ScrapeUrlList() sul.get_catalog() if __name__ == '__main__': main()Does anyone know, how I can sort this Problem out ?
Which lines of the Code need Identing ? Or does more Code, need altering ?
Eddie