Python Forum

Full Version: How to scrape only unique values and save it into database
You're currently viewing a stripped down version of our content. View the full version with proper formatting.

I am scraping a website using beautifulsoup, requests and sqlite3. Right now I am scraping the website and saving its data into a database. But now, due to a heavy inflow of data, scraping inserts too much duplicate data into the database. I tried the INSERT OR REPLACE command to keep only unique data, but it didn't work. How can I save only unique data into the database?

Below are my codes:

import csv
from bs4 import BeautifulSoup
import requests
import time
import pdb
import sqlite3

url = ""
url1 = ""
final_data = []
dct = {}

def writefiles(alldata, filename):
    with open ("./"+ filename, "w") as csvfile:
        csvfile = csv.writer(csvfile, delimiter=",")
        for i in range(0, len(alldata)):

def getbyGet(url, values):
    """Perform an HTTP GET on *url* and return the response body as text.

    Args:
        url: the URL to fetch.
        values: mapping of query parameters to send with the request.

    Returns:
        str: the decoded response body.
    """
    # Bug fix: the original passed data=values, which puts the values in
    # the request *body*; servers ignore bodies on GET.  params= encodes
    # them into the query string, which is what a GET expects.  (All
    # current callers pass {}, so this is behavior-compatible.)
    res = requests.get(url, params=values)
    return res.text

def readHeaders():
    global url, url1
    html = getbyGet(url, {})
    soup  = BeautifulSoup(html, "html.parser")
    EVENTTARGET ="#__VIEWSTATE")[0]['value']
    VIEWSTATE ="#__VIEWSTATE")[0]['value']
    headers= {'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
              'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0'}

    formfields =  {'__EVENTARGUMENT':'',
                  "__VIEWSTATEGENERATOR": "4F1A7E70",
                  'ctl00$ContentPlaceHolder1$DdlprojectDistrict':0, #this is where your city name changes in each iteration
    s = requests.session()
    conn = sqlite3.connect("99_data_increment.db")
    #cur = conn.cursor()
    conn.execute("CREATE TABLE IF NOT EXISTS crawled (id INTEGER PRIMARY KEY, Rera_Number text, Project_Name text, Promoter_Name text, City text, ResComm text, Links text) ")
    cur = conn.cursor()
    res =, data=formfields, headers=headers).text
    soup = BeautifulSoup(res, "html.parser")
    get_details = soup.find_all(id="ctl00_ContentPlaceHolder1_GridView1")
    for details in get_details:
        gettr = details.find_all("tr")[1:]
        for tds in gettr:
            td = tds.find_all("td")[1]
            rera = td.find_all("span")
            rnumber = ""
            for num in rera:
                rnumber = num.text
                sublist = []
            name = tds.find_all("td")[2]
            prj_name = name.find_all("span")
            prj = ""
            for prjname in prj_name:
                prj = prjname.text
            promoter_name = tds.find_all("td")[3]
            promoter = promoter_name.find_all("span")
            prom = ""
            for promname in promoter:
                prom = promname.text
            district = tds.find_all("td")[4]
            dist = district.find_all("span")
            district_name = ""
            for districtname in dist:
                district_name = districtname.text
            protype = tds.find_all("td")[5]
            project = protype.find_all("span")
            projectype = ""
            for prjtype in project:
                projectype = prjtype.text
            cur.execute("INSERT OR REPLACE INTO crawled VALUES (NULL,?,?,?,?,?,?)",(rnumber, prj, prom , district_name , projectype, projectype))
    return final_data

def main():
    """Entry point: scrape the site into the database, then dump the
    scraped rows to a CSV file."""
    scraped_rows = readHeaders()
    writefiles(scraped_rows, "Up-new.csv")