is this correct way of doing multi processing.
import pyodbc import csv import os import time import multiprocessing def Extract_to_csv(tbl,cursor): qry = f"Select * from {tbl}" cursor.execute(qry) data = cursor.fetchall() print(len(data)) folderPath = f"D:\\test_data\\output{tbl}" count = 0 # for reply in cursor.execute(qry):** for x in data: count = count + 1 print(count) fname = "Row"+str(count)+".csv" fullpath = os.path.join(folderPath,fname) print(fullpath) with open(fullpath,"w",newline="") as outfile: writer = csv.writer(outfile,delimiter="|",quoting=csv.QUOTE_NONNUMERIC) writer.writerow(col[0] for col in cursor.description) writer.writerow(x) print(f"I am row {count}",x) def main(): connection = pyodbc.connect( 'DRIVER={ODBC Driver 17 for SQL Server};SERVER=DESKTOP-GQK64O6;DATABASE=Customer;Trusted_Connection=yes;') cursor = connection.cursor() p1 = multiprocessing.Process(target=Extract_to_csv, args=(table1,cursor,)) p2 = multiprocessing.Process(target=Extract_to_csv, args=(table2,cursor,)) p3 = multiprocessing.Process(target=Extract_to_csv, args=(table3,cursor,)) p4 = multiprocessing.Process(target=Extract_to_csv, args=(table4,cursor,)) p5 = multiprocessing.Process(target=Extract_to_csv, args=(table5,cursor,)) p6 = multiprocessing.Process(target=Extract_to_csv, args=(table6,cursor,)) p7 = multiprocessing.Process(target=Extract_to_csv, args=(table7,cursor,)) p8 = multiprocessing.Process(target=Extract_to_csv, args=(table8,cursor,)) p9 = multiprocessing.Process(target=Extract_to_csv, args=(table9,cursor,)) p10 = multiprocessing.Process(target=Extract_to_csv, args=(table10,cursor,)) p1.start() p2.start() p3.start() p4.start() p5.start() p6.start() p7.start() p8.start() p9.start() p10.start() p1.join() p2.join() p3.join() p4.join() p5.join() p6.join() p7.join() p8.join() p9.join() p10.join() print("End of main") if __name__ == "__main__": start_time = time.process_time() main() end_time = time.process_time() print("Execution time is : ", end_time - start_time)