Help visualizing and print out - Printable Version +- Python Forum (https://python-forum.io) +-- Forum: Python Coding (https://python-forum.io/forum-7.html) +--- Forum: General Coding Help (https://python-forum.io/forum-8.html) +--- Thread: Help visualizing and print out (/thread-32044.html) |
Help visualizing and print out - Tabary - Jan-17-2021 Sir, Please Help I make this code from following tutorial on youtube It supposed to predict price on stock or forex market. I write the code from the tutorial, but somehow after finish with the video, i click run and no price pop up on my pycharm console on the bottom I really want to visualize it and print out the prediction price number How can i do that? Sorry for any misspelling, English is not my first language; below is the code i write from IPython.core.debugger import set_trace import pandas as pd import numpy as np import os import matplotlib.pyplot as plt import time plt.style.use(style="seaborn") df = pd.read_csv("LTCUSDHour.csv") df = df [["Close"]].copy() #print(df.head()) df["Target"] = df.Close.shift(-1) df.dropna(inplace=True) #print(df.head()) # Train Test Split def train_test_split(data, perc): data = data.values n = int(len(data) * (1 - perc)) return data [:n], data [n:] train, test = train_test_split(df, 0.2) X = train[:, :-1] Y = train[:, -1] from xgboost import XGBRegressor model = XGBRegressor(objective=r"reg:squarederror", n_estimators=1000) model.fit(X, Y) test[0] val = np.array(test[0, 0]).reshape(1, -1) pred = model.predict(val) #to Predict def xgb_predict(train, val): train = np.array(train) X, Y = train[:, :-1], train[:, -1] model = XGBRegressor(objective=r"reg:squarederror", n_estimators=1000) model.fit(X, Y) val = np.array(val).reshape(1, -1) pred = model.predict(val) return pred[0] xgb_predict(train, test[0, 0]) from sklearn.metrics import mean_squared_error def validate(data, perc): predictions = [] train, test = train_test_split(data, perc) history = [x for x in train] for i in range(len(test)): test_X, test_Y = test[i, :-1], test[i, -1] pred = xgb_predict(history, test_X[0]) predictions.append(pred) history.append(test[i]) error = mean_squared_error(test[:, -1], predictions, squared=False) return error, test[:, -1], predictions time rmse, Y, pred = validate(df, 0.2) print(rmse) RE: 
Help visualizing and print out - Larz60+ - Jan-17-2021 Please include a sample LTCUSDHour.csv file, or tell us how to create one. RE: Help visualizing and print out - Tabary - Jan-17-2021 (Jan-17-2021, 04:27 PM)Larz60+ Wrote: Please include a sample LTCUSDHour.csv file, or tell us how to create one. Here i upload it to google drive sir i dont know how to upload it here https://drive.google.com/file/d/1HI_QpM2nDBHk5CgkYnnwF6dxZiHBaCcE/view?usp=sharing RE: Help visualizing and print out - Larz60+ - Jan-18-2021 This has an output, not checking accuracy. You forgot to specify delimiter, so pandas assumed a comma. Actual was a tab. I printed out the data frame, you can remove that statement. I am missing something in my python install that's needed for running ( lzma module ). I don't think this will show up on your app: # from IPython.core.debugger import set_trace import pandas as pd import numpy as np import os import matplotlib.pyplot as plt import time # Assert starting directory is script directory os.chdir(os.path.abspath(os.path.dirname(__file__))) plt.style.use(style="seaborn") df = pd.read_csv("LTCUSDHour.csv", sep='\t') print(df) df = df [["Close"]].copy() #print(df.head()) df["Target"] = df.Close.shift(-1) df.dropna(inplace=True) #print(df.head()) # Train Test Split def train_test_split(data, perc): data = data.values n = int(len(data) * (1 - perc)) return data [:n], data [n:] train, test = train_test_split(df, 0.2) X = train[:, :-1] Y = train[:, -1] from xgboost import XGBRegressor model = XGBRegressor(objective=r"reg:squarederror", n_estimators=1000) model.fit(X, Y) test[0] val = np.array(test[0, 0]).reshape(1, -1) pred = model.predict(val) #to Predict def xgb_predict(train, val): train = np.array(train) X, Y = train[:, :-1], train[:, -1] model = XGBRegressor(objective=r"reg:squarederror", n_estimators=1000) model.fit(X, Y) val = np.array(val).reshape(1, -1) pred = model.predict(val) return pred[0] xgb_predict(train, test[0, 0]) from sklearn.metrics import 
mean_squared_error def validate(data, perc): predictions = [] train, test = train_test_split(data, perc) history = [x for x in train] for i in range(len(test)): test_X, test_Y = test[i, :-1], test[i, -1] pred = xgb_predict(history, test_X[0]) predictions.append(pred) history.append(test[i]) error = mean_squared_error(test[:, -1], predictions, squared=False) return error, test[:, -1], predictions time rmse, Y, pred = validate(df, 0.2) print(rmse)
|