# -*- coding: utf-8 -*-
"""test.py
Automatically generated by Colab.
Original file is located at
https://colab.research.google.com/drive/1O-wPCO48ahHZK9McxMSooL4aGTGHMUEa
"""
# Commented out IPython magic to ensure Python compatibility.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import os
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import torch.optim as optim
import yfinance as yf
from collections import deque
import random
import math
from tqdm import tqdm
dataset_dir = "/content/drive/MyDrive/Deep_RL_for_Stock_Trading"
# Define the ticker symbol for NIFTY50
nifty50_ticker = "^NSEI"
# Download the historical data for NIFTY50
nifty50_data = yf.download(nifty50_ticker, start="2010-01-01", end="2019-08-08")
plt.plot(nifty50_data['Close'])
plt.show()
file_name = f"{dataset_dir}/data.csv"
# Save the DataFrame to CSV, keeping the Date index so the timestamps survive the round trip
nifty50_data.to_csv(file_name)
print(f"File saved at {file_name}")
# Data cleaning and EDA
null_values = nifty50_data.isna().values.any()
print(f"Null values present: {bool(null_values)}")
if null_values:
    nifty50_data = nifty50_data.ffill()  # forward-fill; fillna(method=...) is deprecated
# Split the closing prices 80/20 into training and testing sets
X = list(nifty50_data["Close"].squeeze())  # squeeze() guards against multi-level columns from newer yfinance versions
data = [float(x) for x in X]
test_size = 0.2
train_data = data[:int(len(data) * (1 - test_size))]
test_data = data[int(len(data) * (1 - test_size)):]
print(f"Training data length: {len(train_data)}, testing data length: {len(test_data)}")
# Testing
# The agent is assumed to have been trained already (see the training code above).
l_test = len(test_data) - 1
state = getState(test_data, 0, window_size + 1)
total_profit = 0
is_eval = True
done = False
states_sell_test = []
states_buy_test = []
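# plot_behavior is called at the end of the loop but is not defined in this
# export. A minimal matplotlib sketch (hypothetical; the training notebook
# holds the original) that marks buys and sells on the price series:
def plot_behavior(data_input, states_buy, states_sell, profit):
    plt.figure(figsize=(15, 5))
    plt.plot(data_input, color="r", lw=2.0, label="Close price")
    plt.plot(data_input, "^", markersize=10, color="m", label="Buy",
             markevery=states_buy)
    plt.plot(data_input, "v", markersize=10, color="k", label="Sell",
             markevery=states_sell)
    plt.title(f"Total gains: {profit:.2f}")
    plt.legend()
    plt.show()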
# To restore a saved checkpoint instead, uncomment:
# model_name = f"{dataset_dir}/model_ep" + "6"
# agent = DQN_Agent(window_size, is_eval, model_name)
agent_inventory = []  # positions currently held (FIFO)
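# The loop below needs a concrete agent. The real DQN_Agent comes from the
# training notebook; this is a minimal evaluation-only sketch (assumed
# architecture: a small feed-forward Q-network over the window_size state,
# with three actions: 0 = hold, 1 = buy, 2 = sell).
class DQN_Agent:
    def __init__(self, state_size, is_eval=False, model_name=""):
        self.state_size = state_size
        self.action_size = 3
        self.memory = deque(maxlen=1000)  # replay buffer of (s, a, r, s', done)
        self.is_eval = is_eval
        self.epsilon = 0.0 if is_eval else 1.0  # no exploration during evaluation
        self.model = nn.Sequential(
            nn.Linear(state_size, 64), nn.ReLU(),
            nn.Linear(64, 32), nn.ReLU(),
            nn.Linear(32, self.action_size),
        )
        if model_name and os.path.exists(model_name):
            # Assumes the checkpoint stores a state_dict
            self.model.load_state_dict(torch.load(model_name, map_location="cpu"))
        if is_eval:
            self.model.eval()

    def act(self, state):
        # Epsilon-greedy during training; greedy in evaluation mode
        if not self.is_eval and random.random() <= self.epsilon:
            return random.randrange(self.action_size)
        with torch.no_grad():
            q_values = self.model(torch.tensor(state, dtype=torch.float32))
        return int(torch.argmax(q_values).item())

agent = DQN_Agent(window_size, is_eval=is_eval)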
for t in tqdm(range(l_test), desc="Testing Pipeline in progress"):
    action = agent.act(state)
    next_state = getState(test_data, t + 1, window_size + 1)
    reward = 0
    if action == 1:  # buy
        agent_inventory.append(test_data[t])
        states_buy_test.append(t)
        print(f"Buy: {test_data[t]}")
    elif action == 2 and len(agent_inventory) > 0:  # sell, only with an open position
        bought_price = agent_inventory.pop(0)
        reward = max(test_data[t] - bought_price, 0)
        total_profit += test_data[t] - bought_price
        states_sell_test.append(t)
        print(f"Sell: {test_data[t]} | Profit: {test_data[t] - bought_price}")
    # action == 0 is hold: no trade, zero reward
    if t == l_test - 1:
        done = True
    agent.memory.append((state, action, reward, next_state, done))
    state = next_state
    if done:
        print("------------------------------------------")
        print(f"Total Profit: {total_profit:.2f}")
        print("------------------------------------------")
        plot_behavior(test_data, states_buy_test, states_sell_test, total_profit)