Using deep learning RNNs, build and evaluate models of Bitcoin closing prices (compare the FNG indicator vs. previous closing prices)
Due to the volatility of cryptocurrency speculation, investors often try to incorporate sentiment from social media and news articles to guide their trading strategies. The Crypto Fear & Greed Index (FNG) is an indicator that attempts to use a variety of data sources to produce a daily FNG value for cryptocurrency.
Using deep learning recurrent neural networks, build and evaluate models of Bitcoin closing prices to determine whether the FNG indicator provides a better signal for cryptocurrencies than the normal closing price data. One model will use the FNG values to predict the closing prices, while the second model will use a window of closing prices to predict the nth closing price.
Closing Prices Starter Notebook
python
def window_data(df, window, feature_col_number, target_col_number):
    """Chunk a time-ordered DataFrame into rolling windows for supervised learning.

    Each sample's features are `window` consecutive values of the feature
    column (rows i .. i+window-1) and its target is the target column at
    row i+window, i.e. X_{t-n} .. X_{t-1} -> X_t.

    Parameters:
        df (pandas.DataFrame): time-ordered input data.
        window (int): number of past observations per sample.
        feature_col_number (int): column index used for the features (X).
        target_col_number (int): column index used for the target (y).

    Returns:
        tuple: (X, y) numpy arrays — X has shape (samples, window),
        y has shape (samples, 1).
    """
    X = []
    y = []
    # Iterate up to len(df) - window so the final row can serve as a target.
    # (The original bound of `len(df) - window - 1` dropped the last sample.)
    for i in range(len(df) - window):
        features = df.iloc[i:(i + window), feature_col_number]
        target = df.iloc[(i + window), target_col_number]
        X.append(features)
        y.append(target)
    return np.array(X), np.array(y).reshape(-1, 1)
python
# Predict closing prices from a rolling window of previous closing prices.
# The exercise suggests trying window sizes from 1 to 10 to compare model
# performance; start with the smallest.
window_size = 1

# DataFrame layout: column index 0 is the FNG value, column index 1 is the
# `Close` price. Here both the features and the target come from `Close`.
feature_column = target_column = 1

X, y = window_data(df, window_size, feature_column, target_column)
python
# Chronological 70/30 split — no shuffling, so the model trains on the past
# and is evaluated on the future.
split = int(0.7 * len(X))
# Slice with [:split], not [:split - 1]: the original form silently dropped
# the last training sample (and misbehaves when split == 0, since [:-1]
# would then take everything but the final row).
X_train = X[:split]
X_test = X[split:]
y_train = y[:split]
y_test = y[split:]
python
# Use MinMaxScaler to scale the data between 0 and 1.
from sklearn.preprocessing import MinMaxScaler

# Create a MinMaxScaler object.
scaler = MinMaxScaler()

# Fit on the TRAINING features only: fitting on the full dataset (as the
# starter code did with `scaler.fit(X)`) leaks the test period's min/max
# into the transform, which inflates apparent model performance.
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)

# Refit the same scaler on the TRAINING target so that later calls to
# scaler.inverse_transform map model output back to price space.
scaler.fit(y_train)
y_train = scaler.transform(y_train)
y_test = scaler.transform(y_test)
python
# Reshape the features for the model
# LSTM layers expect 3-D input of shape (samples, time steps, features);
# append a trailing feature axis of size 1 to the 2-D window arrays.
X_train = X_train.reshape(*X_train.shape, 1)
X_test = X_test.reshape(*X_test.shape, 1)
python
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout
python
# Build the LSTM model.
# Build a stacked LSTM regressor: three LSTM layers of 30 units, each
# followed by 20% dropout, capped by a single-unit Dense output.
model = Sequential()
number_units = 30
dropout_fraction = 0.2

# return_sequences must be True for every LSTM that feeds another LSTM,
# and False (the default) for the final LSTM layer.
for layer_index in range(3):
    lstm_kwargs = {
        "units": number_units,
        "return_sequences": layer_index < 2,
    }
    if layer_index == 0:
        # Only the first layer declares the input shape: (time steps, features).
        lstm_kwargs["input_shape"] = (X_train.shape[1], 1)
    model.add(LSTM(**lstm_kwargs))
    model.add(Dropout(dropout_fraction))

# Output layer: one unit for the scalar price prediction.
model.add(Dense(1))
python
# Adam optimizer with mean-squared-error loss: the standard pairing for a
# scalar regression target such as a price.
model.compile(optimizer="adam", loss="mean_squared_error")
python
# Train the model.
# Use at least 10 epochs.
# Do not shuffle the data: the samples are time-ordered and the train/test
# split is chronological, so shuffling would mix future context into training.
# Experiment with the batch size, but a smaller batch size is recommended.
model.fit(X_train, y_train, epochs=10, shuffle=False, batch_size=5, verbose=1)
Use the `X_test` data to make the predictions, then compare the predicted prices against `y_test`.
python
# NOTE(review): `predicted` was never assigned in this notebook chunk (the
# prediction cell appears to have been lost); generate it here from the
# test features before un-scaling.
predicted = model.predict(X_test)

# Recover the original price scale: `scaler` was last fit on the target
# series, so inverse_transform maps the 0-1 values back to dollar prices.
predicted_prices = scaler.inverse_transform(predicted)
real_prices = scaler.inverse_transform(y_test.reshape(-1, 1))