
Commit

update
MuslemRahimi committed Jun 5, 2024
1 parent 4a8500e, commit 7a2925a
Showing 4 changed files with 26 additions and 14 deletions.
app/cron_congress_trading.py: 4 additions & 1 deletion
@@ -88,9 +88,12 @@ def replace_representative(office):
'Vance, J.D. (Senator)': 'James Vance',
'Neal Patrick MD, Facs Dunn': 'Neal Dunn',
'Neal Patrick MD, Facs Dunn (Senator)': 'Neal Dunn',
'Neal Patrick Dunn, MD, FACS': 'Neal Dunn',
'Neal P. Dunn': 'Neal Dunn',
'Tillis, Thom (Senator)': 'Thom Tillis',
'W. Gregory Steube': 'Greg Steube',
'W. Grego Steube': 'Greg Steube',
'W. Greg Steube': 'Greg Steube',
'David David Madison Cawthorn': 'David Madison Cawthorn',
'Blunt, Roy (Senator)': 'Roy Blunt',
'Thune, John (Senator)': 'John Thune',
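The mapping entries above only take effect once they are applied to the raw filer strings; as a minimal sketch (assuming the surrounding replace_representative function does little more than a dictionary lookup with a fallback to the original value, which is not shown here), the normalization could work like this:

# Hypothetical sketch of the lookup; the fallback behaviour is an assumption,
# not taken from the actual function body.
replacements = {
    'Neal Patrick Dunn, MD, FACS': 'Neal Dunn',
    'Neal P. Dunn': 'Neal Dunn',
    'W. Greg Steube': 'Greg Steube',
}

def normalize_representative(office):
    return replacements.get(office, office)

print(normalize_representative('Neal P. Dunn'))   # 'Neal Dunn'
print(normalize_representative('Nancy Pelosi'))   # unchanged: 'Nancy Pelosi'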
@@ -303,7 +306,7 @@ async def run():

total_symbols = crypto_symbols +etf_symbols + stock_symbols
total_raw_data = stock_raw_data + etf_raw_data + crypto_raw_data
chunk_size = 250
chunk_size = 500
politician_list = []

except Exception as e:
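For context, chunk_size appears to control how many symbols are processed per batch; a minimal, self-contained sketch of that kind of fixed-size batching (the helper name and slicing approach are illustrative, not the repository's actual loop) might look like:

# Illustrative fixed-size batching; names are hypothetical.
def chunked(items, chunk_size=500):
    # Yield consecutive slices of at most chunk_size elements.
    for i in range(0, len(items), chunk_size):
        yield items[i:i + chunk_size]

total_symbols = ['AAPL', 'MSFT', 'BTCUSD', 'SPY', 'TSLA']
for chunk in chunked(total_symbols, chunk_size=2):
    print(chunk)   # ['AAPL', 'MSFT'], then ['BTCUSD', 'SPY'], then ['TSLA']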
app/ml_models/fundamental_predictor.py: 12 additions & 7 deletions
@@ -45,8 +45,10 @@ async def download_data(ticker, con, start_date, end_date):
income = ujson.loads(query_df['income'].iloc[0])

#Only consider companies with at least 10 years of data
'''
if len(income) < 40:
raise ValueError("Income data length is too small.")
'''

income = [{k: v for k, v in item.items() if k not in ["symbol","reportedCurrency","calendarYear","fillingDate","acceptedDate","period","cik","link", "finalLink"]} for item in income if int(item["date"][:4]) >= 2000]
income_growth = ujson.loads(query_df['income_growth'].iloc[0])
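The list comprehension above strips identifier and filing-metadata keys and keeps only statements dated 2000 or later; a small worked example with made-up values shows the effect:

# Toy input, fabricated purely to illustrate the filtering behaviour.
income = [
    {"date": "1999-12-31", "symbol": "XYZ", "revenue": 90},
    {"date": "2023-12-31", "symbol": "XYZ", "cik": "0000001", "revenue": 120},
]
drop_keys = ["symbol", "reportedCurrency", "calendarYear", "fillingDate",
             "acceptedDate", "period", "cik", "link", "finalLink"]
income = [{k: v for k, v in item.items() if k not in drop_keys}
          for item in income if int(item["date"][:4]) >= 2000]
print(income)   # [{'date': '2023-12-31', 'revenue': 120}]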
@@ -109,11 +111,11 @@ async def download_data(ticker, con, start_date, end_date):
combined_data = sorted(combined_data, key=lambda x: x['date'])


df_income = pd.DataFrame(combined_data).dropna()
df_combined = pd.DataFrame(combined_data).dropna()

df_income['Target'] = ((df_income['price'].shift(-1) - df_income['price']) / df_income['price'] > 0).astype(int)
df_combined['Target'] = ((df_combined['price'].shift(-1) - df_combined['price']) / df_combined['price'] > 0).astype(int)

df_copy = df_income.copy()
df_copy = df_combined.copy()

return df_copy
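The Target column above labels each row by whether the next period's price is higher than the current one; a minimal sketch of the same labelling rule on toy prices:

# Minimal sketch of the labelling rule; the prices are illustrative only.
import pandas as pd

df = pd.DataFrame({'price': [10.0, 12.0, 11.0, 11.5]})
# 1 if the next price is higher than the current one, else 0; the last row
# has no "next" price, so its NaN comparison yields 0.
df['Target'] = ((df['price'].shift(-1) - df['price']) / df['price'] > 0).astype(int)
print(df['Target'].tolist())   # [1, 0, 1, 0]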

@@ -208,16 +210,17 @@ def evaluate_model(self, X_test, y_test):
return {'accuracy': round(test_accuracy*100), 'precision': round(test_precision*100), 'sentiment': 'Bullish' if next_value_prediction == 1 else 'Bearish'}, test_predictions

def feature_selection(self, X_train, y_train,k=8):
'''

selector = SelectKBest(score_func=f_classif, k=8)
selector.fit(X_train, y_train)

selector.transform(X_train)
selected_features = [col for i, col in enumerate(X_train.columns) if selector.get_support()[i]]

return selected_features
'''

# Calculate the variance of each feature with respect to the target
'''
variances = {}
for col in X_train.columns:
grouped_variance = X_train.groupby(y_train)[col].var().mean()
@@ -226,6 +229,7 @@ def feature_selection(self, X_train, y_train,k=8):
# Sort features by variance and select top k features
sorted_features = sorted(variances, key=variances.get, reverse=True)[:k]
return sorted_features
'''

#Train mode
async def train_process(tickers, con):
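Whichever branch of feature_selection ends up active, the SelectKBest variant shown above can be exercised on its own; a self-contained sketch on synthetic data (column names and sizes are arbitrary):

# Self-contained SelectKBest example; the dataset is synthetic.
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.feature_selection import SelectKBest, f_classif

X, y = make_classification(n_samples=200, n_features=10, n_informative=4, random_state=0)
X_train = pd.DataFrame(X, columns=[f'f{i}' for i in range(10)])
y_train = pd.Series(y)

selector = SelectKBest(score_func=f_classif, k=4)
selector.fit(X_train, y_train)
# Keep the column names whose ANOVA F-score ranks in the top k.
selected_features = [col for col, keep in zip(X_train.columns, selector.get_support()) if keep]
print(selected_features)   # e.g. ['f0', 'f3', 'f5', 'f9'], depending on the scores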
@@ -272,7 +276,7 @@ async def test_process(con):
start_date = datetime(2000, 1, 1).strftime("%Y-%m-%d")
end_date = datetime.today().strftime("%Y-%m-%d")
predictor = FundamentalPredictor()
df = await download_data('GME', con, start_date, end_date)
df = await download_data('RDDT', con, start_date, end_date)
split_size = int(len(df) * (1-test_size))
test_data = df.iloc[split_size:]
#selected_features = [col for col in test_data if col not in ['price','date','Target']]
@@ -283,8 +287,9 @@ async def main():
con = sqlite3.connect('../stocks.db')
cursor = con.cursor()
cursor.execute("PRAGMA journal_mode = wal")
cursor.execute("SELECT DISTINCT symbol FROM stocks WHERE marketCap >= 500E9")
cursor.execute("SELECT DISTINCT symbol FROM stocks WHERE marketCap >= 100E9")
stock_symbols = [row[0] for row in cursor.fetchall()]
print('Number of Stocks')
print(len(stock_symbols))
await train_process(stock_symbols, con)
await test_process(con)
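test_process splits the downloaded history chronologically rather than at random; a short sketch of that split (the test_size value and the toy frame are assumptions):

# Chronological train/test split; test_size and the data are illustrative.
import pandas as pd

test_size = 0.2
df = pd.DataFrame({'price': range(10)})   # stand-in for the downloaded history

split_size = int(len(df) * (1 - test_size))
train_data = df.iloc[:split_size]   # earliest 80% of rows
test_data = df.iloc[split_size:]    # most recent 20% of rows, never shuffled
print(len(train_data), len(test_data))   # 8 2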
app/restart_json.py: 4 additions & 0 deletions
@@ -491,6 +491,7 @@ async def get_delisted_list():




def replace_representative(office):
replacements = {
'Carper, Thomas R. (Senator)': 'Tom Carper',
@@ -541,9 +542,12 @@ def replace_representative(office):
'Vance, J.D. (Senator)': 'James Vance',
'Neal Patrick MD, Facs Dunn': 'Neal Dunn',
'Neal Patrick MD, Facs Dunn (Senator)': 'Neal Dunn',
'Neal Patrick Dunn, MD, FACS': 'Neal Dunn',
'Neal P. Dunn': 'Neal Dunn',
'Tillis, Thom (Senator)': 'Thom Tillis',
'W. Gregory Steube': 'Greg Steube',
'W. Grego Steube': 'Greg Steube',
'W. Greg Steube': 'Greg Steube',
'David David Madison Cawthorn': 'David Madison Cawthorn',
'Blunt, Roy (Senator)': 'Roy Blunt',
'Thune, John (Senator)': 'John Thune',
fastify/get-post/server.js: 6 additions & 6 deletions
@@ -78,8 +78,8 @@ module.exports = function (fastify, opts, done) {
filter += `&& created >= "${startDateStr}" && created <= "${endDateStr}" && pinned = false`
}

posts = (await pb.collection('posts').getList(data?.startPage, 50, {
sort: sort,
posts = (await pb.collection('posts').getList(data?.startPage, 5, {
sort: '-created',
filter: filter,
expand: 'user,comments(post),alreadyVoted(post)',
fields: "*,expand.user,expand.comments(post), expand.alreadyVoted(post).user,expand.alreadyVoted(post).type"
@@ -99,7 +99,7 @@ module.exports = function (fastify, opts, done) {

if (data?.userId) {

posts = (await pb.collection('posts').getList(data?.startPage, 10, {
posts = (await pb.collection('posts').getList(data?.startPage, 5, {
sort: sort,
filter: `user="${data?.userId}" && pinned=false`,
expand: `user,comments(post),alreadyVoted(post)`,
@@ -111,7 +111,7 @@ module.exports = function (fastify, opts, done) {

else if (data?.filterTicker) {

posts = await pb.collection('posts').getList(data?.startPage, 10, {
posts = await pb.collection('posts').getList(data?.startPage, 5, {
sort: sort,
filter: `tagline="${data?.filterTicker}" && pinned=false`,
expand: `user,comments(post),alreadyVoted(post)`,
@@ -141,8 +141,8 @@
else {
filter = `pinned=false`;
}
posts = await pb.collection('posts').getList(data?.startPage, 50, {
sort: sort,
posts = await pb.collection('posts').getList(1, 5, {
sort: '-created',
filter: filter,
expand: 'user, comments(post), alreadyVoted(post)',
fields: "*,expand.user,expand.comments(post), expand.alreadyVoted(post).user,expand.alreadyVoted(post).type"
