diff --git a/GMF.py b/GMF.py
index 8471310..4b5f66b 100644
--- a/GMF.py
+++ b/GMF.py
@@ -1,19 +1,22 @@
 '''
+Updated on Jan 29, 2025
 Created on Aug 9, 2016
-
 Keras Implementation of Generalized Matrix Factorization (GMF) recommender model in:
 He Xiangnan et al. Neural Collaborative Filtering. In WWW 2017.
 
 @author: Xiangnan He (xiangnanhe@gmail.com)
+@Updated by: Amrita Yadav
+
 '''
 import numpy as np
 import theano.tensor as T
 import keras
-from keras import backend as K
-from keras import initializations
-from keras.models import Sequential, Model, load_model, save_model
-from keras.layers.core import Dense, Lambda, Activation
-from keras.layers import Embedding, Input, Dense, merge, Reshape, Merge, Flatten
+# from keras import initializations
+from keras import initializers
+from keras.initializers import RandomNormal, lecun_uniform
+from keras.models import Model
+from keras.layers import Dense
+from keras.layers import Embedding, Input, Dense, Multiply, Flatten
 from keras.optimizers import Adagrad, Adam, SGD, RMSprop
 from keras.regularizers import l2
 from Dataset import Dataset
@@ -52,7 +55,7 @@ def parse_args():
     return parser.parse_args()
 
 def init_normal(shape, name=None):
-    return initializations.normal(shape, scale=0.01, name=name)
+    return initializers.RandomNormal(stddev=0.01)(shape)
 
 def get_model(num_users, num_items, latent_dim, regs=[0,0]):
     # Input variables
@@ -60,23 +63,23 @@ def get_model(num_users, num_items, latent_dim, regs=[0,0]):
     item_input = Input(shape=(1,), dtype='int32', name = 'item_input')
 
     MF_Embedding_User = Embedding(input_dim = num_users, output_dim = latent_dim, name = 'user_embedding',
-                                  init = init_normal, W_regularizer = l2(regs[0]), input_length=1)
+                                  embeddings_initializer=RandomNormal(mean=0.0, stddev=0.05), embeddings_regularizer = l2(regs[0]), input_length=1)
     MF_Embedding_Item = Embedding(input_dim = num_items, output_dim = latent_dim, name = 'item_embedding',
-                                  init = init_normal, W_regularizer = l2(regs[1]), input_length=1)
+                                  embeddings_initializer=RandomNormal(mean=0.0, stddev=0.05), embeddings_regularizer = l2(regs[1]), input_length=1)
 
     # Crucial to flatten an embedding vector!
     user_latent = Flatten()(MF_Embedding_User(user_input))
     item_latent = Flatten()(MF_Embedding_Item(item_input))
 
     # Element-wise product of user and item embeddings
-    predict_vector = merge([user_latent, item_latent], mode = 'mul')
+    predict_vector = Multiply()([user_latent, item_latent])
 
     # Final prediction layer
     #prediction = Lambda(lambda x: K.sigmoid(K.sum(x)), output_shape=(1,))(predict_vector)
-    prediction = Dense(1, activation='sigmoid', init='lecun_uniform', name = 'prediction')(predict_vector)
+    prediction = Dense(1, activation='sigmoid', kernel_initializer=lecun_uniform(), name = 'prediction')(predict_vector)
 
-    model = Model(input=[user_input, item_input],
-                output=prediction)
+    model = Model(inputs=[user_input, item_input],
+                outputs=prediction)
 
     return model
 
@@ -89,9 +92,9 @@ def get_train_instances(train, num_negatives):
         item_input.append(i)
         labels.append(1)
         # negative instances
-        for t in xrange(num_negatives):
+        for t in range(num_negatives):
             j = np.random.randint(num_items)
-            while train.has_key((u, j)):
+            while (u, j) in train:
                 j = np.random.randint(num_items)
             user_input.append(u)
             item_input.append(j)
@@ -112,6 +115,11 @@
     topK = 10
     evaluation_threads = 1 #mp.cpu_count()
     print("GMF arguments: %s" %(args))
+
+    # # When saving only the model weights, use:
+    # model_out_file = 'Pretrain/%s_GMF_%d_%d.weights.h5' %(args.dataset, num_factors, time())
+
+    # If you want to save the full model (architecture + weights)
     model_out_file = 'Pretrain/%s_GMF_%d_%d.h5' %(args.dataset, num_factors, time())
 
     # Loading data
@@ -125,13 +133,13 @@
     # Build model
     model = get_model(num_users, num_items, num_factors, regs)
     if learner.lower() == "adagrad":
-        model.compile(optimizer=Adagrad(lr=learning_rate), loss='binary_crossentropy')
+        model.compile(optimizer=Adagrad(learning_rate=learning_rate), loss='binary_crossentropy')
     elif learner.lower() == "rmsprop":
-        model.compile(optimizer=RMSprop(lr=learning_rate), loss='binary_crossentropy')
+        model.compile(optimizer=RMSprop(learning_rate=learning_rate), loss='binary_crossentropy')
     elif learner.lower() == "adam":
-        model.compile(optimizer=Adam(lr=learning_rate), loss='binary_crossentropy')
+        model.compile(optimizer=Adam(learning_rate=learning_rate), loss='binary_crossentropy')
     else:
-        model.compile(optimizer=SGD(lr=learning_rate), loss='binary_crossentropy')
+        model.compile(optimizer=SGD(learning_rate=learning_rate), loss='binary_crossentropy')
     #print(model.summary())
 
     # Init performance
@@ -144,7 +152,7 @@
 
     # Train model
     best_hr, best_ndcg, best_iter = hr, ndcg, -1
-    for epoch in xrange(epochs):
+    for epoch in range(epochs):
         t1 = time()
         # Generate training instances
         user_input, item_input, labels = get_train_instances(train, num_negatives)
@@ -152,7 +160,7 @@
         # Training
         hist = model.fit([np.array(user_input), np.array(item_input)], #input
                          np.array(labels), # labels
-                         batch_size=batch_size, nb_epoch=1, verbose=0, shuffle=True)
+                         batch_size=batch_size, epochs=1, verbose=0, shuffle=True)
         t2 = time()
 
         # Evaluation
@@ -164,7 +172,11 @@
             if hr > best_hr:
                 best_hr, best_ndcg, best_iter = hr, ndcg, epoch
                 if args.out > 0:
-                    model.save_weights(model_out_file, overwrite=True)
+                    # # When saving only the model weights, use:
+                    # model.save_weights(model_out_file, overwrite=True)
+
+                    # If you want to save the full model (architecture + weights)
+                    model.save(model_out_file)
 
     print("End. Best Iteration %d: HR = %.4f, NDCG = %.4f. " %(best_iter, best_hr, best_ndcg))
     if args.out > 0:
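A standalone smoke test is a quick way to verify the migrated GMF layer API in the hunks above (`Multiply` in place of `merge(mode='mul')`, `embeddings_initializer`/`embeddings_regularizer` in place of `init`/`W_regularizer`). The sketch below is illustrative only — the `build_gmf` helper and the toy sizes are made up — and assumes Keras 3.x:

```python
import numpy as np
from keras.initializers import RandomNormal
from keras.layers import Dense, Embedding, Flatten, Input, Multiply
from keras.models import Model
from keras.regularizers import l2

def build_gmf(num_users, num_items, latent_dim, regs=(0, 0)):
    user_input = Input(shape=(1,), dtype='int32', name='user_input')
    item_input = Input(shape=(1,), dtype='int32', name='item_input')
    # Keras 3 argument names replace Keras 1's init / W_regularizer
    user_emb = Embedding(num_users, latent_dim, name='user_embedding',
                         embeddings_initializer=RandomNormal(stddev=0.05),
                         embeddings_regularizer=l2(regs[0]))
    item_emb = Embedding(num_items, latent_dim, name='item_embedding',
                         embeddings_initializer=RandomNormal(stddev=0.05),
                         embeddings_regularizer=l2(regs[1]))
    user_latent = Flatten()(user_emb(user_input))
    item_latent = Flatten()(item_emb(item_input))
    # Multiply() replaces merge([...], mode='mul')
    predict_vector = Multiply()([user_latent, item_latent])
    prediction = Dense(1, activation='sigmoid', kernel_initializer='lecun_uniform',
                       name='prediction')(predict_vector)
    return Model(inputs=[user_input, item_input], outputs=prediction)

model = build_gmf(num_users=100, num_items=200, latent_dim=8)
score = model.predict([np.array([[3]]), np.array([[7]])], verbose=0)
print(score.shape)  # (1, 1): one sigmoid score per (user, item) pair
```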
diff --git a/MLP.py b/MLP.py
index 70566c7..70b77a9 100644
--- a/MLP.py
+++ b/MLP.py
@@ -1,23 +1,22 @@
 '''
-Created on Aug 9, 2016
+Updated on Jan 29, 2025
 Keras Implementation of Multi-Layer Perceptron (GMF) recommender model in:
 He Xiangnan et al. Neural Collaborative Filtering. In WWW 2017.
 
 @author: Xiangnan He (xiangnanhe@gmail.com)
+@Updated by: Amrita Yadav
 '''
 import numpy as np
-
 import theano
 import theano.tensor as T
 import keras
-from keras import backend as K
-from keras import initializations
-from keras.regularizers import l2, activity_l2
-from keras.models import Sequential, Graph, Model
-from keras.layers.core import Dense, Lambda, Activation
-from keras.layers import Embedding, Input, Dense, merge, Reshape, Merge, Flatten, Dropout
-from keras.constraints import maxnorm
+from keras import initializers
+from keras.initializers import RandomNormal, lecun_uniform
+from keras.regularizers import l2
+from keras.models import Model
+from keras.layers import Dense
+from keras.layers import Embedding, Input, Dense, Flatten, Concatenate
 from keras.optimizers import Adagrad, Adam, SGD, RMSprop
 from evaluate import evaluate_model
 from Dataset import Dataset
@@ -54,7 +53,7 @@ def parse_args():
     return parser.parse_args()
 
 def init_normal(shape, name=None):
-    return initializations.normal(shape, scale=0.01, name=name)
+    return initializers.RandomNormal(stddev=0.01)(shape)
 
 def get_model(num_users, num_items, layers = [20,10], reg_layers=[0,0]):
     assert len(layers) == len(reg_layers)
@@ -63,28 +62,30 @@ def get_model(num_users, num_items, layers = [20,10], reg_layers=[0,0]):
     user_input = Input(shape=(1,), dtype='int32', name = 'user_input')
     item_input = Input(shape=(1,), dtype='int32', name = 'item_input')
 
-    MLP_Embedding_User = Embedding(input_dim = num_users, output_dim = layers[0]/2, name = 'user_embedding',
-                                  init = init_normal, W_regularizer = l2(reg_layers[0]), input_length=1)
-    MLP_Embedding_Item = Embedding(input_dim = num_items, output_dim = layers[0]/2, name = 'item_embedding',
-                                  init = init_normal, W_regularizer = l2(reg_layers[0]), input_length=1)
+    print("\n\n Dimension : ", int(layers[0]/2), layers[0]/2)
+
+    MLP_Embedding_User = Embedding(input_dim = num_users, output_dim = int(layers[0]/2), name = 'user_embedding',
+                                  embeddings_initializer=RandomNormal(mean=0.0, stddev=0.05), embeddings_regularizer = l2(reg_layers[0]), input_length=1)
+    MLP_Embedding_Item = Embedding(input_dim = num_items, output_dim = int(layers[0]/2), name = 'item_embedding',
+                                  embeddings_initializer=RandomNormal(mean=0.0, stddev=0.05), embeddings_regularizer = l2(reg_layers[0]), input_length=1)
 
     # Crucial to flatten an embedding vector!
     user_latent = Flatten()(MLP_Embedding_User(user_input))
     item_latent = Flatten()(MLP_Embedding_Item(item_input))
 
     # The 0-th layer is the concatenation of embedding layers
-    vector = merge([user_latent, item_latent], mode = 'concat')
+    vector = Concatenate()([user_latent, item_latent])
 
     # MLP layers
-    for idx in xrange(1, num_layer):
-        layer = Dense(layers[idx], W_regularizer= l2(reg_layers[idx]), activation='relu', name = 'layer%d' %idx)
+    for idx in range(1, num_layer):
+        layer = Dense(layers[idx], kernel_regularizer = l2(reg_layers[idx]), activation='relu', name = 'layer%d' %idx)
         vector = layer(vector)
 
     # Final prediction layer
-    prediction = Dense(1, activation='sigmoid', init='lecun_uniform', name = 'prediction')(vector)
+    prediction = Dense(1, activation='sigmoid', kernel_initializer=lecun_uniform(), name = 'prediction')(vector)
 
-    model = Model(input=[user_input, item_input],
-                output=prediction)
+    model = Model(inputs=[user_input, item_input],
+                outputs=prediction)
 
     return model
 
@@ -97,9 +98,9 @@ def get_train_instances(train, num_negatives):
         item_input.append(i)
         labels.append(1)
         # negative instances
-        for t in xrange(num_negatives):
+        for t in range(num_negatives):
             j = np.random.randint(num_items)
-            while train.has_key((u, j)):
+            while (u, j) in train:
                 j = np.random.randint(num_items)
             user_input.append(u)
             item_input.append(j)
@@ -122,6 +123,12 @@
     topK = 10
     evaluation_threads = 1 #mp.cpu_count()
     print("MLP arguments: %s " %(args))
+
+    # # When saving only the model weights, use:
+    #model_out_file = 'Pretrain/%s_MLP_%s_%d.weights.h5' %(args.dataset, args.layers, time())
+
+
+    # If you want to save the full model (architecture + weights)
     model_out_file = 'Pretrain/%s_MLP_%s_%d.h5' %(args.dataset, args.layers, time())
 
     # Loading data
@@ -135,13 +142,13 @@
     # Build model
     model = get_model(num_users, num_items, layers, reg_layers)
     if learner.lower() == "adagrad":
-        model.compile(optimizer=Adagrad(lr=learning_rate), loss='binary_crossentropy')
+        model.compile(optimizer=Adagrad(learning_rate=learning_rate), loss='binary_crossentropy')
     elif learner.lower() == "rmsprop":
-        model.compile(optimizer=RMSprop(lr=learning_rate), loss='binary_crossentropy')
+        model.compile(optimizer=RMSprop(learning_rate=learning_rate), loss='binary_crossentropy')
     elif learner.lower() == "adam":
-        model.compile(optimizer=Adam(lr=learning_rate), loss='binary_crossentropy')
+        model.compile(optimizer=Adam(learning_rate=learning_rate), loss='binary_crossentropy')
     else:
-        model.compile(optimizer=SGD(lr=learning_rate), loss='binary_crossentropy')
+        model.compile(optimizer=SGD(learning_rate=learning_rate), loss='binary_crossentropy')
 
     # Check Init performance
     t1 = time()
@@ -151,7 +158,7 @@
 
     # Train model
     best_hr, best_ndcg, best_iter = hr, ndcg, -1
-    for epoch in xrange(epochs):
+    for epoch in range(epochs):
         t1 = time()
         # Generate training instances
         user_input, item_input, labels = get_train_instances(train, num_negatives)
@@ -159,7 +166,7 @@
         # Training
         hist = model.fit([np.array(user_input), np.array(item_input)], #input
                          np.array(labels), # labels
-                         batch_size=batch_size, nb_epoch=1, verbose=0, shuffle=True)
+                         batch_size=batch_size, epochs=1, verbose=0, shuffle=True)
         t2 = time()
 
         # Evaluation
@@ -171,7 +178,11 @@
             if hr > best_hr:
                 best_hr, best_ndcg, best_iter = hr, ndcg, epoch
                 if args.out > 0:
-                    model.save_weights(model_out_file, overwrite=True)
+                    # # When saving only the model weights, use:
+                    # model.save_weights(model_out_file, overwrite=True)
+
+                    # If you want to save the full model (architecture + weights)
+                    model.save(model_out_file)
 
     print("End. Best Iteration %d: HR = %.4f, NDCG = %.4f. " %(best_iter, best_hr, best_ndcg))
     if args.out > 0:
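The `get_train_instances` hunks in GMF and MLP make the same two Python 3 changes: `xrange` becomes `range`, and `train.has_key((u, j))` becomes `(u, j) in train`. The latter works because the repo's training matrix (a scipy DOK sparse matrix) supports the same membership test as a plain dict. A small illustrative sketch of the negative-sampling loop under those changes — `toy_train` and `sample_negatives` are made up for this example:

```python
import numpy as np

def sample_negatives(train, num_items, u, num_negatives=4):
    """Draw items that user u has not interacted with."""
    negatives = []
    for _ in range(num_negatives):      # xrange -> range
        j = np.random.randint(num_items)
        while (u, j) in train:          # train.has_key((u, j)) -> (u, j) in train
            j = np.random.randint(num_items)
        negatives.append(j)
    return negatives

toy_train = {(0, 1): 1.0, (0, 3): 1.0}  # hypothetical (user, item) interactions
print(sample_negatives(toy_train, num_items=10, u=0))
```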
diff --git a/NeuMF.py b/NeuMF.py
index be04d53..678aa6f 100644
--- a/NeuMF.py
+++ b/NeuMF.py
@@ -1,9 +1,11 @@
 '''
+Updated on Jan 29, 2025
 Created on Aug 9, 2016
 Keras Implementation of Neural Matrix Factorization (NeuMF) recommender model in:
 He Xiangnan et al. Neural Collaborative Filtering. In WWW 2017.
 
 @author: Xiangnan He (xiangnanhe@gmail.com)
+@Updated by: Amrita Yadav
 '''
 import numpy as np
@@ -11,11 +13,12 @@
 import theano.tensor as T
 import keras
 from keras import backend as K
-from keras import initializations
-from keras.regularizers import l1, l2, l1l2
-from keras.models import Sequential, Model
-from keras.layers.core import Dense, Lambda, Activation
-from keras.layers import Embedding, Input, Dense, merge, Reshape, Merge, Flatten, Dropout
+from keras.initializers import RandomNormal, lecun_uniform
+from keras import initializers
+from keras.regularizers import l2
+from keras.models import Model
+from keras.layers import Dense
+from keras.layers import Embedding, Input, Dense, Multiply, Concatenate, Flatten
 from keras.optimizers import Adagrad, Adam, SGD, RMSprop
 from evaluate import evaluate_model
 from Dataset import Dataset
@@ -60,7 +63,7 @@ def parse_args():
     return parser.parse_args()
 
 def init_normal(shape, name=None):
-    return initializations.normal(shape, scale=0.01, name=name)
+    return initializers.RandomNormal(stddev=0.01)(shape)
 
 def get_model(num_users, num_items, mf_dim=10, layers=[10], reg_layers=[0], reg_mf=0):
     assert len(layers) == len(reg_layers)
@@ -71,38 +74,38 @@ def get_model(num_users, num_items, mf_dim=10, layers=[10], reg_layers=[0], reg_
     # Embedding layer
     MF_Embedding_User = Embedding(input_dim = num_users, output_dim = mf_dim, name = 'mf_embedding_user',
-                                  init = init_normal, W_regularizer = l2(reg_mf), input_length=1)
+                                  embeddings_initializer=RandomNormal(mean=0.0, stddev=0.05), embeddings_regularizer = l2(reg_mf), input_length=1)
     MF_Embedding_Item = Embedding(input_dim = num_items, output_dim = mf_dim, name = 'mf_embedding_item',
-                                  init = init_normal, W_regularizer = l2(reg_mf), input_length=1)
+                                  embeddings_initializer=RandomNormal(mean=0.0, stddev=0.05), embeddings_regularizer = l2(reg_mf), input_length=1)
 
-    MLP_Embedding_User = Embedding(input_dim = num_users, output_dim = layers[0]/2, name = "mlp_embedding_user",
-                                  init = init_normal, W_regularizer = l2(reg_layers[0]), input_length=1)
-    MLP_Embedding_Item = Embedding(input_dim = num_items, output_dim = layers[0]/2, name = 'mlp_embedding_item',
-                                  init = init_normal, W_regularizer = l2(reg_layers[0]), input_length=1)
+    MLP_Embedding_User = Embedding(input_dim = num_users, output_dim = int(layers[0]/2), name = "mlp_embedding_user",
+                                  embeddings_initializer=RandomNormal(mean=0.0, stddev=0.05), embeddings_regularizer = l2(reg_layers[0]), input_length=1)
+    MLP_Embedding_Item = Embedding(input_dim = num_items, output_dim = int(layers[0]/2), name = 'mlp_embedding_item',
+                                  embeddings_initializer=RandomNormal(mean=0.0, stddev=0.05), embeddings_regularizer = l2(reg_layers[0]), input_length=1)
 
     # MF part
     mf_user_latent = Flatten()(MF_Embedding_User(user_input))
     mf_item_latent = Flatten()(MF_Embedding_Item(item_input))
-    mf_vector = merge([mf_user_latent, mf_item_latent], mode = 'mul') # element-wise multiply
+    mf_vector = Multiply()([mf_user_latent, mf_item_latent]) # element-wise multiply
 
     # MLP part
     mlp_user_latent = Flatten()(MLP_Embedding_User(user_input))
     mlp_item_latent = Flatten()(MLP_Embedding_Item(item_input))
-    mlp_vector = merge([mlp_user_latent, mlp_item_latent], mode = 'concat')
-    for idx in xrange(1, num_layer):
-        layer = Dense(layers[idx], W_regularizer= l2(reg_layers[idx]), activation='relu', name="layer%d" %idx)
+    mlp_vector = Concatenate()([mlp_user_latent, mlp_item_latent])
+    for idx in range(1, num_layer):
+        layer = Dense(layers[idx], kernel_regularizer = l2(reg_layers[idx]), activation='relu', name="layer%d" %idx)
         mlp_vector = layer(mlp_vector)
 
     # Concatenate MF and MLP parts
     #mf_vector = Lambda(lambda x: x * alpha)(mf_vector)
     #mlp_vector = Lambda(lambda x : x * (1-alpha))(mlp_vector)
-    predict_vector = merge([mf_vector, mlp_vector], mode = 'concat')
+    predict_vector = Concatenate()([mf_vector, mlp_vector])
 
     # Final prediction layer
-    prediction = Dense(1, activation='sigmoid', init='lecun_uniform', name = "prediction")(predict_vector)
+    prediction = Dense(1, activation='sigmoid', kernel_initializer=lecun_uniform(), name = "prediction")(predict_vector)
 
-    model = Model(input=[user_input, item_input],
-                output=prediction)
+    model = Model(inputs=[user_input, item_input],
+                outputs=prediction)
 
     return model
 
@@ -120,13 +123,14 @@
     model.get_layer('mlp_embedding_item').set_weights(mlp_item_embeddings)
 
     # MLP layers
-    for i in xrange(1, num_layers):
+    for i in range(1, num_layers):
         mlp_layer_weights = mlp_model.get_layer('layer%d' %i).get_weights()
         model.get_layer('layer%d' %i).set_weights(mlp_layer_weights)
 
     # Prediction weights
     gmf_prediction = gmf_model.get_layer('prediction').get_weights()
     mlp_prediction = mlp_model.get_layer('prediction').get_weights()
+
     new_weights = np.concatenate((gmf_prediction[0], mlp_prediction[0]), axis=0)
     new_b = gmf_prediction[1] + mlp_prediction[1]
     model.get_layer('prediction').set_weights([0.5*new_weights, 0.5*new_b])
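The prediction-weight block at the end of `load_pretrain_model` is easy to check numerically: the NeuMF prediction layer sees the concatenation `[mf_vector | mlp_vector]`, so its kernel is the row-wise concatenation of the two pretrained kernels, and the 0.5 factor gives the GMF and MLP parts equal weight at initialization. A toy sketch with made-up weight shapes:

```python
import numpy as np

# Hypothetical pretrained prediction weights: (kernel, bias) pairs
gmf_kernel, gmf_bias = np.ones((8, 1)), np.array([0.1])        # GMF: 8 factors
mlp_kernel, mlp_bias = np.full((16, 1), 2.0), np.array([0.3])  # MLP: 16-d last layer

# Rows stack because the fused input is [mf_vector | mlp_vector]
new_weights = np.concatenate((gmf_kernel, mlp_kernel), axis=0)
new_b = gmf_bias + mlp_bias
fused = [0.5 * new_weights, 0.5 * new_b]
print(fused[0].shape, fused[1])  # (24, 1) [0.2]
```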
@@ -141,9 +145,9 @@
         item_input.append(i)
         labels.append(1)
         # negative instances
-        for t in xrange(num_negatives):
+        for t in range(num_negatives):
             j = np.random.randint(num_items)
-            while train.has_key((u, j)):
+            while (u, j) in train:
                 j = np.random.randint(num_items)
             user_input.append(u)
             item_input.append(j)
@@ -168,6 +172,11 @@
     topK = 10
     evaluation_threads = 1#mp.cpu_count()
     print("NeuMF arguments: %s " %(args))
+
+    # # When saving only the model weights, use:
+    # model_out_file = 'Pretrain/%s_NeuMF_%d_%s_%d.weights.h5' %(args.dataset, mf_dim, args.layers, time())
+
+    # If you want to save the full model (architecture + weights)
     model_out_file = 'Pretrain/%s_NeuMF_%d_%s_%d.h5' %(args.dataset, mf_dim, args.layers, time())
 
     # Loading data
@@ -181,13 +190,13 @@
     # Build model
     model = get_model(num_users, num_items, mf_dim, layers, reg_layers, reg_mf)
     if learner.lower() == "adagrad":
-        model.compile(optimizer=Adagrad(lr=learning_rate), loss='binary_crossentropy')
+        model.compile(optimizer=Adagrad(learning_rate=learning_rate), loss='binary_crossentropy')
     elif learner.lower() == "rmsprop":
-        model.compile(optimizer=RMSprop(lr=learning_rate), loss='binary_crossentropy')
+        model.compile(optimizer=RMSprop(learning_rate=learning_rate), loss='binary_crossentropy')
     elif learner.lower() == "adam":
-        model.compile(optimizer=Adam(lr=learning_rate), loss='binary_crossentropy')
+        model.compile(optimizer=Adam(learning_rate=learning_rate), loss='binary_crossentropy')
     else:
-        model.compile(optimizer=SGD(lr=learning_rate), loss='binary_crossentropy')
+        model.compile(optimizer=SGD(learning_rate=learning_rate), loss='binary_crossentropy')
 
     # Load pretrain model
     if mf_pretrain != '' and mlp_pretrain != '':
@@ -204,10 +213,17 @@
     print('Init: HR = %.4f, NDCG = %.4f' % (hr, ndcg))
     best_hr, best_ndcg, best_iter = hr, ndcg, -1
     if args.out > 0:
-        model.save_weights(model_out_file, overwrite=True)
+        # # When saving only the model weights, use:
+
+        # model.save_weights(model_out_file, overwrite=True)
+
+        # If you want to save the full model (architecture + weights)
+        model.save(model_out_file)
+
+
     # Training model
-    for epoch in xrange(num_epochs):
+    for epoch in range(num_epochs):
         t1 = time()
         # Generate training instances
         user_input, item_input, labels = get_train_instances(train, num_negatives)
@@ -215,7 +231,7 @@
         # Training
         hist = model.fit([np.array(user_input), np.array(item_input)], #input
                          np.array(labels), # labels
-                         batch_size=batch_size, nb_epoch=1, verbose=0, shuffle=True)
+                         batch_size=batch_size, epochs=1, verbose=0, shuffle=True)
         t2 = time()
 
         # Evaluation
@@ -227,7 +243,12 @@
             if hr > best_hr:
                 best_hr, best_ndcg, best_iter = hr, ndcg, epoch
                 if args.out > 0:
-                    model.save_weights(model_out_file, overwrite=True)
+                    ## When saving only the model weights, use:
+
+                    # model.save_weights(model_out_file, overwrite=True)
+
+                    # If you want to save the full model (architecture + weights)
+                    model.save(model_out_file)
 
     print("End. Best Iteration %d: HR = %.4f, NDCG = %.4f. " %(best_iter, best_hr, best_ndcg))
     if args.out > 0:
diff --git a/README.md b/README.md
index eabf9d3..f27cb9a 100644
--- a/README.md
+++ b/README.md
@@ -11,10 +11,22 @@ Three collaborative filtering models: Generalized Matrix Factorization (GMF), Mu
 Author: Dr. Xiangnan He (http://www.comp.nus.edu.sg/~xiangnan/)
 
 ## Environment Settings
-We use Keras with Theano as the backend.
-- Keras version: '1.0.7'
-- Theano version: '0.8.0'
+The code has been updated to run on Python 3.9.21.
+You can create a Python environment with:
+```
+python3.9 -m venv env_name
+```
+The updated code uses Keras with TensorFlow as the backend (Theano-PyMC replaces the original Theano dependency):
+
+- keras==3.8.0
+- tensorflow==2.18.0
+- Theano-PyMC==1.1.2
+To install the required dependencies, run:
+
+```
+pip install -r requirements.txt
+```
 
 ## Example to run the codes.
 The instruction of commands has been clearly stated in the codes (see the parse_args function).
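With the versions pinned above, a short script can confirm that the environment resolved as intended. Keras 3 picks its backend from the `KERAS_BACKEND` environment variable (defaulting to TensorFlow), so this is a reasonable first check after `pip install -r requirements.txt`; the expected version strings below are assumptions taken from requirements.txt:

```python
import os
os.environ.setdefault("KERAS_BACKEND", "tensorflow")  # must be set before importing keras

import keras
import tensorflow as tf

print("Keras:", keras.__version__)           # expected: 3.8.0
print("TensorFlow:", tf.__version__)         # expected: 2.18.0
print("Backend:", keras.backend.backend())   # expected: 'tensorflow'
```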
diff --git a/evaluate.py b/evaluate.py
index 729f07a..432846e 100644
--- a/evaluate.py
+++ b/evaluate.py
@@ -44,7 +44,7 @@ def evaluate_model(model, testRatings, testNegatives, K, num_thread):
         ndcgs = [r[1] for r in res]
         return (hits, ndcgs)
     # Single thread
-    for idx in xrange(len(_testRatings)):
+    for idx in range(len(_testRatings)):
         (hr,ndcg) = eval_one_rating(idx)
         hits.append(hr)
         ndcgs.append(ndcg)
@@ -61,7 +61,7 @@ def eval_one_rating(idx):
     users = np.full(len(items), u, dtype = 'int32')
     predictions = _model.predict([users, np.array(items)],
                                  batch_size=100, verbose=0)
-    for i in xrange(len(items)):
+    for i in range(len(items)):
         item = items[i]
         map_item_score[item] = predictions[i]
     items.pop()
@@ -79,7 +79,7 @@ def getHitRatio(ranklist, gtItem):
     return 0
 
 def getNDCG(ranklist, gtItem):
-    for i in xrange(len(ranklist)):
+    for i in range(len(ranklist)):
         item = ranklist[i]
         if item == gtItem:
             return math.log(2) / math.log(i+2)
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..a807bb9
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,5 @@
+keras==3.8.0
+numpy==2.0.2
+scipy==1.13.1
+tensorflow==2.18.0
+Theano-PyMC==1.1.2
\ No newline at end of file
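For reference, the leave-one-out metrics that evaluate.py computes reduce to a few lines: HR@K checks whether the held-out item appears in the top-K list, and NDCG@K rewards it by `log(2)/log(rank+2)`. A self-contained sketch with made-up scores — the helper names and `item_scores` are illustrative, not the repo's API:

```python
import heapq
import math

def hit_ratio(ranklist, gt_item):
    return 1 if gt_item in ranklist else 0

def ndcg(ranklist, gt_item):
    for i, item in enumerate(ranklist):
        if item == gt_item:
            return math.log(2) / math.log(i + 2)  # = 1 / log2(i + 2)
    return 0

item_scores = {7: 0.9, 3: 0.8, 5: 0.4, 1: 0.1}  # hypothetical predicted scores
ranklist = heapq.nlargest(3, item_scores, key=item_scores.get)
print(ranklist)                    # [7, 3, 5]
print(hit_ratio(ranklist, 3))      # 1 (held-out item 3 is in the top-3)
print(f"{ndcg(ranklist, 3):.4f}")  # 0.6309 (item 3 at rank index 1)
```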