# app.py
import pickle

import gradio as gr
import numpy as np
import torch

from model import NCFModel

# Load encoders and user_positive_items
with open('user_encoder.pkl', 'rb') as f:
    user_encoder = pickle.load(f)
with open('item_encoder.pkl', 'rb') as f:
    item_encoder = pickle.load(f)
with open('user_positive_items.pkl', 'rb') as f:
    user_positive_items = pickle.load(f)

# Load the trained model
class NCFModelWrapper:
    def __init__(self, model_path, num_users, num_items, embedding_size=50, device='cpu'):
        self.device = torch.device(device)
        self.model = NCFModel(num_users, num_items, embedding_size=embedding_size).to(self.device)
        self.model.load_state_dict(torch.load(model_path, map_location=self.device))
        self.model.eval()

    def predict(self, user, item):
        with torch.no_grad():
            user = torch.tensor([user], dtype=torch.long).to(self.device)
            item = torch.tensor([item], dtype=torch.long).to(self.device)
            output = self.model(user, item)
            score = torch.sigmoid(output).item()
        return score

# Determine the number of users and items from the fitted encoders
num_users = len(user_encoder.classes_)
num_items = len(item_encoder.classes_)

# Initialize the model
model = NCFModelWrapper(
    model_path='best_ncf_model.pth',
    num_users=num_users,
    num_items=num_items,
    embedding_size=50,  # Must match the embedding size used during training
    device='cpu'        # Change to 'cuda' if a GPU is available and desired
)

def recommend(user_id, num_recommendations=5):
    # Map the raw user ID to the internal index used by the model
    try:
        user = user_encoder.transform([user_id])[0]
    except ValueError:
        return f"User ID '{user_id}' not found."

    # Gradio sliders may pass the value as a float; slicing needs an int
    num_recommendations = int(num_recommendations)

    # Items the user has already interacted with
    pos_items = user_positive_items.get(user, set())
    # All possible items
    all_items = set(range(num_items))
    # Candidate items are those not yet interacted with
    candidate_items = list(all_items - pos_items)

    # Predict a score for each candidate item
    scores = []
    for item in candidate_items:
        score = model.predict(user, item)
        scores.append((item, score))

    # Sort candidates by score, highest first
    scores.sort(key=lambda x: x[1], reverse=True)

    # Take the top N and map internal indices back to the original item IDs
    top_items = scores[:num_recommendations]
    recommendations = []
    for item_id, score in top_items:
        original_item_id = item_encoder.inverse_transform([item_id])[0]
        recommendations.append(f"Item ID: {original_item_id} (Score: {score:.4f})")
    return "\n".join(recommendations)

# Define the Gradio interface
iface = gr.Interface(
    fn=recommend,
    inputs=[
        gr.Textbox(lines=1, placeholder="Enter User ID", label="User ID"),
        gr.Slider(minimum=1, maximum=20, step=1, value=5, label="Number of Recommendations")
    ],
    outputs="text",
    title="Neural Collaborative Filtering Recommendation System",
    description="Enter a User ID to receive personalized item recommendations.",
    examples=[
        ["user_1", 5],
        ["user_2", 10],
        ["user_3", 7]
    ]
)

if __name__ == "__main__":
    iface.launch()
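
# --- Optional sketch (not part of the original app): batched candidate scoring ---
# Scoring one item per forward pass in recommend() can be slow for large catalogs.
# The helper below is a minimal batched alternative, assuming NCFModel accepts
# 1-D LongTensor batches of user and item indices, exactly as the single-item
# call in NCFModelWrapper.predict implies. The function name and batch_size are
# illustrative, not from the original code.
def predict_scores_batched(wrapper, user, items, batch_size=1024):
    scores = []
    with torch.no_grad():
        for start in range(0, len(items), batch_size):
            chunk = items[start:start + batch_size]
            users_t = torch.full((len(chunk),), int(user), dtype=torch.long, device=wrapper.device)
            items_t = torch.tensor(chunk, dtype=torch.long, device=wrapper.device)
            output = wrapper.model(users_t, items_t)
            scores.extend(torch.sigmoid(output).view(-1).tolist())
    return scores

# Usage inside recommend() would then look like, e.g.:
#   scores = list(zip(candidate_items, predict_scores_batched(model, user, candidate_items)))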