app.py CHANGED
@@ -1,5 +1,3 @@
-# app.py
-
 import torch
 import pickle
 import gradio as gr
@@ -16,92 +14,64 @@ with open('item_encoder.pkl', 'rb') as f:
 with open('user_positive_items.pkl', 'rb') as f:
     user_positive_items = pickle.load(f)
 
-# Load the trained model
-class NCFModelWrapper:
-    def __init__(self, model_path, num_users, num_items, embedding_size=50, device='cpu'):
-        self.device = torch.device(device)
-        self.model = NCFModel(num_users, num_items, embedding_size=embedding_size).to(self.device)
-        self.model.load_state_dict(torch.load(model_path, map_location=self.device))
-        self.model.eval()
-
-    def predict(self, user, item):
-        with torch.no_grad():
-            user = torch.tensor([user], dtype=torch.long).to(self.device)
-            item = torch.tensor([item], dtype=torch.long).to(self.device)
-            output = self.model(user, item)
-            score = torch.sigmoid(output).item()
-            return score
-
-# Determine number of users and items from encoders
-num_users = len(user_encoder.classes_)
-num_items = len(item_encoder.classes_)
-
-# Initialize the model
-model = NCFModelWrapper(
-    model_path='best_ncf_model.pth',
-    num_users=num_users,
-    num_items=num_items,
-    embedding_size=50,  # Ensure this matches your trained model
-    device='cpu'  # Change to 'cuda' if GPU is available and desired
-)
 
 def recommend(user_id, num_recommendations=5):
     """
     Given a user ID, recommend top N items.
+
+    Args:
+        user_id (str): User ID for recommendation.
+        num_recommendations (int, optional): Number of recommendations to return. Defaults to 5.
+
+    Returns:
+        str: Top N recommendations for the user or an error message if the user ID is invalid.
     """
+
     try:
         user = user_encoder.transform([user_id])[0]
-    except:
-        return f"User ID '{user_id}'
-
+    except ValueError:
+        return f"Invalid User ID: '{user_id}'"
+
     # Get items the user has interacted with
     pos_items = user_positive_items.get(user, set())
-
+
     # Get all possible items
-    all_items = set(range(num_items))
-
+    all_items = set(range(len(item_encoder.classes_)))
+
     # Candidate items are those not interacted with
     candidate_items = list(all_items - pos_items)
-
+
+    # Load and use the model
+    device = 'cuda' if torch.cuda.is_available() else 'cpu'
+    model = NCFModel(num_users=len(user_encoder.classes_),
+                     num_items=len(item_encoder.classes_),
+                     embedding_size=50).to(device)
+    model.load_state_dict(torch.load('best_ncf_model.pth', map_location=device))
+    model.eval()
+
     # Predict scores for candidate items
     scores = []
-    for item in candidate_items:
-        score = model.predict(user, item)
-        scores.append((item, score))
-
+    with torch.no_grad():
+        for item in candidate_items:
+            score = torch.sigmoid(model(torch.tensor([user]).to(device),
+                                        torch.tensor([item]).to(device))).item()
+            scores.append((item, score))
+
     # Sort items based on score
     scores.sort(key=lambda x: x[1], reverse=True)
-
+
     # Get top N recommendations
     top_items = scores[:num_recommendations]
     recommendations = []
     for item_id, score in top_items:
         original_item_id = item_encoder.inverse_transform([item_id])[0]
         recommendations.append(f"Item ID: {original_item_id} (Score: {score:.4f})")
-
+
     return "\n".join(recommendations)
 
-# Define your recommendation function
-def recommend(user_id, num_recommendations):
-    # Your recommendation logic here
-    return f"Recommended {num_recommendations} items for User ID: {user_id}"
 
 # Define Gradio interface
 iface = gr.Interface(
     fn=recommend,
     inputs=[
-        gr.Textbox(lines=1, placeholder="Enter User ID", label="User ID"),
-        gr.Slider(minimum=1, maximum=20, step=1, value=5, label="Number of Recommendations")  # 'value' instead of 'default'
-    ],
-    outputs="text",
-    title="Neural Collaborative Filtering Recommendation System",
-    description="Enter a User ID to receive personalized item recommendations.",
-    examples=[
-        ["user_1", 5],
-        ["user_2", 10],
-        ["user_3", 7]
-    ]
-)
-
-if __name__ == "__main__":
-    iface.launch()
+        gr.Textbox(lines=1, placeholder="Enter User ID", label="User ID"),
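Note: NCFModel itself is not touched by this diff; it presumably comes from the unchanged lines near the top of app.py or from an imported module. For orientation, here is a minimal sketch of a model that matches the calls made above (constructed as NCFModel(num_users, num_items, embedding_size=50) and invoked as model(user, item) to produce a raw logit that recommend() passes through torch.sigmoid()). The embedding-concatenation MLP and its hidden width are assumptions for illustration, not the Space's actual architecture.

import torch
import torch.nn as nn

class NCFModel(nn.Module):
    # Hypothetical stand-in: only the constructor signature and the
    # (user, item) -> logit forward pass are taken from app.py above.
    def __init__(self, num_users, num_items, embedding_size=50):
        super().__init__()
        self.user_embedding = nn.Embedding(num_users, embedding_size)
        self.item_embedding = nn.Embedding(num_items, embedding_size)
        self.mlp = nn.Sequential(
            nn.Linear(2 * embedding_size, 64),  # hidden width of 64 is assumed
            nn.ReLU(),
            nn.Linear(64, 1),
        )

    def forward(self, user, item):
        # Concatenate user and item embeddings and map them to one unnormalized score;
        # the caller applies torch.sigmoid() to turn it into an interaction probability.
        x = torch.cat([self.user_embedding(user), self.item_embedding(item)], dim=-1)
        return self.mlp(x).squeeze(-1)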
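The updated recommend() scores candidates one at a time inside the torch.no_grad() loop. Assuming the real NCFModel accepts batched user and item tensors (the sketch above does), the same scores can be computed in a single forward pass, which helps when the item catalogue is large. A rough sketch using the variables already defined inside recommend():

with torch.no_grad():
    # Repeat the encoded user once per candidate item and score the whole batch at once.
    users = torch.full((len(candidate_items),), int(user), dtype=torch.long, device=device)
    items = torch.tensor(candidate_items, dtype=torch.long, device=device)
    batch_scores = torch.sigmoid(model(users, items))
    scores = list(zip(candidate_items, batch_scores.tolist()))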