MARI-posa committed on
Commit c1b3993
1 Parent(s): 7f410f4

Update stri.py

Files changed (1)
  1. stri.py +18 -12
stri.py CHANGED
```diff
@@ -2,6 +2,7 @@ import streamlit as st
 import torch
 import numpy as np
 import pandas as pd
+from PIL import Image
 from transformers import AutoTokenizer, AutoModel
 import re
 import pickle
@@ -17,7 +18,6 @@ model = AutoModel.from_pretrained(model_name, output_hidden_states=True)
 books = pd.read_csv('books_6000.csv')
 books.dropna(inplace=True)
 
-
 books = books[books['annotation'].apply(lambda x: len(x.split()) >= 10)]
 books.drop_duplicates(subset='title', keep='first', inplace=True)
 books = books.reset_index(drop=True)
@@ -42,24 +42,24 @@ max_len = 128
 # Read the user's query
 query = st.text_input("Введите запрос")
 
-if st.button('**Generating recommendations**'):
+if st.button('Сгенерировать'):
     with open("book_embeddings.pkl", "rb") as f:
         book_embeddings = pickle.load(f)
-
+
     query_tokens = tokenizer.encode(query, add_special_tokens=True,
                                     truncation=True, max_length=max_len)
-
+
     query_padded = np.array(query_tokens + [0] * (max_len - len(query_tokens)))
     query_mask = np.where(query_padded != 0, 1, 0)
-
+
     # Convert the numpy arrays to PyTorch tensors
     query_padded = torch.tensor(query_padded, dtype=torch.long)
     query_mask = torch.tensor(query_mask, dtype=torch.long)
-
+
     with torch.no_grad():
         query_embedding = model(query_padded.unsqueeze(0), query_mask.unsqueeze(0))
-    query_embedding = query_embedding[0][:, 0, :]
-
+    query_embedding = query_embedding[0][:, 0, :]
+
     # Compute cosine similarity between the query embedding and each annotation
     cosine_similarities = torch.nn.functional.cosine_similarity(
         query_embedding.squeeze(0),
@@ -67,8 +67,14 @@ if st.button('**Generating recommendations**'):
     )
 
     cosine_similarities = cosine_similarities.numpy()
-
+
     indices = np.argsort(cosine_similarities)[::-1]  # sort in descending order
-
-    for i in indices[:10]:
-        st.write(books['title'][i])
+
+    num_books_per_page = st.selectbox("Количество книг на странице:", [3, 5, 10], index=0)
+
+    for i in indices[:num_books_per_page]:
+        st.write("## " + books['title'][i])
+        st.write("**Автор:**", books['author'][i])
+        st.write("**Аннотация:**", books['annotation'][i])
+        st.image(Image.open(books['image_url'][i]))
+        st.write("---")
```
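Note: stri.py loads book_embeddings.pkl but never builds it. For context, here is a minimal sketch of how the annotation embeddings could have been precomputed with the same `tokenizer`, `model`, and `max_len`, assuming the same [CLS]-token pooling used for the query; the unbatched loop and variable names are illustrative, not the author's actual export script.

```python
# Sketch: precompute a [CLS] embedding for every annotation and pickle the list.
# Assumptions: `tokenizer`, `model`, `books`, and `max_len` are defined as in stri.py.
import pickle

import numpy as np
import torch

book_embeddings = []
with torch.no_grad():
    for annotation in books['annotation']:
        tokens = tokenizer.encode(annotation, add_special_tokens=True,
                                  truncation=True, max_length=max_len)
        padded = np.array(tokens + [0] * (max_len - len(tokens)))
        mask = np.where(padded != 0, 1, 0)
        output = model(torch.tensor(padded, dtype=torch.long).unsqueeze(0),
                       torch.tensor(mask, dtype=torch.long).unsqueeze(0))
        book_embeddings.append(output[0][:, 0, :])  # [CLS] embedding, shape (1, hidden)

with open("book_embeddings.pkl", "wb") as f:
    pickle.dump(book_embeddings, f)
```

Stacking the list into one tensor (e.g. with torch.cat) before pickling would let the similarity step in stri.py run as a single vectorized call instead of broadcasting per book.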
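One caveat on the new rendering loop: PIL's Image.open expects a local file path or a file object, so st.image(Image.open(books['image_url'][i])) will raise if image_url holds remote URLs; st.image also accepts a URL string directly, which avoids PIL entirely. If PIL preprocessing is still wanted, here is a sketch of a URL-tolerant loader; the `requests` dependency and the helper name are assumptions, not part of the commit.

```python
# Sketch: open a cover image whether `image_url` is a local path or an http(s) URL.
# Assumes the `requests` package is installed; error handling is minimal.
from io import BytesIO

import requests
from PIL import Image

def load_cover(src: str) -> Image.Image:
    """Open an image from a local path or an http(s) URL."""
    if src.startswith(("http://", "https://")):
        resp = requests.get(src, timeout=10)
        resp.raise_for_status()  # fail loudly on 4xx/5xx instead of passing bytes to PIL
        return Image.open(BytesIO(resp.content))
    return Image.open(src)

# Usage inside the loop:
# st.image(load_cover(books['image_url'][i]))
```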