# Importing required libraries
import pandas as pd
import numpy as np
import streamlit as st
from sentence_transformers import SentenceTransformer, util

st.title("Semantic-Search-Transformer")

# Importing the data
df = pd.read_csv('medium_articles.csv')

# Loading the sentence-transformer model (downloaded on first run)
embedder = SentenceTransformer('all-MiniLM-L6-v2')

# Predictions
# User-test / search function (originally in prediction_script.py)

# Load the precomputed corpus embeddings
all_embeddings = np.load('mediumArticle_embeddings.npy')

# Search function: embed the query, find the top_k most similar articles,
# and display their titles with similarity scores
def prediction(query, top_k, corpus_embeddings, df):
    query_embedding = embedder.encode(query, convert_to_tensor=True)
    hits = util.semantic_search(query_embedding, corpus_embeddings, top_k=top_k)
    hits = hits[0]  # Get the hits for the first (and only) query
    st.write(f"Top {top_k} most similar articles in the corpus:")
    for hit in hits:
        hit_id = hit['corpus_id']
        article_data = df.iloc[hit_id]
        title = article_data["title"]
        st.write("-", title, "(Score: {:.4f})".format(hit['score']))

# Streamlit input widgets
query = st.text_input('Enter your query here', 'Artificial Intelligence')
top_k = st.number_input('How many results do you want to see?', min_value=2, step=1)

if st.button("Search"):
    prediction(query, int(top_k), all_embeddings, df)
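
# The app above expects 'mediumArticle_embeddings.npy' to already exist on disk.
# Below is a minimal sketch of how such a file could be precomputed with the same
# model, assuming the article titles are the text being embedded (the column the
# author actually encoded may differ, e.g. title plus body text). It would be run
# once, e.g. as a separate script, before launching the Streamlit app.
import numpy as np
import pandas as pd
from sentence_transformers import SentenceTransformer

df = pd.read_csv('medium_articles.csv')
embedder = SentenceTransformer('all-MiniLM-L6-v2')  # same model as the app

# Encode every article; convert_to_numpy returns an array that np.save can store directly
corpus_embeddings = embedder.encode(
    df["title"].fillna("").tolist(),
    convert_to_numpy=True,
    show_progress_bar=True,
)
np.save('mediumArticle_embeddings.npy', corpus_embeddings)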