import os
import time
import openai
import gradio as gr
import polars as pl
from sentence_transformers import SentenceTransformer
from langchain.vectorstores.azuresearch import AzureSearch

# from langchain.chat_models import AzureChatOpenAI
# from langchain.schema import SystemMessage, HumanMessage
from dotenv import load_dotenv

load_dotenv()

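# Azure OpenAI and Azure Cognitive Search settings, read from the environment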
openai.api_type = "azure"
openai.api_version = "2023-03-15-preview"
openai.api_key = os.getenv("OPENAI_API_KEY")
openai.api_base = os.getenv("OPENAI_API_BASE")
vector_store_address = os.getenv("VECTOR_STORE_URL")
vector_store_password = os.getenv("VECTOR_STORE_KEY")
index_name = "motor-gm-search"

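# Year/make/model lookup table that populates the dropdowns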
df = pl.read_csv("year-make-model.csv")

years = df["year"].unique().to_list()
makes = df["make"].unique().to_list()
models = df["model"].unique().to_list()

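# System prompt; retrieved search results are appended to it at chat time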
with open("sys_prompt.txt", "r") as f:
    prompt = f.read()


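# Encode a single query with the sentence-transformers model for vector search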
def embed(message):
    return embedder.encode([message])[0]


# llm = AzureChatOpenAI(deployment_name="chatserver35turbo")
embedder = SentenceTransformer("BAAI/bge-small-en")
search = AzureSearch(
    azure_search_endpoint=vector_store_address,
    azure_search_key=vector_store_password,
    index_name=index_name,
    embedding_function=embed,
)


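# Cascading dropdowns: choosing a year narrows the makes; choosing a make narrows the models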
def filter_makes(year):
    df1 = df.filter(pl.col("year") == int(year))
    choices = sorted(df1["make"].unique().to_list())
    return gr.Dropdown.update(choices=choices, interactive=True)


def filter_models(year, make):
    df1 = df.filter(pl.col("year") == int(year))
    df1 = df1.filter(pl.col("make") == make)
    choices = sorted(df1["model"].unique().to_list())
    return gr.Dropdown.update(choices=choices, interactive=True)


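# Retrieve the top-k matching chunks for the selected vehicle from the Azure Search index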
def search_db(query, year, make, model, k=5, s_type="similarity"):
    filters = f"year eq {year} and make eq '{make}' and model eq '{model}'"

    res = []
    if s_type == "hybrid":
        res = search.similarity_search(query, k, search_type=s_type, filters=filters)
    else:
        # Over-fetch with progressively larger batches until we have k
        # filtered results or hit the cap, then trim to k below.
        mult = 1
        while len(res) < k and mult <= 16:
            res = search.similarity_search(
                query, 100 * mult, search_type=s_type, filters=filters
            )
            mult *= 2
        res = res[:k]

    results = []
    for r in res:
        results.append(
            {
                "title": r.metadata["title"],
                "content": r.page_content,
            }
        )
    return str(results)


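# Chat handler: streams either a validation message or the model's answer chunk by chunk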
def respond(message, history, year, make, model, search_type):
    if not year or not make or not model:
        msg = "Please select a year, make, and model."
        # return msg
        for i in range(len(msg)):
            time.sleep(0.02)
            yield msg[: i + 1]
    else:
        results = search_db(message, year, make, model, k=5, s_type=search_type)

        hist = []
        hist.append(
            {
                "role": "system",
                "content": prompt + results,
            }
        )
        hist.append(
            {
                "role": "user",
                "content": f"Year: {year}\nMake: {make}\nModel: {model}\n\n{message}",
            }
        )
        model = "chatserver35turbo16k"
        res = openai.ChatCompletion.create(
            deployment_id=model, messages=hist, temperature=0.0, stream=True
        )
        msg = ""
        # return str(res["choices"][0]["message"]["content"])
        for chunk in res:
            if "content" in chunk["choices"][0]["delta"]:
                msg = msg + chunk["choices"][0]["delta"]["content"]
                yield msg


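# Gradio UI: vehicle selectors, search-type picker, and a chat interface wired to respond()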
with gr.Blocks(
    css="footer {visibility: hidden} #component-8 {height: 80vh !important} #component-9 {height: 75vh !important}"
) as app:
    with gr.Row():
        year = gr.Dropdown(years, label="Year")
        make = gr.Dropdown([], label="Make", interactive=False)
        model = gr.Dropdown([], label="Model", interactive=False)
        types = ["similarity", "hybrid"]
        search_type = gr.Dropdown(types, label="Search Type", value="hybrid")
        year.change(filter_makes, year, make)
        make.change(filter_models, [year, make], model)
    row = [year, make, model, search_type]
    gr.ChatInterface(respond, additional_inputs=row).queue()

app.queue().launch()