isayahc committed on
Commit
0a19530
1 Parent(s): b8bc54c

Upload 4 files

Browse files
Files changed (4) hide show
  1. app.py +114 -0
  2. create_chain.py +80 -0
  3. prompt.py +81 -0
  4. requirements.txt +88 -0
app.py ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+
3
+ from langchain import LLMChain
4
+ from langchain import PromptTemplate
5
+ from langchain.llms import Cohere
6
+
7
+ # from create_chain import chain as llm_chain
8
+ from create_chain import create_chain_from_template
9
+ from prompt import wikipedia_template, general_internet_template
10
+ from langchain.retrievers import CohereRagRetriever
11
+ from langchain.chat_models import ChatCohere
12
+
13
+ import os
14
+ from dotenv import load_dotenv
15
+
16
+
17
+
18
load_dotenv()  # take environment variables from .env.
# https://pypi.org/project/python-dotenv/

# API key for the Cohere service; expected in the environment or a .env file.
# May be None if unset — ChatCohere will then fail at request time.
COHERE_API_KEY = os.getenv("COHERE_API_KEY")


# Sample queries offered as one-click examples beneath the chat input box.
examples = [
    ["What is Cellular Automata and who created it?"],
    ["What is Cohere"],
    ["who is Katherine Johnson"],
]
29
+
30
def create_UI(llm_chain):
    """Build the Gradio chat interface.

    Args:
        llm_chain: initial RAG chain used to answer questions; the radio
            selector can swap it for a differently-prompted chain at runtime.

    Returns:
        A ``gr.Blocks`` demo, ready to be queued and launched.
    """
    # Mutable holder so every callback below reads the *currently selected*
    # chain. Rebinding a plain local inside one callback would not be visible
    # to the others.
    state = {"chain": llm_chain}

    with gr.Blocks() as demo:
        radio = gr.Radio(
            ["wikipedia only", "any website"],
            label="What kind of essay would you like to write?",
            value="wikipedia only",
        )

        chatbot = gr.Chatbot()
        msg = gr.Textbox(info="Enter your question here, press enter to submit query")
        clear = gr.Button("Clear")

        gr.Examples(examples=examples, label="Examples", inputs=msg)

        def user(user_message, history):
            # Append the user's message with a placeholder bot reply slot.
            return "", history + [[user_message, None]]

        def bot(history):
            # Answer the most recent user message with the active chain.
            print("Question: ", history[-1][0])
            bot_message = state["chain"].invoke(history[-1][0])
            print("Response: ", bot_message)
            history[-1][1] = bot_message
            return history

        def change_textbox(choice):
            # Rebuild the chain with the template matching the selection.
            # NOTE(review): relies on module-level ``rag`` and ``llm_model``
            # created in the ``__main__`` block — confirm they exist before
            # the UI is shown.
            if choice == "wikipedia only":
                template = wikipedia_template
            elif choice == "any website":
                template = general_internet_template
            else:
                # Only the two options above exist; ignore anything else.
                return
            state["chain"] = create_chain_from_template(template, rag, llm_model)

        # Bug fix: the original routed the freshly built chain object into a
        # Textbox output (not a valid component value) and never updated the
        # chain actually used by ``bot``. The selection now swaps the chain
        # held in ``state`` instead.
        radio.change(fn=change_textbox, inputs=radio, outputs=None)
        msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(bot, chatbot, chatbot)
        clear.click(lambda: None, None, chatbot, queue=False)
    return demo
90
+
91
+
92
if __name__ == "__main__":
    # Build the default (wikipedia-only) chain and launch the chat UI.
    llm_model = ChatCohere(
        cohere_api_key=COHERE_API_KEY,
    )

    # Retriever that lets Cohere ground its answers in retrieved documents.
    rag = CohereRagRetriever(llm=llm_model)

    # Fix: the original also built an unused ``prompt`` PromptTemplate here;
    # ``create_chain_from_template`` constructs its own prompt internally.
    llm_chain = create_chain_from_template(
        wikipedia_template,
        rag,
        llm_model,
    )

    demo = create_UI(llm_chain)
    demo.queue()
    # share=True exposes a public Gradio link in addition to localhost.
    demo.launch(share=True)
create_chain.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+
3
+ from langchain.chat_models import ChatCohere
4
+ from langchain.schema import AIMessage, HumanMessage
5
+
6
+
7
+ ## cohere with connector
8
+ ## cohere with internet
9
+
10
+ # https://python.langchain.com/docs/modules/data_connection/retrievers/
11
+ # https://python.langchain.com/docs/integrations/llms/cohere
12
+
13
+ from langchain.chat_models import ChatCohere
14
+ from langchain.retrievers import CohereRagRetriever
15
+ from langchain.schema.document import Document
16
+
17
+ from langchain.chains import LLMChain
18
+ from langchain.prompts import PromptTemplate
19
+
20
+
21
+ from langchain.chat_models import ChatOpenAI
22
+ from langchain.prompts import ChatPromptTemplate
23
+ from langchain.schema import StrOutputParser
24
+ from langchain.schema.runnable import RunnablePassthrough
25
+
26
+
27
+ from langchain.prompts import (
28
+ ChatPromptTemplate,
29
+ MessagesPlaceholder,
30
+ SystemMessagePromptTemplate,
31
+ HumanMessagePromptTemplate,
32
+ )
33
+ from dotenv import load_dotenv
34
+
35
+ from prompt import wikipedia_template, general_internet_template
36
+
37
load_dotenv()  # take environment variables from .env.
# https://pypi.org/project/python-dotenv/

# Cohere API key pulled from the environment (may be None if unset).
COHERE_API_KEY = os.getenv("COHERE_API_KEY")
41
+
42
def format_docs(docs):
    """Join the page contents of *docs* into one string, blank-line separated."""
    contents = (doc.page_content for doc in docs)
    return "\n\n".join(contents)
44
+
45
+
46
def create_chain_from_template(template, retriever, model):
    """Compose a RAG pipeline: retrieve docs, fill the prompt, call the model.

    Args:
        template: prompt template string containing a ``{query}`` placeholder.
        retriever: a LangChain retriever that maps the query to documents.
        model: the chat model that generates the final answer.

    Returns:
        A runnable LCEL chain mapping a query string to a plain-text answer.
    """
    prompt = PromptTemplate(template=template, input_variables=["query"])
    # NOTE(review): the retrieved documents are formatted into "context", but
    # the templates in prompt.py only reference {query} — so the context value
    # appears unused by the prompt. Confirm whether {context} should be added
    # to the templates (and to input_variables).
    chain = (
        {"context": retriever | format_docs, "query": RunnablePassthrough()}
        | prompt
        | model
        | StrOutputParser()
    )
    return chain
55
+
56
+
57
+
58
if __name__ == "__main__":
    # Smoke test: build a wikipedia-grounded chain and run one sample query.
    llm_model = ChatCohere(
        cohere_api_key=COHERE_API_KEY,
    )

    rag = CohereRagRetriever(llm=llm_model)

    # Fix: the original built an unused ``prompt`` PromptTemplate here;
    # ``create_chain_from_template`` constructs its own prompt internally.
    llm_chain = create_chain_from_template(
        wikipedia_template,
        rag,
        llm_model,
    )

    sample_query = "What is Cellular Automata and who created it?"
    sample_output = llm_chain.invoke(sample_query)
    print(sample_output)
79
+
80
+
prompt.py ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Prompt for essays grounded exclusively in Wikipedia sources.
# Fix: corrected the typos in the original prompt ("oranization",
# "refernces", "refering", "inital", "please as", "geenrating"), which
# degrade instruction quality for the model.
wikipedia_template = """Question: {query}

Please only use wikipedia when searching for the answer.


When given a query you must generate a wikipedia article based on the query given;
You must organize your article into sections just like in wikipedia.
The structure is open ended however you must write this article in markdown;
Also you must have a reference section at the end with a list of all your references;
If you are unsure about the exact person the user is referring to please ask questions;

For the sake of clarity please add new lines between your initial output and the
generated wikipedia article.

If there are many pages for a similar person or entity please ask
the user to specify which one they are talking about before generating the article.

Please make sure to include in-line citations

for example:
fact_1 [source_1]
fact_2 [source_2, source_3]
Answer:
"""
28
+
29
+ # general_internet_template = """Question: {query}
30
+
31
+ # Please only use {website_list} when searching for the answer.
32
+
33
+
34
+ # When given a query you must generate a wikipedia article based on the query given;
35
+ # You must oranization your article into sections just like in wikipedia
36
+ # The structure is open ended however you must write this article in markdown;
37
+ # Also you must have a reference section at the end with a list of all your refernces;
38
+ # If you are unsure about the exact person the user is refering to please ask questions;
39
+
40
+ # For the sake of clarity please add new lines between your inital output and the
41
+ # generated wikipedia article
42
+
43
+
44
+ # If there are many pages for a similar person or entity please as
45
+ # the user to specify which one they are talking about before geenrating the article
46
+
47
+ # Please make sure to include in-line citations
48
+
49
+ # for example:
50
+ # fact_1 [source_1]
51
+ # fact_2 [source_2, source_3]
52
+ # Answer:
53
+ # """
54
+
55
+
56
# Prompt for essays that may draw on any website, not just Wikipedia.
# Fix: corrected the typos in the original prompt ("so needed",
# "oranization", "refernces", "refering", "inital", "please as",
# "geenrating"), which degrade instruction quality for the model.
general_internet_template = """Question: {query}

Use any website as needed to help the user.


When given a query you must generate a wikipedia article based on the query given;
You must organize your article into sections just like in wikipedia.
The structure is open ended however you must write this article in markdown;
Also you must have a reference section at the end with a list of all your references;
If you are unsure about the exact person the user is referring to please ask questions;

For the sake of clarity please add new lines between your initial output and the
generated wikipedia article.

If there are many pages for a similar person or entity please ask
the user to specify which one they are talking about before generating the article.

Please make sure to include in-line citations

for example:
fact_1 [source_1]
fact_2 [source_2, source_3]
Answer:
"""
requirements.txt ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ aiofiles==23.2.1
2
+ aiohttp==3.8.6
3
+ aiosignal==1.3.1
4
+ altair==5.1.2
5
+ annotated-types==0.6.0
6
+ anyio==3.7.1
7
+ async-timeout==4.0.3
8
+ attrs==23.1.0
9
+ backoff==2.2.1
10
+ certifi==2023.7.22
11
+ charset-normalizer==3.3.2
12
+ click==8.1.7
13
+ cohere==4.34
14
+ colorama==0.4.6
15
+ contourpy==1.2.0
16
+ cycler==0.12.1
17
+ dataclasses-json==0.6.2
18
+ exceptiongroup==1.1.3
19
+ fastapi==0.104.1
20
+ fastavro==1.8.2
21
+ ffmpy==0.3.1
22
+ filelock==3.13.1
23
+ fonttools==4.44.3
24
+ frozenlist==1.4.0
25
+ fsspec==2023.10.0
26
+ gradio==4.4.0
27
+ gradio_client==0.7.0
28
+ greenlet==3.0.1
29
+ h11==0.14.0
30
+ httpcore==1.0.2
31
+ httpx==0.25.1
32
+ huggingface-hub==0.19.4
33
+ idna==3.4
34
+ importlib-metadata==6.8.0
35
+ importlib-resources==6.1.1
36
+ Jinja2==3.1.2
37
+ jsonpatch==1.33
38
+ jsonpointer==2.4
39
+ jsonschema==4.20.0
40
+ jsonschema-specifications==2023.11.1
41
+ kiwisolver==1.4.5
42
+ langchain==0.0.336
43
+ langsmith==0.0.64
44
+ markdown-it-py==3.0.0
45
+ MarkupSafe==2.1.3
46
+ marshmallow==3.20.1
47
+ matplotlib==3.8.1
48
+ mdurl==0.1.2
49
+ multidict==6.0.4
50
+ mypy-extensions==1.0.0
51
+ numpy==1.26.2
52
+ orjson==3.9.10
53
+ packaging==23.2
54
+ pandas==2.1.3
55
+ Pillow==10.1.0
56
+ pydantic==2.5.1
57
+ pydantic_core==2.14.3
58
+ pydub==0.25.1
59
+ Pygments==2.16.1
60
+ pyparsing==3.1.1
61
+ python-dateutil==2.8.2
62
+ python-dotenv==1.0.0
63
+ python-multipart==0.0.6
64
+ pytz==2023.3.post1
65
+ PyYAML==6.0.1
66
+ referencing==0.31.0
67
+ requests==2.31.0
68
+ rich==13.7.0
69
+ rpds-py==0.13.0
70
+ semantic-version==2.10.0
71
+ shellingham==1.5.4
72
+ six==1.16.0
73
+ sniffio==1.3.0
74
+ SQLAlchemy==2.0.23
75
+ starlette==0.27.0
76
+ tenacity==8.2.3
77
+ tomlkit==0.12.0
78
+ toolz==0.12.0
79
+ tqdm==4.66.1
80
+ typer==0.9.0
81
+ typing-inspect==0.9.0
82
+ typing_extensions==4.8.0
83
+ tzdata==2023.3
84
+ urllib3==2.1.0
85
+ uvicorn==0.24.0.post1
86
+ websockets==11.0.3
87
+ yarl==1.9.2
88
+ zipp==3.17.0