# imports
import together
import os
os.environ["TOGETHER_API_KEY"] = "6e132bb99c767328701e4870bad6b3234b94ee701dbf7b995cdbec44fb01687a"
# set your API key
together.api_key = os.environ["TOGETHER_API_KEY"]
# list available models and descriptions
models = together.Models.list()
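# Models.start spins up a hosted instance of the model on Together's
# infrastructure (pre-1.0 together SDK API); the completion calls below hit it.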
together.Models.start("togethercomputer/llama-2-70b-chat")
from typing import Any, Dict, List, Optional

from pydantic import Extra, root_validator

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
class TogetherLLM(LLM):
    """Together large language models."""

    model: str = "togethercomputer/llama-2-70b-chat"
    """Model endpoint to use."""

    together_api_key: str = ""
    """Together API key (falls back to the TOGETHER_API_KEY environment variable)."""

    temperature: float = 0.0
    """Sampling temperature to use."""

    max_tokens: int = 512
    """The maximum number of tokens to generate in the completion."""

    class Config:
        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that the API key is set."""
        api_key = get_from_dict_or_env(
            values, "together_api_key", "TOGETHER_API_KEY"
        )
        values["together_api_key"] = api_key
        return values

    @property
    def _llm_type(self) -> str:
        """Return type of LLM."""
        return "together"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call the Together completion endpoint."""
        together.api_key = self.together_api_key
        output = together.Complete.create(
            prompt,
            model=self.model,
            max_tokens=self.max_tokens,
            temperature=self.temperature,
        )
        text = output['output']['choices'][0]['text']
        # honour LangChain stop sequences if the caller supplies them
        if stop is not None:
            text = enforce_stop_tokens(text, stop)
        return text
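# Quick sanity check of the wrapper (a sketch; left commented out so running
# this file doesn't trigger an extra API call):
# test_llm = TogetherLLM()
# print(test_llm("Say hello in one short sentence."))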
# imports for the retrieval pipeline
from langchain.vectorstores import Chroma
from langchain.chains import RetrievalQA
from langchain.document_loaders import DirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import HuggingFaceInstructEmbeddings
# from langchain.document_loaders import TextLoader
# from langchain.document_loaders import PyPDFLoader
# (the InstructorEmbedding package must be installed for the embeddings below)
loader = DirectoryLoader('Data')
documents = loader.load()
print(f"Loaded {len(documents)} documents")
# split the text into overlapping chunks for retrieval
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1024, chunk_overlap=64)
texts = text_splitter.split_documents(documents)
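# Optional sanity check: confirm how many chunks the splitter produced
# print(f"Split {len(documents)} documents into {len(texts)} chunks")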
# HF Instructor embeddings (swap model_kwargs to {"device": "cuda"} on a GPU)
instructor_embeddings = HuggingFaceInstructEmbeddings(
    model_name="hkunlp/instructor-base",
    model_kwargs={"device": "cpu"},
)
"""## create the DB
This will take a bit of time on a T4 GPU
"""
persist_directory = 'db'
## Here is the nmew embeddings being used
embedding = instructor_embeddings
vectordb = Chroma.from_documents(
    documents=texts,
    embedding=embedding,
    persist_directory=persist_directory,
)
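# Optionally flush the index to disk so later runs can reload it instead of
# re-embedding everything:
# vectordb.persist()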
"""## Make a retriever"""
retriever = vectordb.as_retriever(search_kwargs={"k": 5})
"""## Make a chain"""
llm = TogetherLLM(
    model="togethercomputer/llama-2-70b-chat",
    temperature=0.0,
    max_tokens=1024,
)
DEFAULT_SYSTEM_PROMPT = """
You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.
""".strip()
def generate_prompt(prompt: str, system_prompt: str = DEFAULT_SYSTEM_PROMPT) -> str:
    return f"""
[INST] <<SYS>>
{system_prompt}
<</SYS>>
{prompt} [/INST]
""".strip()
# SYSTEM_PROMPT = "Answer from following context, if question is out of context respond you don't know and do not explain the same"
SYSTEM_PROMPT = "Answer from following context, if question is out of context respond i don't know"
template = generate_prompt(
    """
{context}
Question: {question}
""",
    system_prompt=SYSTEM_PROMPT,
)
print(template)
from langchain import PromptTemplate
prompt = PromptTemplate(template=template, input_variables=["context", "question"])
print(prompt)
# create the chain to answer questions
qa_chain = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=retriever,
    return_source_documents=True,
    chain_type_kwargs={"prompt": prompt},
)
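# Example of querying the chain directly (a sketch with a made-up question):
# result = qa_chain("What is covered in the documents?")
# print(result['result'])
# for doc in result['source_documents']:
#     print(doc.metadata)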
import gradio
def greet(query):
    """Run the RetrievalQA chain and return just the answer text."""
    llm_response = qa_chain(query)
    return llm_response['result']
gradio.Interface(greet, "text", "text").launch()
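# Tip: pass share=True to launch() for a temporary public URL when running locally.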