import os
from typing import Any, List, Mapping, Optional
from urllib.parse import urlparse

from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
from langchain.llms.base import LLM


class ClaudeLLM(LLM):
    """LangChain wrapper for Anthropic's legacy completions API using claude-instant-v1-100k."""

    @property
    def _llm_type(self) -> str:
        return "custom"

    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        # Reads the key from ANTHROPIC_API_KEY when api_key is omitted.
        client = Anthropic(api_key=os.environ.get("ANTHROPIC_API_KEY"))

        # The legacy completions API expects "\n\nHuman: ...\n\nAssistant:".
        prompt_formatted = f"{HUMAN_PROMPT} {prompt}{AI_PROMPT}"

        response = client.completions.create(
            model="claude-instant-v1-100k",
            prompt=prompt_formatted,
            # Honor caller-supplied stop sequences; fall back to the Human turn marker.
            stop_sequences=stop or [HUMAN_PROMPT],
            max_tokens_to_sample=100000,
            temperature=0,
        )
        return response.completion

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {"model": "claude-instant-v1-100k"}


class ClaudeLLM2(LLM):
    """Same wrapper as ClaudeLLM, pointed at the claude-2 model instead."""

    @property
    def _llm_type(self) -> str:
        return "custom"

    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        # Reads the key from ANTHROPIC_API_KEY when api_key is omitted.
        client = Anthropic(api_key=os.environ.get("ANTHROPIC_API_KEY"))

        # The legacy completions API expects "\n\nHuman: ...\n\nAssistant:".
        prompt_formatted = f"{HUMAN_PROMPT} {prompt}{AI_PROMPT}"

        response = client.completions.create(
            model="claude-2",
            prompt=prompt_formatted,
            # Honor caller-supplied stop sequences; fall back to the Human turn marker.
            stop_sequences=stop or [HUMAN_PROMPT],
            max_tokens_to_sample=100000,
            temperature=0,
        )
        return response.completion

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {"model": "claude-2"}
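

# --- Usage sketch (illustrative, not part of the original module) -----------
# A minimal example of wiring one of the wrappers above into a LangChain
# LLMChain, assuming ANTHROPIC_API_KEY is set and the 0.0.x-era LangChain
# imports that match `langchain.llms.base.LLM` above. The template text and
# question are made-up placeholders. Kept commented out so importing this
# utils module stays side-effect free.
#
# from langchain.chains import LLMChain
# from langchain.prompts import PromptTemplate
#
# llm = ClaudeLLM()
# qa_prompt = PromptTemplate(
#     input_variables=["question"],
#     template="Answer concisely: {question}",
# )
# chain = LLMChain(llm=llm, prompt=qa_prompt)
# print(chain.run(question="What does this module do?"))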


def remove_numbers(question: str) -> str:
    """Strip every ASCII digit from the question string."""
    return question.translate(str.maketrans('', '', '0123456789'))


def extract_website_name(url: str) -> str:
    """Return the first label of a URL's host, dropping any leading "www."."""
    netloc = urlparse(url).netloc
    if netloc.startswith("www."):
        netloc = netloc[len("www."):]
    return netloc.split(".")[0]
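

# A quick self-check of the helpers above; the sample strings are invented
# for illustration and are not part of the original file.
if __name__ == "__main__":
    # Digits are stripped; all other characters are untouched.
    print(remove_numbers("Top 10 papers of 2023"))  # -> "Top  papers of "
    # "www." is dropped and only the first label of the host is kept.
    print(extract_website_name("https://www.example.com/page"))  # -> "example"
    print(extract_website_name("https://news.ycombinator.com"))  # -> "news"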