Update TextGen/router.py
TextGen/router.py  +11 -9
CHANGED
@@ -1,12 +1,14 @@
 from pydantic import BaseModel

-from .ConfigEnv import config
 from fastapi.middleware.cors import CORSMiddleware

-from langchain.llms import Clarifai
 from langchain.chains import LLMChain
 from langchain.prompts import PromptTemplate
-
+from langchain_google_genai import (
+    ChatGoogleGenerativeAI,
+    HarmBlockThreshold,
+    HarmCategory,
+)
 from TextGen import app

 class Generate(BaseModel):
@@ -18,12 +20,12 @@ def generate_text(prompt: str):
     else:
         prompt = PromptTemplate(template=prompt, input_variables=['Prompt'])

-
-
-
-
-
-
+    # Initialize the LLM
+    llm = ChatGoogleGenerativeAI(
+        model="gemini-pro",
+        safety_settings={
+            HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
+        },
     )

     llmchain = LLMChain(
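
Below is a minimal, self-contained sketch of how the changed pieces fit together after this commit: the Clarifai LLM is replaced by Gemini via langchain_google_genai, with the dangerous-content safety filter set to BLOCK_NONE. Only the imports and the ChatGoogleGenerativeAI initialization mirror the diff; the FastAPI wiring (from TextGen import app, CORS, route decorators), the Generate model's fields, the sample template string, and the return shape are assumptions made for illustration. ChatGoogleGenerativeAI reads the GOOGLE_API_KEY environment variable unless an API key is passed explicitly.

from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_google_genai import (
    ChatGoogleGenerativeAI,
    HarmBlockThreshold,
    HarmCategory,
)
from pydantic import BaseModel


class Generate(BaseModel):
    # Assumed single response field; the real model's fields are not shown in the diff.
    text: str


def generate_text(prompt: str) -> Generate:
    # Wrap the incoming string in a PromptTemplate, as the changed code does.
    # Assumes `prompt` contains a '{Prompt}' placeholder to match input_variables.
    template = PromptTemplate(template=prompt, input_variables=["Prompt"])

    # Initialize the Gemini chat model, mirroring the added lines: BLOCK_NONE
    # disables the dangerous-content safety filter. Requires GOOGLE_API_KEY.
    llm = ChatGoogleGenerativeAI(
        model="gemini-pro",
        safety_settings={
            HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
        },
    )

    # Chain the template and the model, then run it with a value for the
    # template's 'Prompt' variable (an illustrative input, not from the diff).
    llmchain = LLMChain(llm=llm, prompt=template)
    answer = llmchain.run({"Prompt": "Write a one-line greeting."})
    return Generate(text=answer)


if __name__ == "__main__":
    # Illustrative call; the argument must contain the '{Prompt}' placeholder.
    result = generate_text("Answer briefly: {Prompt}")
    print(result.text)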
|