Spaces:
Sleeping
Sleeping
anmolsahai
committed on
Commit
•
e675e34
1
Parent(s):
3f682ab
update
Browse files- .gitignore +1 -0
- app.py +1 -1
- langchain_pipeline.py +20 -23
.gitignore
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
langchain_pipeline copy.py
|
app.py
CHANGED
@@ -46,7 +46,7 @@ def redline_changes(original_path, revised_path, output_path):
|
|
46 |
st.title("Canarie AI Prototype")
|
47 |
st.subheader("Finding the canarie in the coal mine")
|
48 |
|
49 |
-
model_name = st.selectbox("Model",
|
50 |
|
51 |
balance_type = st.selectbox("Do you charge on available balance or ledger balance?", ["available balance", "ledger balance"])
|
52 |
|
|
|
46 |
st.title("Canarie AI Prototype")
|
47 |
st.subheader("Finding the canarie in the coal mine")
|
48 |
|
49 |
+
model_name = st.selectbox("Model", ["gemini-1.5-pro-001", "other-model-name"])
|
50 |
|
51 |
balance_type = st.selectbox("Do you charge on available balance or ledger balance?", ["available balance", "ledger balance"])
|
52 |
|
langchain_pipeline.py
CHANGED
@@ -11,19 +11,17 @@ from vertexai.generative_models import GenerativeModel, Part, FinishReason
|
|
11 |
import vertexai.preview.generative_models as generative_models
|
12 |
|
13 |
def generate():
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
stream=True,
|
23 |
-
)
|
24 |
|
25 |
-
|
26 |
-
|
27 |
|
28 |
document1 = Part.from_data(
|
29 |
mime_type="application/pdf",
|
@@ -59,17 +57,15 @@ Please output in the following format:
|
|
59 |
{entire updated disclosure text with changes bolded}
|
60 |
------
|
61 |
{reasons for each change listed and cases cited}"""
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
-
|
68 |
-
|
69 |
-
|
70 |
-
|
71 |
-
min_transaction_overdraft=min_transaction_overdraft,
|
72 |
-
)
|
73 |
|
74 |
generation_config = {
|
75 |
"max_output_tokens": 8192,
|
@@ -87,3 +83,4 @@ safety_settings = {
|
|
87 |
generate()
|
88 |
|
89 |
return chat_response.content
|
|
|
|
11 |
import vertexai.preview.generative_models as generative_models
|
12 |
|
13 |
def generate():
|
14 |
+
vertexai.init(project="akroda", location="us-central1")
|
15 |
+
model = GenerativeModel("gemini-1.5-pro-001")
|
16 |
+
responses = model.generate_content(
|
17 |
+
[document1, document2, document3, document4, document5, document6, document7, document8, document9, text1],
|
18 |
+
generation_config=generation_config,
|
19 |
+
safety_settings=safety_settings,
|
20 |
+
stream=True,
|
21 |
+
)
|
|
|
|
|
22 |
|
23 |
+
for response in responses:
|
24 |
+
print(response.text, end="")
|
25 |
|
26 |
document1 = Part.from_data(
|
27 |
mime_type="application/pdf",
|
|
|
57 |
{entire updated disclosure text with changes bolded}
|
58 |
------
|
59 |
{reasons for each change listed and cases cited}"""
|
60 |
+
val = prompt.format(
|
61 |
+
context=legal_cases_context,
|
62 |
+
disclosure=disclosure_text,
|
63 |
+
balance_type=balance_type,
|
64 |
+
apsn_transactions=apsn_transactions,
|
65 |
+
max_fees_per_day=max_fees_per_day,
|
66 |
+
min_overdrawn_fee=min_overdrawn_fee,
|
67 |
+
min_transaction_overdraft=min_transaction_overdraft,
|
68 |
+
)
|
|
|
|
|
69 |
|
70 |
generation_config = {
|
71 |
"max_output_tokens": 8192,
|
|
|
83 |
generate()
|
84 |
|
85 |
return chat_response.content
|
86 |
+
|