anmolsahai committed on
Commit
e675e34
1 Parent(s): 3f682ab
Files changed (3) hide show
  1. .gitignore +1 -0
  2. app.py +1 -1
  3. langchain_pipeline.py +20 -23
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ langchain_pipeline copy.py
app.py CHANGED
@@ -46,7 +46,7 @@ def redline_changes(original_path, revised_path, output_path):
46
  st.title("Canarie AI Prototype")
47
  st.subheader("Finding the canarie in the coal mine")
48
 
49
- model_name = st.selectbox("Model", model_names())
50
 
51
  balance_type = st.selectbox("Do you charge on available balance or ledger balance?", ["available balance", "ledger balance"])
52
 
 
46
  st.title("Canarie AI Prototype")
47
  st.subheader("Finding the canarie in the coal mine")
48
 
49
+ model_name = st.selectbox("Model", ["gemini-1.5-pro-001", "other-model-name"])
50
 
51
  balance_type = st.selectbox("Do you charge on available balance or ledger balance?", ["available balance", "ledger balance"])
52
 
langchain_pipeline.py CHANGED
@@ -11,19 +11,17 @@ from vertexai.generative_models import GenerativeModel, Part, FinishReason
11
  import vertexai.preview.generative_models as generative_models
12
 
13
  def generate():
14
- vertexai.init(project="akroda", location="us-central1")
15
- model = GenerativeModel(
16
- "gemini-1.5-pro-001",
17
- )
18
- responses = model.generate_content(
19
- [document1, document2, document3, document4, document5, document6, document7, document8, document9, text1],
20
- generation_config=generation_config,
21
- safety_settings=safety_settings,
22
- stream=True,
23
- )
24
 
25
- for response in responses:
26
- print(response.text, end="")
27
 
28
  document1 = Part.from_data(
29
  mime_type="application/pdf",
@@ -59,17 +57,15 @@ Please output in the following format:
59
  {entire updated disclosure text with changes bolded}
60
  ------
61
  {reasons for each change listed and cases cited}"""
62
-
63
- )
64
- val = prompt.format(
65
- context=legal_cases_context,
66
- disclosure=disclosure_text,
67
- balance_type=balance_type,
68
- apsn_transactions=apsn_transactions,
69
- max_fees_per_day=max_fees_per_day,
70
- min_overdrawn_fee=min_overdrawn_fee,
71
- min_transaction_overdraft=min_transaction_overdraft,
72
- )
73
 
74
  generation_config = {
75
  "max_output_tokens": 8192,
@@ -87,3 +83,4 @@ safety_settings = {
87
  generate()
88
 
89
  return chat_response.content
 
 
11
  import vertexai.preview.generative_models as generative_models
12
 
13
  def generate():
14
+ vertexai.init(project="akroda", location="us-central1")
15
+ model = GenerativeModel("gemini-1.5-pro-001")
16
+ responses = model.generate_content(
17
+ [document1, document2, document3, document4, document5, document6, document7, document8, document9, text1],
18
+ generation_config=generation_config,
19
+ safety_settings=safety_settings,
20
+ stream=True,
21
+ )
 
 
22
 
23
+ for response in responses:
24
+ print(response.text, end="")
25
 
26
  document1 = Part.from_data(
27
  mime_type="application/pdf",
 
57
  {entire updated disclosure text with changes bolded}
58
  ------
59
  {reasons for each change listed and cases cited}"""
60
+ val = prompt.format(
61
+ context=legal_cases_context,
62
+ disclosure=disclosure_text,
63
+ balance_type=balance_type,
64
+ apsn_transactions=apsn_transactions,
65
+ max_fees_per_day=max_fees_per_day,
66
+ min_overdrawn_fee=min_overdrawn_fee,
67
+ min_transaction_overdraft=min_transaction_overdraft,
68
+ )
 
 
69
 
70
  generation_config = {
71
  "max_output_tokens": 8192,
 
83
  generate()
84
 
85
  return chat_response.content
86
+