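"""Gemini-backed Q&A helper: answers due-diligence questions about a
company, grounded on pre-chunked document content."""
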
import google.generativeai as genai
from config import gemini_api
import tiktoken

def get_answer(query, company_name, chunked_raw_content):
    """Answer a due-diligence question about `company_name`, grounded on
    the retrieved document chunks."""
    genai.configure(api_key=gemini_api)

    # Generation parameters for the model; see
    # https://ai.google.dev/api/python/google/generativeai/GenerativeModel
    generation_config = {
        "temperature": 1,
        "top_p": 0.95,
        "top_k": 64,
        "max_output_tokens": 8192,
        "response_mime_type": "text/plain",
    }
    safety_settings = [
        {
            "category": "HARM_CATEGORY_HARASSMENT",
            "threshold": "BLOCK_MEDIUM_AND_ABOVE",
        },
        {
            "category": "HARM_CATEGORY_HATE_SPEECH",
            "threshold": "BLOCK_MEDIUM_AND_ABOVE",
        },
        {
            "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
            "threshold": "BLOCK_MEDIUM_AND_ABOVE",
        },
        {
            "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
            "threshold": "BLOCK_MEDIUM_AND_ABOVE",
        },
    ]
    # Strip internal storage prefixes from the chunks so that cited
    # source names read cleanly in the answer.
    chunks = []
    for chunk in chunked_raw_content:
        chunk = chunk.replace("PDF_FILE_____data_dumpster_", "")
        chunk = chunk.replace("data_dumpster_", "")
        chunks.append(chunk)
    context = str(chunks)

    # Keep the context within the model's input budget: if it exceeds
    # ~900k tokens, truncate it proportionally by character count.
    # cl100k_base is only a rough proxy for Gemini's tokenizer.
    enc = tiktoken.get_encoding("cl100k_base")
    toks = enc.encode(context)
    if len(toks) >= 900000:
        max_chars = int(len(context) // (len(toks) / 900000))
        context = context[:max_chars]

    model = genai.GenerativeModel(
        model_name="gemini-1.5-flash-latest",
        safety_settings=safety_settings,
        generation_config=generation_config,
        system_instruction=(
            f"You are an expert at a Private Equity fund. You are helping a "
            f"colleague with his due diligence on {company_name}. All the "
            f"questions you will receive are in the context of this due "
            f"diligence. You always cite the sources from the context (given "
            f"below) that you use.\nYou answer any question based on the "
            f"following context elements:\n{context}"
        ),
    )
    chat_session = model.start_chat(history=[])

    response = chat_session.send_message(
        f"{query} - (Bain style answer + sources properly renamed if needed)"
    )
    return response.text
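

# A minimal usage sketch (not part of the original module): the company
# name and chunk contents below are hypothetical, and `gemini_api` must
# be set in config.py for this to run.
if __name__ == "__main__":
    sample_chunks = [
        "PDF_FILE_____data_dumpster_acme_annual_report.pdf: Revenue grew 12% in 2023.",
        "data_dumpster_acme_customer_survey.pdf: NPS improved from 31 to 45.",
    ]
    answer = get_answer(
        query="What is the revenue trajectory?",
        company_name="Acme Corp",
        chunked_raw_content=sample_chunks,
    )
    print(answer)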