# ChatWithDemonstrationsFlow configuration: a sequential flow that prepends
# few-shot demonstrations to a simple question-answering chat flow.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
---
flow:  # Overrides the OpenAIChatAtomicFlow config
  _target_: aiflows.ChatWithDemonstrationsFlowModule.ChatWithDemonstrationsFlow.instantiate_from_default_config
  name: "SimpleQA_Flow_with_Demonstrations"
  description: "A sequential flow that answers questions with demonstrations"

  input_interface:  # Connector between the "input data" and the Flow
    - "questions"
  output_interface:  # Connector between the Flow's output and the caller
    - "answer"

  subflows_config:
    # ~~~ Demonstration subflow: inline few-shot examples shown to the model ~~~
    demonstration_flow:
      data:
        - query_data:
            query: "What is the capital of Turkey?"
          response_data:
            response: "Istambul, my sir."
        - query_data:
            query: "what is the capital of Germany?"
          response_data:
            response: "Berlin, my sir."
      params:
        data_dir: null  # demonstrations are given inline in `data`, not loaded from disk
        demonstrations_id: "my_sir_demo"
      query_prompt_template:
        template: |-
          Answer the following question: {{query}}
        input_variables:
          - "query"
      response_prompt_template:
        template: |-
          {{response}}
        input_variables:
          - "response"

    # ~~~ Chat subflow: answers the actual incoming question ~~~
    chat_flow:
      name: "SimpleQA_Flow"
      # ~~~ Input interface specification ~~~
      input_interface_non_initialized:
        - "question"

      # ~~~ Backend model parameters ~~~
      backend:
        # Package prefix kept consistent with the `aiflows.` target at the top of this file.
        _target_: aiflows.backends.llm_lite.LiteLLMBackend
        api_infos: ???  # mandatory value (OmegaConf): must be supplied at instantiation time
        model_name:
          openai: "gpt-3.5-turbo"
          azure: "azure/gpt-4"

        # ~~~ Generation parameters ~~~
        n: 1
        max_tokens: 3000
        temperature: 0.3
        top_p: 0.2
        frequency_penalty: 0
        presence_penalty: 0

      n_api_retries: 6
      wait_time_between_retries: 20  # presumably seconds — confirm against backend docs

      # ~~~ Prompt specification ~~~
      system_message_prompt_template:
        _target_: aiflows.prompt_template.JinjaPrompt
        template: |-
          You are a helpful chatbot that truthfully answers questions. Answer in a similar way to your previous replies.
        input_variables: []
        partial_variables: {}

      init_human_message_prompt_template:
        _target_: aiflows.prompt_template.JinjaPrompt
        template: |-
          Answer the following question: {{question}}
        input_variables: ["question"]
        partial_variables: {}