renamed flows to aiflows
Files changed:
- README.md (+4 -4)
- VisionAtomicFlow.py (+5 -5)
- VisionAtomicFlow.yaml (+4 -4)
- __init__.py (+1 -1)
- demo.yaml (+1 -1)
- run.py (+6 -6)
--- a/README.md
+++ b/README.md
@@ -53,13 +53,13 @@ whose default value is overwritten:
 - `frequency_penalty` (float): The higher this value, the more likely the model will repeat itself. Default: 0.0
 - `presence_penalty` (float): The higher this value, the less likely the model will talk about a new topic. Default: 0.0
 - `system_message_prompt_template` (Dict[str,Any]): The template of the system message. It is used to generate the system message.
-By default it's of type flows.prompt_template.JinjaPrompt.
+By default it's of type aiflows.prompt_template.JinjaPrompt.
 None of the parameters of the prompt are defined by default and therefore need to be defined if one wants to use the system prompt.
-Default parameters are defined in flows.prompt_template.jinja2_prompts.JinjaPrompt.
+Default parameters are defined in aiflows.prompt_template.jinja2_prompts.JinjaPrompt.
 - `init_human_message_prompt_template` (Dict[str,Any]): The prompt template of the human/user message used to initialize the conversation
 (first time in). It is used to generate the human message. It's passed as the user message to the LLM.
-By default it's of type flows.prompt_template.JinjaPrompt. None of the parameters of the prompt are defined by default and therefore need to be defined if one
-wants to use the init_human_message_prompt_template. Default parameters are defined in flows.prompt_template.jinja2_prompts.JinjaPrompt.
+By default it's of type aiflows.prompt_template.JinjaPrompt. None of the parameters of the prompt are defined by default and therefore need to be defined if one
+wants to use the init_human_message_prompt_template. Default parameters are defined in aiflows.prompt_template.jinja2_prompts.JinjaPrompt.
 - `previous_messages` (Dict[str,Any]): Defines which previous messages to include in the input of the LLM. Note that if `first_k` and `last_k` are both None,
 all the messages of the flow's history are added to the input of the LLM. Default:
 - `first_k` (int): If defined, adds the first_k earliest messages of the flow's chat history to the input of the LLM. Default: None
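Since none of the JinjaPrompt parameters are set by default, a caller has to supply them. A minimal sketch of what that could look like, assuming the `template` and `input_variables` fields shown in VisionAtomicFlow.yaml below ("topic" is a hypothetical variable name, not from this commit):

```python
# Minimal sketch (not from this commit): supplying the system message template
# described above. The `template` and `input_variables` fields mirror the
# VisionAtomicFlow.yaml config; "topic" is a hypothetical variable name.
from aiflows.prompt_template import JinjaPrompt

system_message_prompt_template = JinjaPrompt(
    template="You are a helpful assistant. Answer questions about {{topic}}.",
    input_variables=["topic"],
)
```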
--- a/VisionAtomicFlow.py
+++ b/VisionAtomicFlow.py
@@ -1,7 +1,7 @@
 
 from typing import Dict, Any
 from flow_modules.aiflows.ChatFlowModule import ChatAtomicFlow
-from flows.utils.general_helpers import encode_image,encode_from_buffer
+from aiflows.utils.general_helpers import encode_image,encode_from_buffer
 import cv2
 
 
@@ -36,13 +36,13 @@ class VisionAtomicFlow(ChatAtomicFlow):
 - `frequency_penalty` (float): The higher this value, the more likely the model will repeat itself. Default: 0.0
 - `presence_penalty` (float): The higher this value, the less likely the model will talk about a new topic. Default: 0.0
 - `system_message_prompt_template` (Dict[str,Any]): The template of the system message. It is used to generate the system message.
-By default it's of type flows.prompt_template.JinjaPrompt.
+By default it's of type aiflows.prompt_template.JinjaPrompt.
 None of the parameters of the prompt are defined by default and therefore need to be defined if one wants to use the system prompt.
-Default parameters are defined in flows.prompt_template.jinja2_prompts.JinjaPrompt.
+Default parameters are defined in aiflows.prompt_template.jinja2_prompts.JinjaPrompt.
 - `init_human_message_prompt_template` (Dict[str,Any]): The prompt template of the human/user message used to initialize the conversation
 (first time in). It is used to generate the human message. It's passed as the user message to the LLM.
-By default it's of type flows.prompt_template.JinjaPrompt. None of the parameters of the prompt are defined by default and therefore need to be defined if one
-wants to use the init_human_message_prompt_template. Default parameters are defined in flows.prompt_template.jinja2_prompts.JinjaPrompt.
+By default it's of type aiflows.prompt_template.JinjaPrompt. None of the parameters of the prompt are defined by default and therefore need to be defined if one
+wants to use the init_human_message_prompt_template. Default parameters are defined in aiflows.prompt_template.jinja2_prompts.JinjaPrompt.
 - `previous_messages` (Dict[str,Any]): Defines which previous messages to include in the input of the LLM. Note that if `first_k` and `last_k` are both None,
 all the messages of the flow's history are added to the input of the LLM. Default:
 - `first_k` (int): If defined, adds the first_k earliest messages of the flow's chat history to the input of the LLM. Default: None
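The renamed import pulls in the media-encoding helpers used alongside cv2. A hedged sketch of how they might combine (the exact helper signatures are assumptions, not shown in this diff):

```python
# Hedged sketch: preparing media for the vision model with the helpers imported
# above. The exact signatures of encode_image/encode_from_buffer are assumptions.
import cv2
from aiflows.utils.general_helpers import encode_image, encode_from_buffer

image_b64 = encode_image("photo.jpg")        # assumed: image path -> base64 string

video = cv2.VideoCapture("clip.mp4")
ok, frame = video.read()                     # read the first frame as an ndarray
if ok:
    _, buffer = cv2.imencode(".jpg", frame)  # compress the frame to JPEG bytes
    frame_b64 = encode_from_buffer(buffer)   # assumed: byte buffer -> base64 string
video.release()
```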
--- a/VisionAtomicFlow.yaml
+++ b/VisionAtomicFlow.yaml
@@ -11,7 +11,7 @@ user_name: user
 assistant_name: assistant
 
 backend:
-  _target_: flows.backends.llm_lite.LiteLLMBackend
+  _target_: aiflows.backends.llm_lite.LiteLLMBackend
   api_infos: ???
   model_name: "gpt-4-vision-preview"
   n: 1
@@ -22,13 +22,13 @@ backend:
   presence_penalty: 0
 
 system_message_prompt_template:
-  _target_: flows.prompt_template.JinjaPrompt
+  _target_: aiflows.prompt_template.JinjaPrompt
 
 init_human_message_prompt_template:
-  _target_: flows.prompt_template.JinjaPrompt
+  _target_: aiflows.prompt_template.JinjaPrompt
 
 human_message_prompt_template:
-  _target_: flows.prompt_template.JinjaPrompt
+  _target_: aiflows.prompt_template.JinjaPrompt
   template: "{{query}}"
   input_variables:
     - "query"
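Each `_target_` is a dotted import path resolved at instantiation time. A short sketch of the mechanism using standard hydra.utils.instantiate, with values mirroring the config above:

```python
# Sketch of how a `_target_` entry above is resolved: the dotted path is
# imported and called with the remaining keys as keyword arguments.
from hydra.utils import instantiate

cfg = {
    "_target_": "aiflows.prompt_template.JinjaPrompt",
    "template": "{{query}}",
    "input_variables": ["query"],
}
prompt = instantiate(cfg)  # ~ JinjaPrompt(template="{{query}}", input_variables=["query"])
```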
--- a/__init__.py
+++ b/__init__.py
@@ -2,6 +2,6 @@
 dependencies = [
     {"url": "aiflows/ChatFlowModule", "revision": "main"}
 ]
-from flows import flow_verse
+from aiflows import flow_verse
 flow_verse.sync_dependencies(dependencies)
 from .VisionAtomicFlow import VisionAtomicFlow
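The ordering here matters: VisionAtomicFlow.py imports ChatAtomicFlow from the synced module, so sync_dependencies must run before the final import. The same pattern, annotated:

```python
# Same pattern as above, annotated: sync_dependencies materializes the remote
# module under flow_modules/ before anything tries to import from it.
from aiflows import flow_verse

dependencies = [{"url": "aiflows/ChatFlowModule", "revision": "main"}]
flow_verse.sync_dependencies(dependencies)  # fetches flow_modules/aiflows/ChatFlowModule

from flow_modules.aiflows.ChatFlowModule import ChatAtomicFlow  # now importable
```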
--- a/demo.yaml
+++ b/demo.yaml
@@ -1,5 +1,5 @@
 flow:
-  _target_: aiflows.VisionFlowModule.VisionAtomicFlow.instantiate_from_default_config
+  _target_: flow_modules.aiflows.VisionFlowModule.VisionAtomicFlow.instantiate_from_default_config
   name: "Demo Vision Flow"
   description: "A flow that, given a textual input, and a set of images and/or videos, generates a textual output."
   backend:
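The new `_target_` resolves against the locally synced copy rather than the aiflows package itself, since flow_verse places dependencies under a flow_modules package. A hypothetical sanity check:

```python
# Hypothetical sanity check (not part of this commit): the dotted path in
# demo.yaml is importable once flow_verse has synced the module into flow_modules/.
import importlib

mod = importlib.import_module("flow_modules.aiflows.VisionFlowModule")
print(mod.VisionAtomicFlow.instantiate_from_default_config)
```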
--- a/run.py
+++ b/run.py
@@ -2,19 +2,19 @@ import os
 
 import hydra
 
-from flows.flow_launchers import FlowLauncher
-from flows.backends.api_info import ApiInfo
-from flows.utils.general_helpers import read_yaml_file
+from aiflows.flow_launchers import FlowLauncher
+from aiflows.backends.api_info import ApiInfo
+from aiflows.utils.general_helpers import read_yaml_file
 
-from flows import logging
-from flows.flow_cache import CACHING_PARAMETERS, clear_cache
+from aiflows import logging
+from aiflows.flow_cache import CACHING_PARAMETERS, clear_cache
 
 CACHING_PARAMETERS.do_caching = False  # Set to False to disable caching
 # clear_cache()  # Uncomment this line to clear the cache
 
 logging.set_verbosity_debug()  # Comment this line to disable verbose logs
 
-from flows import flow_verse
+from aiflows import flow_verse
 
 dependencies = [
     {"url": "aiflows/VisionFlowModule", "revision": os.getcwd()},
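Taken together, the renamed imports support the usual demo pattern. A hedged sketch of how they typically combine (the FlowLauncher.launch signature and the input keys are assumptions, not shown in this diff):

```python
# Hedged sketch of how the imports above typically fit together in a demo
# script. FlowLauncher.launch's exact signature and the input schema are
# assumptions here, not taken from this commit.
api_info = ApiInfo(backend_used="openai", api_key=os.getenv("OPENAI_API_KEY"))

cfg = read_yaml_file("demo.yaml")
cfg["flow"]["backend"]["api_infos"] = [api_info]  # fill the `???` placeholder

flow = hydra.utils.instantiate(cfg["flow"], _recursive_=False, _convert_="partial")

data = {"id": 0, "query": "Describe the attached image."}  # hypothetical input
outputs = FlowLauncher.launch(flow_with_interfaces={"flow": flow}, data=data)
```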