from naptha_sdk.utils import get_logger

# Module-level logger, named after this module per the project convention.
logger = get_logger(__name__)
def run(inputs, worker_nodes = None, orchestrator_node = None, flow_run = None, cfg: dict = None):
    """Run a single chat completion against a local Ollama model and return the reply text.

    Args:
        inputs: The user prompt (plain string) sent as the "user" message.
        worker_nodes: Unused here; kept to match the module run-signature convention.
        orchestrator_node: Unused here; kept to match the module run-signature convention.
        flow_run: Unused here; kept to match the module run-signature convention.
        cfg: Optional config dict. Recognized (all optional) keys override the
            built-in defaults: "model", "api_base", "temperature", "max_tokens".
            Omitting cfg (or any key) preserves the original hard-coded behavior.

    Returns:
        The assistant's reply content as a string.
    """
    # Imported lazily so the module can be loaded without litellm installed.
    from litellm import completion

    cfg = cfg or {}
    messages = [
        {"role": "system", "content": "You are a helpful AI assistant."},
        {"role": "user", "content": inputs},
    ]
    response = completion(
        # Defaults reproduce the previously hard-coded values; cfg may override.
        model=cfg.get("model", "ollama/llama3.1:70b"),
        messages=messages,
        temperature=cfg.get("temperature", 0),
        max_tokens=cfg.get("max_tokens", 1000),
        api_base=cfg.get("api_base", "http://localhost:11434"),
    )
    # Pull the assistant message text out of the first choice; use a distinct
    # name instead of rebinding `response` to a different type.
    answer = response.choices[0].message["content"]
    logger.info(f"Response: {answer}")
    return answer