adamelliotfields committed on
Commit 6f2752e • 1 Parent(s): b7490f8

Add OpenAI API

0_🏠_Home.py CHANGED
@@ -52,6 +52,7 @@ st.markdown("""
 - [Black Forest Labs](https://docs.bfl.ml)
 - [fal.ai](https://fal.ai/docs)
 - [Hugging Face](https://huggingface.co/docs/api-inference/index)
+- [OpenAI](https://platform.openai.com/docs/api-reference/introduction)
 - [Perplexity](https://docs.perplexity.ai/home)
 - [together.ai](https://docs.together.ai/docs/introduction)
 """)
@@ -59,7 +60,7 @@ st.markdown("""
 st.markdown("""
 ## Usage
 
-Choose a task. Select a service. Enter your API key (refresh browser to clear).
+Select a task. Choose a service. Enter your API key (refresh browser to clear).
 
 I recommend [duplicating this space](https://huggingface.co/spaces/adamelliotfields/api-inference?duplicate=true) **privately** and persisting your keys as secrets. See [`README.md`](https://huggingface.co/spaces/adamelliotfields/api-inference/blob/main/README.md).
 """)
README.md CHANGED
@@ -30,6 +30,7 @@ Setting keys as environment variables persists them so you don't have to enter t
 BFL_API_KEY=...
 FAL_KEY=...
 HF_TOKEN=...
+OPENAI_API_KEY=...
 PPLX_API_KEY=...
 TOGETHER_API_KEY=...
 ```
@@ -49,7 +50,7 @@ source .venv/bin/activate
 uv pip install -r requirements.txt
 
 # run
-python app.py
+python 0_🏠_Home.py
 ```
 
 ## Development
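As the `lib/config.py` hunks below show, the app resolves each of these variables with `os.environ.get`, so keys persisted as Space secrets are picked up at startup. A minimal sketch of that resolution; the sidebar fallback is an assumption based on the app's usage notes, not code shown in this commit:

```python
import os

# Mirrors the pattern in lib/config.py: each service's key is read from the
# environment at startup. A missing variable yields None, in which case the
# user pastes a key into the sidebar (assumption, per the Home page text).
api_key = os.environ.get("OPENAI_API_KEY")
if api_key is None:
    print("OPENAI_API_KEY not set; the UI will prompt for a key")
```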
lib/api.py CHANGED
@@ -12,6 +12,7 @@ from .config import config
 
 def txt2txt_generate(api_key, service, model, parameters, **kwargs):
     base_url = config.services[service].url
+
     if service == "hf":
         base_url = f"{base_url}/{model}/v1"
     client = OpenAI(api_key=api_key, base_url=base_url)
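Only the top of `txt2txt_generate` appears in this hunk: the base URL comes from the service config, and Hugging Face's OpenAI-compatible endpoint is scoped per model, so the model id is appended before the client is built. The rest of the function is not part of the diff; here is a hedged sketch of a plausible continuation using the OpenAI SDK's streaming interface (everything after the client construction is an assumption):

```python
from openai import OpenAI

from .config import config


def txt2txt_generate(api_key, service, model, parameters, **kwargs):
    base_url = config.services[service].url

    # Hugging Face serves an OpenAI-compatible API per model: .../models/<id>/v1
    if service == "hf":
        base_url = f"{base_url}/{model}/v1"
    client = OpenAI(api_key=api_key, base_url=base_url)

    # Assumed continuation, not shown in this commit: stream a chat completion
    # using the preset parameters collected by the sidebar widgets.
    stream = client.chat.completions.create(
        model=model,
        stream=True,
        messages=kwargs.get("messages", []),
        **parameters,
    )
    return "".join(chunk.choices[0].delta.content or "" for chunk in stream)
```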
lib/config.py CHANGED
@@ -53,6 +53,8 @@ class TextModelConfig(ModelConfig):
     system_prompt: Optional[str] = None
     frequency_penalty: Optional[float] = None
     frequency_penalty_range: Optional[tuple[float, float]] = None
+    presence_penalty: Optional[float] = None
+    presence_penalty_range: Optional[tuple[float, float]] = None
     max_tokens: Optional[int] = None
     max_tokens_range: Optional[tuple[int, int]] = None
     temperature: Optional[float] = None
@@ -106,6 +108,19 @@ _hf_text_kwargs = {
     "parameters": ["max_tokens", "temperature", "frequency_penalty", "seed"],
 }
 
+_openai_text_kwargs = {
+    "system_prompt": TEXT_SYSTEM_PROMPT,
+    "frequency_penalty": 0.0,
+    "frequency_penalty_range": (-2.0, 2.0),
+    "presence_penalty": 0.0,
+    "presence_penalty_range": (-2.0, 2.0),
+    "max_tokens": 512,
+    "max_tokens_range": (512, 4096),
+    "temperature": 1.0,
+    "temperature_range": (0.0, 2.0),
+    "parameters": ["max_tokens", "temperature", "frequency_penalty", "presence_penalty", "seed"],
+}
+
 _pplx_text_kwargs = {
     "system_prompt": TEXT_SYSTEM_PROMPT,
     "frequency_penalty": 1.0,
@@ -302,18 +317,9 @@ config = AppConfig(
         url="https://api-inference.huggingface.co/models",
         api_key=os.environ.get("HF_TOKEN"),
         text={
-            "codellama/codellama-34b-instruct-hf": TextModelConfig(
-                "Code Llama 34B",
-                **_hf_text_kwargs,
-            ),
-            "meta-llama/llama-2-13b-chat-hf": TextModelConfig(
-                "Meta Llama 2 13B",
-                **_hf_text_kwargs,
-            ),
-            "mistralai/mistral-7b-instruct-v0.2": TextModelConfig(
-                "Mistral 0.2 7B",
-                **_hf_text_kwargs,
-            ),
+            "codellama/codellama-34b-instruct-hf": TextModelConfig("Code Llama 34B", **_hf_text_kwargs),
+            "meta-llama/llama-2-13b-chat-hf": TextModelConfig("Meta Llama 2 13B", **_hf_text_kwargs),
+            "mistralai/mistral-7b-instruct-v0.2": TextModelConfig("Mistral 0.2 7B", **_hf_text_kwargs),
             "nousresearch/nous-hermes-2-mixtral-8x7b-dpo": TextModelConfig(
                 "Nous Hermes 2 Mixtral 8x7B",
                 **_hf_text_kwargs,
@@ -365,6 +371,20 @@ config = AppConfig(
             ),
         },
     ),
+    "openai": ServiceConfig(
+        name="OpenAI",
+        url="https://api.openai.com/v1",
+        api_key=os.environ.get("OPENAI_API_KEY"),
+        text={
+            "chatgpt-4o-latest": TextModelConfig("ChatGPT-4o", **_openai_text_kwargs),
+            "gpt-3.5-turbo": TextModelConfig("GPT-3.5 Turbo", **_openai_text_kwargs),
+            "gpt-4-turbo": TextModelConfig("GPT-4 Turbo", **_openai_text_kwargs),
+            "gpt-4o": TextModelConfig("GPT-4o", **_openai_text_kwargs),
+            "gpt-4o-mini": TextModelConfig("GPT-4o mini", **_openai_text_kwargs),
+            "o1-preview": TextModelConfig("o1-preview", **_openai_text_kwargs),
+            "o1-mini": TextModelConfig("o1-mini", **_openai_text_kwargs),
+        },
+    ),
     "pplx": ServiceConfig(
         name="Perplexity",
         url="https://api.perplexity.ai",
@@ -392,7 +412,7 @@ config = AppConfig(
             ),
         },
     ),
-    # TODO: text models
+    # TODO: text models, more image models
     "together": ServiceConfig(
         name="Together",
         url="https://api.together.xyz/v1/images/generations",
pages/1_πŸ’¬_Text_Generation.py CHANGED
@@ -4,17 +4,18 @@ import streamlit as st
 
 from lib import config, txt2txt_generate
 
-# config
 st.set_page_config(
     page_title=f"{config.title} | Text Generation",
     page_icon=config.logo,
     layout=config.layout,
 )
 
-# initialize state
 if "api_key_hf" not in st.session_state:
     st.session_state.api_key_hf = ""
 
+if "api_key_openai" not in st.session_state:
+    st.session_state.api_key_openai = ""
+
 if "api_key_pplx" not in st.session_state:
     st.session_state.api_key_pplx = ""
 
@@ -27,7 +28,6 @@ if "txt2txt_messages" not in st.session_state:
 if "txt2txt_seed" not in st.session_state:
     st.session_state.txt2txt_seed = 0
 
-# sidebar
 st.logo(config.logo)
 st.sidebar.header("Settings")
 
@@ -74,7 +74,12 @@ system = st.sidebar.text_area(
     disabled=st.session_state.running,
 )
 
-# build parameters from preset
+st.html("""
+    <h1>Text Generation</h1>
+    <p>Chat with large language models.</p>
+""")
+
+# Build parameters from preset by rendering the appropriate input widgets
 parameters = {}
 for param in model_config.parameters:
     if param == "max_tokens":
@@ -107,6 +112,16 @@ for param in model_config.parameters:
             disabled=st.session_state.running,
             help="Penalize new tokens based on their existing frequency in the text (default: 0.0)",
         )
+    if param == "presence_penalty":
+        parameters[param] = st.sidebar.slider(
+            "Presence Penalty",
+            step=0.1,
+            value=model_config.presence_penalty,
+            min_value=model_config.presence_penalty_range[0],
+            max_value=model_config.presence_penalty_range[1],
+            disabled=st.session_state.running,
+            help="Penalize new tokens based on their presence in the text so far (default: 0.0)",
+        )
     if param == "seed":
        parameters[param] = st.sidebar.number_input(
            "Seed",
@@ -117,18 +132,12 @@ for param in model_config.parameters:
             help="Make a best effort to sample deterministically (default: -1)",
         )
 
-# heading
-st.html("""
-    <h1>Text Generation</h1>
-    <p>Chat with large language models.</p>
-""")
-
-# chat messages
+# Chat messages
 for message in st.session_state.txt2txt_messages:
     with st.chat_message(message["role"]):
         st.markdown(message["content"])
 
-# button row
+# Buttons for deleting last message or clearing all messages
 if st.session_state.txt2txt_messages:
     button_container = st.empty()
     with button_container.container():
@@ -156,7 +165,7 @@ if st.session_state.txt2txt_messages:
 else:
     button_container = None
 
-# chat input
+# Chat input
 if prompt := st.chat_input(
     "What would you like to know?",
     on_submit=lambda: setattr(st.session_state, "running", True),
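The presence-penalty slider slots into the existing loop: `model_config.parameters` acts as an allow-list, and each matching name renders one sidebar widget into the `parameters` dict. The chat-submit handler that consumes that dict sits outside these hunks; a hedged sketch of the assumed wiring, continuing from the page's existing imports and variables, with keyword names taken from the `txt2txt_generate` signature in `lib/api.py` (the `messages` kwarg is hypothetical):

```python
# Assumed wiring (the submit handler is outside the hunks shown above):
# the parameters dict built by the widget loop is forwarded verbatim as
# OpenAI-style chat-completion arguments.
if prompt := st.chat_input("What would you like to know?"):
    st.session_state.txt2txt_messages.append({"role": "user", "content": prompt})
    response = txt2txt_generate(
        api_key=st.session_state.api_key_openai,  # key entered in the sidebar
        service="openai",
        model=model,            # selected via the sidebar selectbox
        parameters=parameters,  # built by the widget loop above
        messages=st.session_state.txt2txt_messages,  # hypothetical kwarg
    )
    st.session_state.txt2txt_messages.append({"role": "assistant", "content": response})
```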
pages/2_🎨_Text_to_Image.py CHANGED
@@ -10,7 +10,6 @@ st.set_page_config(
     layout=config.layout,
 )
 
-# Initialize Streamlit session state
 if "api_key_bfl" not in st.session_state:
     st.session_state.api_key_bfl = ""
 
@@ -72,7 +71,6 @@ model = st.sidebar.selectbox(
 
 model_config = service_config.image[model]
 
-# heading
 st.html("""
     <h1>Text to Image</h1>
     <p>Generate an image from a text prompt.</p>