soutrik committed · commit 853a5c8 · 1 parent: 37c6b85

added: testing app and also the workflow file
Files changed:
- .github/workflows/main.yml +31 -0
- .gitignore +1 -0
- app.py +30 -0
- builder.py +0 -0
- chatbot/chatbot_app.py +69 -0
- concurrent_client.py +57 -0
- image_classifier/batch_classifier.py +83 -0
- image_classifier/single_classifier.py +59 -0
- poetry.lock +35 -4
- pyproject.toml +3 -0
- requirements.txt +3 -0
.github/workflows/main.yml
ADDED
@@ -0,0 +1,31 @@
+name: Sync to Hugging Face Hub
+
+on:
+  push:
+    branches: [main]
+  workflow_dispatch:
+
+jobs:
+  sync-to-hub:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+          lfs: true
+
+      - name: Add remote
+        run: |
+          git remote add space https://$USER:$HF_TOKEN@huggingface.co/spaces/$USER/$SPACE
+        env:
+          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+          USER: soutrik
+          SPACE: gradio_demo
+
+      - name: Push to hub
+        run: |
+          git push --force https://$USER:$HF_TOKEN@huggingface.co/spaces/$USER/$SPACE main
+        env:
+          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+          USER: soutrik
+          SPACE: gradio_demo
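Note: the push step above can also be reproduced locally without git, e.g. via huggingface_hub's upload_folder. A minimal sketch, assuming HF_TOKEN is exported in the environment and that the Space soutrik/gradio_demo already exists (the folder_path is illustrative):

# Local equivalent of the workflow's sync, sketched with huggingface_hub
# instead of a git push. Assumes HF_TOKEN is exported; repo_id matches the
# USER/SPACE values used in main.yml.
import os

from huggingface_hub import HfApi

api = HfApi(token=os.environ["HF_TOKEN"])
api.upload_folder(
    folder_path=".",                 # repository root
    repo_id="soutrik/gradio_demo",   # same Space the workflow targets
    repo_type="space",               # upload to a Space, not a model repo
)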
.gitignore
CHANGED
@@ -30,3 +30,4 @@ artifacts/*
 *jpg
 *jpeg
 artifacts/image_prediction.png
+*.csv
app.py
ADDED
@@ -0,0 +1,30 @@
+from transformers import pipeline
+import gradio as gr
+
+# Load the summarization model once
+model = pipeline("summarization")
+
+
+# Prediction function
+def predict(prompt):
+    try:
+        # Generate summary and return
+        summary = model(prompt, max_length=150, min_length=30, do_sample=False)[0][
+            "summary_text"
+        ]
+        return summary
+    except Exception as e:
+        return f"Error: {str(e)}"
+
+
+# Gradio interface
+with gr.Interface(
+    fn=predict,
+    inputs=gr.Textbox(
+        label="Enter text to summarize", placeholder="Type your content here..."
+    ),
+    outputs=gr.Textbox(label="Summary"),
+    title="Text Summarizer",
+    description="Enter text and get a concise summary powered by Hugging Face transformers.",
+) as interface:
+    interface.launch()
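A quick way to exercise app.py once it is running locally is the gradio_client package this commit already depends on. A minimal smoke test, assuming the default port and that Gradio exposes the function under /predict (its default for a single-function Interface):

# Smoke test for the summarizer app; assumes app.py is serving on the
# default local port and that "/predict" is the auto-generated endpoint.
from gradio_client import Client

client = Client("http://127.0.0.1:7860/")
summary = client.predict(
    "Paste a long paragraph here to check the summarizer end to end.",
    api_name="/predict",
)
print(summary)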
builder.py
ADDED
File without changes
chatbot/chatbot_app.py
ADDED
@@ -0,0 +1,69 @@
+import gradio as gr
+from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
+import torch
+from threading import Thread
+
+# Load model and tokenizer
+checkpoint = "HuggingFaceTB/SmolLM2-1.7B-Instruct"
+device = "cuda"
+tokenizer = AutoTokenizer.from_pretrained(checkpoint)
+model = AutoModelForCausalLM.from_pretrained(checkpoint, device_map="auto")
+torch.cuda.empty_cache()
+
+
+def chat_response(message, history):
+    print(f"Received message: {message}")
+    print(f"History: {history}")
+
+    messages = []
+    for h in history:
+        messages.append(h)  # Each h is already a dict with 'role' and 'content'
+    messages.append({"role": "user", "content": message})
+
+    # Generate response
+    input_text = tokenizer.apply_chat_template(messages, tokenize=False)
+    inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
+
+    # Setup streamer
+    streamer = TextIteratorStreamer(
+        tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True
+    )
+
+    # Generate with streaming
+    generation_kwargs = dict(
+        inputs=inputs,
+        max_new_tokens=256,
+        temperature=0.2,
+        top_p=0.9,
+        do_sample=True,
+        pad_token_id=tokenizer.eos_token_id,
+        streamer=streamer,
+    )
+
+    # Create a thread to run the generation
+    thread = Thread(target=model.generate, kwargs=generation_kwargs)
+    thread.start()
+
+    # Stream the response
+    partial_message = ""
+    for new_token in streamer:
+        partial_message += new_token
+        yield partial_message
+
+
+# Create and launch the Gradio interface
+demo = gr.ChatInterface(
+    fn=chat_response,
+    type="messages",
+    title="SmolLM2 Chatbot",
+    description="A chatbot powered by SmolLM2-1.7B-Instruct model",
+    examples=[
+        "What is the capital of France?",
+        "How does photosynthesis work?",
+        "Write a short poem about autumn.",
+    ],
+    cache_examples=True,
+)
+
+if __name__ == "__main__":
+    demo.launch()
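One thing to watch in this file: inputs are moved to a hard-coded device = "cuda" while the model is loaded with device_map="auto", so the script fails on a CPU-only host. A device-agnostic sketch of the same setup (names as in the diff; the fix shown is a suggestion, not part of the commit):

# Derive the input device from wherever device_map="auto" actually placed
# the weights, instead of assuming CUDA is available.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

checkpoint = "HuggingFaceTB/SmolLM2-1.7B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint, device_map="auto")
device = next(model.parameters()).device  # first shard's device

inputs = tokenizer.encode("Hello", return_tensors="pt").to(device)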
concurrent_client.py
ADDED
@@ -0,0 +1,57 @@
+from gradio_client import Client, handle_file
+import concurrent.futures
+import time
+from pathlib import Path
+
+
+def make_prediction(client, image_url):
+    """Make a single prediction"""
+    try:
+        result = client.predict(
+            # image_list=handle_file(image_url),
+            image_list=handle_file(image_url),
+            api_name="/predict",
+        )
+        return result
+    except Exception as e:
+        return f"Error: {str(e)}"
+
+
+def main(requests=16):
+    # Single test image URL
+    image_url = "https://img.freepik.com/free-photo/closeup-shot-cute-grey-kitty-isolated-white-background_181624-35013.jpg?ga=GA1.1.302994776.1729496489&semt=ais_hybrid"
+
+    # Initialize client
+    client = Client("http://127.0.0.1:7860/")
+
+    print("\nSending 16 concurrent requests with the same image...")
+    start_time = time.time()
+
+    # Use ThreadPoolExecutor to send 16 requests concurrently
+    with concurrent.futures.ThreadPoolExecutor(max_workers=16) as executor:
+        futures = [
+            executor.submit(make_prediction, client, image_url) for _ in range(requests)
+        ]
+
+        # Collect results as they complete
+        results = []
+        for i, future in enumerate(concurrent.futures.as_completed(futures)):
+            try:
+                result = future.result()
+                results.append(result)
+                print(f"Completed prediction {i+1}/{requests}")
+            except Exception as e:
+                print(f"Error in request {i+1}: {str(e)}")
+
+    end_time = time.time()
+
+    # Print results
+    print(f"\nAll predictions completed in {end_time - start_time:.2f} seconds")
+    print("\nResults:")
+    for i, result in enumerate(results):
+        print(f"\nRequest {i+1}:")
+        print(result)
+
+
+if __name__ == "__main__":
+    main(16)
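The thread pool above works; gradio_client also has a native non-blocking API that avoids it. An equivalent sketch using Client.submit, under the same assumptions as this script (local server, single-image /predict endpoint; the image URL is a placeholder):

# Fire off 16 requests via gradio_client's Job API; submit() returns
# immediately and result() blocks until that job finishes.
from gradio_client import Client, handle_file

client = Client("http://127.0.0.1:7860/")
image_url = "https://example.com/cat.jpg"  # placeholder test image

jobs = [client.submit(handle_file(image_url), api_name="/predict") for _ in range(16)]
results = [job.result() for job in jobs]
print(f"Got {len(results)} results")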
image_classifier/batch_classifier.py
ADDED
@@ -0,0 +1,83 @@
+import gradio as gr
+import torch
+import timm
+from PIL import Image
+import time
+from tqdm import tqdm
+import numpy as np
+import requests
+
+
+class ImageClassifier:
+    def __init__(self):
+        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        self.model = timm.create_model("resnet50.a1_in1k", pretrained=True)
+        self.model = self.model.to(self.device)
+        self.model.eval()
+
+        data_config = timm.data.resolve_model_data_config(self.model)
+        self.transform = timm.data.create_transform(**data_config, is_training=False)
+        url = "https://storage.googleapis.com/bit_models/ilsvrc2012_wordnet_lemmas.txt"
+        self.labels = requests.get(url).text.strip().split("\n")
+
+    @torch.no_grad()
+    def predict_batch(self, image_list, progress=gr.Progress(track_tqdm=True)):
+        if isinstance(image_list, tuple) and len(image_list) == 1:
+            image_list = [image_list[0]]
+
+        if not image_list or image_list[0] is None:
+            return [[{"none": 1.0}]]
+
+        progress(0.1, desc="Starting preprocessing...")
+        tensors = []
+
+        # Process each image in the batch
+        for image in image_list:
+            if image is None:
+                continue
+            # Convert numpy array to PIL Image
+            img = Image.fromarray(image).convert("RGB")
+            tensor = self.transform(img)
+            tensors.append(tensor)
+
+        if not tensors:
+            return [[{"none": 1.0}]]
+
+        progress(0.4, desc="Batching tensors...")
+        batch = torch.stack(tensors).to(self.device)
+
+        progress(0.6, desc="Running inference...")
+        outputs = self.model(batch)
+        probabilities = torch.nn.functional.softmax(outputs, dim=1)
+
+        progress(0.8, desc="Processing results...")
+        batch_results = []
+        for probs in probabilities:
+            top5_prob, top5_catid = torch.topk(probs, 5)
+            result = {
+                self.labels[idx.item()]: float(prob)
+                for prob, idx in zip(top5_prob, top5_catid)
+            }
+            batch_results.append(result)
+
+        progress(1.0, desc="Done!")
+        # Return results in the required format: list of list of dicts
+        return [batch_results]
+
+
+# Create classifier instance
+classifier = ImageClassifier()
+
+# Create Gradio interface
+demo = gr.Interface(
+    fn=classifier.predict_batch,
+    inputs=gr.Image(),
+    outputs=gr.Label(num_top_classes=5),
+    title="Advanced Image Classification with Mamba",
+    description="Upload images for batch classification with the resnet50.a1_in1k model",
+    batch=True,
+    max_batch_size=4,
+)
+
+if __name__ == "__main__":
+    demo.launch(server_name="0.0.0.0", server_port=7860)
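For context on the [batch_results] return value and the batch=True/max_batch_size flags: Gradio batches queued requests, calls the function once with one list per input component, and expects one list per output component, each the same length as the input. A minimal sketch of that contract (toy function, not the classifier):

# Toy batched Interface: Gradio passes up to max_batch_size queued inputs
# as a single list, and the function returns a list per output component.
import gradio as gr

def echo_batch(texts):  # texts: list[str], one entry per queued request
    return [[t.upper() for t in texts]]  # one list for the single output

demo = gr.Interface(
    fn=echo_batch,
    inputs=gr.Textbox(),
    outputs=gr.Textbox(),
    batch=True,
    max_batch_size=4,
)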
image_classifier/single_classifier.py
ADDED
@@ -0,0 +1,59 @@
+import gradio as gr
+import torch
+import timm
+from PIL import Image
+import requests
+
+
+class ImageClassifier:
+    def __init__(self):
+        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        # Create model and move to appropriate device
+        self.model = timm.create_model("resnet50.a1_in1k", pretrained=True)
+        self.model = self.model.to(self.device)
+        self.model.eval()
+
+        # Get model specific transforms
+        data_config = timm.data.resolve_model_data_config(self.model)
+        self.transform = timm.data.create_transform(**data_config, is_training=False)
+
+        # Load ImageNet labels
+        url = "https://storage.googleapis.com/bit_models/ilsvrc2012_wordnet_lemmas.txt"
+        self.labels = requests.get(url).text.strip().split("\n")
+
+    @torch.no_grad()
+    def predict(self, image):
+        if image is None:
+            return None
+
+        # Preprocess image
+        img = Image.fromarray(image).convert("RGB")
+        img_tensor = self.transform(img).unsqueeze(0).to(self.device)
+
+        # Get prediction
+        output = self.model(img_tensor)
+        probabilities = torch.nn.functional.softmax(output[0], dim=0)
+
+        # Get top 5 predictions
+        top5_prob, top5_catid = torch.topk(probabilities, 5)
+
+        return {
+            self.labels[idx.item()]: float(prob)
+            for prob, idx in zip(top5_prob, top5_catid)
+        }
+
+
+# Create classifier instance
+classifier = ImageClassifier()
+
+# Create Gradio interface
+demo = gr.Interface(
+    fn=classifier.predict,
+    inputs=gr.Image(type="numpy", label="Input Image"),
+    outputs=gr.Label(num_top_classes=5, label="Top 5 Predictions"),
+    title="Basic Image Classification with Mamba",
+    description="Upload an image to classify it using the resnet50.a1_in1k model",
+)
+
+if __name__ == "__main__":
+    demo.launch()
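This classifier can also be sanity-checked without launching the UI, since predict takes a plain numpy array. A sketch, assuming it is run from the repository root (the random image only confirms shapes and the top-5 dict format, not a meaningful prediction):

# Offline check of ImageClassifier.predict; importing the module builds the
# Gradio demo but does not launch it (launch is under the __main__ guard).
import numpy as np

from image_classifier.single_classifier import ImageClassifier

clf = ImageClassifier()
dummy = np.random.randint(0, 256, size=(224, 224, 3), dtype=np.uint8)
print(clf.predict(dummy))  # {label: probability} for the top 5 classes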
poetry.lock
CHANGED
@@ -11,6 +11,37 @@ files = [
     {file = "absl_py-2.1.0-py3-none-any.whl", hash = "sha256:526a04eadab8b4ee719ce68f204172ead1027549089702d99b9059f129ff1308"},
 ]
 
+[[package]]
+name = "accelerate"
+version = "1.1.1"
+description = "Accelerate"
+optional = false
+python-versions = ">=3.9.0"
+files = [
+    {file = "accelerate-1.1.1-py3-none-any.whl", hash = "sha256:61edd81762131b8d4bede008643fa1e1f3bf59bec710ebda9771443e24feae02"},
+    {file = "accelerate-1.1.1.tar.gz", hash = "sha256:0d39dfac557052bc735eb2703a0e87742879e1e40b88af8a2f9a93233d4cd7db"},
+]
+
+[package.dependencies]
+huggingface-hub = ">=0.21.0"
+numpy = ">=1.17,<3.0.0"
+packaging = ">=20.0"
+psutil = "*"
+pyyaml = "*"
+safetensors = ">=0.4.3"
+torch = ">=1.10.0"
+
+[package.extras]
+deepspeed = ["deepspeed"]
+dev = ["bitsandbytes", "black (>=23.1,<24.0)", "datasets", "diffusers", "evaluate", "hf-doc-builder (>=0.3.0)", "parameterized", "pytest (>=7.2.0,<=8.0.0)", "pytest-subtests", "pytest-xdist", "rich", "ruff (>=0.6.4,<0.7.0)", "scikit-learn", "scipy", "timm", "torchdata (>=0.8.0)", "torchpippy (>=0.2.0)", "tqdm", "transformers"]
+quality = ["black (>=23.1,<24.0)", "hf-doc-builder (>=0.3.0)", "ruff (>=0.6.4,<0.7.0)"]
+rich = ["rich"]
+sagemaker = ["sagemaker"]
+test-dev = ["bitsandbytes", "datasets", "diffusers", "evaluate", "scikit-learn", "scipy", "timm", "torchdata (>=0.8.0)", "torchpippy (>=0.2.0)", "tqdm", "transformers"]
+test-prod = ["parameterized", "pytest (>=7.2.0,<=8.0.0)", "pytest-subtests", "pytest-xdist"]
+test-trackers = ["comet-ml", "dvclive", "tensorboard", "wandb"]
+testing = ["bitsandbytes", "datasets", "diffusers", "evaluate", "parameterized", "pytest (>=7.2.0,<=8.0.0)", "pytest-subtests", "pytest-xdist", "scikit-learn", "scipy", "timm", "torchdata (>=0.8.0)", "torchpippy (>=0.2.0)", "tqdm", "transformers"]
+
 [[package]]
 name = "adlfs"
 version = "2024.7.0"

@@ -5679,13 +5710,13 @@ yaml = ["pyyaml (>=6.0.1)"]
 
 [[package]]
 name = "pydot"
-version = "3.0.
+version = "3.0.3"
 description = "Python interface to Graphviz's Dot"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "pydot-3.0.
-    {file = "pydot-3.0.
+    {file = "pydot-3.0.3-py3-none-any.whl", hash = "sha256:9b0b3081e0bd362d0c61148da10eb1281ec80089b02a28cf06f9093843986f3d"},
+    {file = "pydot-3.0.3.tar.gz", hash = "sha256:5e009d97b2fff92b7a88f09ec1fd5b163f07f3b10469c927d362471d6faa0d50"},
 ]
 
 [package.dependencies]

@@ -8304,4 +8335,4 @@ type = ["pytest-mypy"]
 [metadata]
 lock-version = "2.0"
 python-versions = "3.12.6"
-content-hash = "
+content-hash = "ce5c192bdc9f69e1c6fa059963bee1111580cd1665739f1453e9c2a4f8c2ee3a"
pyproject.toml
CHANGED
@@ -74,6 +74,9 @@ gpustat = "^1.1.1"
 nvitop = "^1.3.2"
 gradio = "5.7.1"
 gradio-client = "^1.5.0"
+accelerate = "^1.1.1"
+pyopenssl = "<23.0.0"
+cryptography = "^44.0.0"
 
 [tool.poetry.dev-dependencies]
 pytest-asyncio = "^0.20.3"
requirements.txt
ADDED
@@ -0,0 +1,3 @@
+gradio
+transformers
+torch