leadr64 committed
Commit a2b1b52
1 Parent(s): fc8cb68

Add the Gradio script and dependencies

Files changed (3):
  1. .env +2 -0
  2. .gitignore +2 -0
  3. app.py +23 -24
.env ADDED
@@ -0,0 +1,2 @@
+ QDRANT_URL="https://ebe79742-e3ac-4d09-a2c6-63946024cc7a.us-east4-0.gcp.cloud.qdrant.io"
+ QDRANT_KEY="_NnGLuSMH4Qwv-ancoFh88YvzuR7WbyidAorVOVQ_eMCbPhxTb2TSw"
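
The app reads these two values at runtime via python-dotenv. A quick way to confirm the file parses as intended (a minimal sketch; only the variable names come from this commit):

    from dotenv import dotenv_values

    # Parse the KEY="value" pairs from .env without mutating os.environ.
    config = dotenv_values(".env")
    print(sorted(config))  # expected: ['QDRANT_KEY', 'QDRANT_URL']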
.gitignore ADDED
@@ -0,0 +1,2 @@
+ # Ignore the .env file
+ .env
app.py CHANGED
@@ -1,55 +1,54 @@
  import os
+
  import gradio as gr
  from qdrant_client import QdrantClient
  from transformers import ClapModel, ClapProcessor
- from huggingface_hub import HfApi
-
- # Use the Hugging Face Hub API to retrieve the secrets
- api = HfApi()
- secrets = api.repository_config("leadr64/audi_data")  # Replace with your username and repository name
-
- qdrant_url = secrets.get("QDRANT_URL")
- qdrant_key = secrets.get("QDRANT_KEY")
-
- if not qdrant_url or not qdrant_key:
-     raise ValueError("QDRANT_URL and QDRANT_KEY must be set as secrets in your Hugging Face repository.")
-
- client = QdrantClient(qdrant_url, qdrant_key)
-
- # Loading the Qdrant database locally
- print("[INFO] Client créé...")
-
- # Loading the model
- print("[INFO] Chargement du modèle...")
- model_name = "laion/clap-large-v2"
+ from dotenv import load_dotenv
+
+ # Load the environment variables from the .env file
+ load_dotenv()
+
+ # Read the credentials from the environment variables
+ QDRANT_URL = os.getenv('QDRANT_URL')
+ QDRANT_KEY = os.getenv('QDRANT_KEY')
+
+ # Loading the Qdrant DB locally ###################################################################
+ client = QdrantClient(QDRANT_URL, api_key=QDRANT_KEY)
+ print("[INFO] Client created...")
+
+ # Loading the model
+ print("[INFO] Loading the model...")
+ model_name = "laion/larger_clap_general"
  model = ClapModel.from_pretrained(model_name)
  processor = ClapProcessor.from_pretrained(model_name)

- # Gradio Interface
+ # Gradio Interface ################################################################################
  max_results = 10

+
  def sound_search(query):
      text_inputs = processor(text=query, return_tensors="pt")
      text_embed = model.get_text_features(**text_inputs)[0]

      hits = client.search(
          collection_name="demo_spaces_db",
-         query_vector=text_embed.tolist(),  # Convert the tensor to a list
+         query_vector=text_embed.tolist(),
          limit=max_results,
      )
      return [
          gr.Audio(
-             value=hit.payload['audio_path'],
+             hit.payload['audio_path'],
              label=f"style: {hit.payload['style']} -- score: {hit.score}")
          for hit in hits
      ]

+
  with gr.Blocks() as demo:
      gr.Markdown(
-         """# Base de données de recherche de sons """
+         """# Sound search database """
      )
-     inp = gr.Textbox(placeholder="Quel son recherchez-vous ?")
+     inp = gr.Textbox(placeholder="What sound are you looking for?")
      out = [gr.Audio(label=f"{x}") for x in range(max_results)]  # Necessary to have different objects
-     inp.change(fn=sound_search, inputs=inp, outputs=out)
+     inp.change(sound_search, inp, out)

  demo.launch()
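
The search in app.py assumes a demo_spaces_db collection whose points carry an `audio_path` and a `style` payload alongside a CLAP audio embedding. A minimal indexing sketch consistent with that schema (the file paths, style tags, and collection setup are assumptions, not part of this commit; 512 is the projection dimension of this CLAP checkpoint):

    import os

    import librosa
    import torch
    from dotenv import load_dotenv
    from qdrant_client import QdrantClient
    from qdrant_client.models import Distance, PointStruct, VectorParams
    from transformers import ClapModel, ClapProcessor

    load_dotenv()
    client = QdrantClient(os.getenv("QDRANT_URL"), api_key=os.getenv("QDRANT_KEY"))

    model = ClapModel.from_pretrained("laion/larger_clap_general")
    processor = ClapProcessor.from_pretrained("laion/larger_clap_general")

    # Hypothetical local files and style tags; the dataset behind the demo is not shown here.
    tracks = [("sounds/rain.wav", "ambient"), ("sounds/kick.wav", "percussion")]

    points = []
    for idx, (path, style) in enumerate(tracks):
        # CLAP's audio tower expects 48 kHz input.
        audio, _ = librosa.load(path, sr=48_000)
        inputs = processor(audios=audio, sampling_rate=48_000, return_tensors="pt")
        with torch.no_grad():
            embed = model.get_audio_features(**inputs)[0]
        points.append(PointStruct(id=idx, vector=embed.tolist(),
                                  payload={"audio_path": path, "style": style}))

    # Create the collection with the embedding size CLAP produces.
    client.recreate_collection(
        collection_name="demo_spaces_db",
        vectors_config=VectorParams(size=512, distance=Distance.COSINE),
    )
    client.upsert(collection_name="demo_spaces_db", points=points)

Cosine distance is the usual choice for CLAP embeddings; if the real collection was created with a different metric, the search scores shown in the Gradio labels would scale differently.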