commit
- Mindfulness.txt +42 -0
- README.md +6 -5
- app.py +160 -0
- gitattributes.txt +27 -0
- requirements.txt +6 -0
- test.json +12 -0
Mindfulness.txt
ADDED
@@ -0,0 +1,42 @@
+Mindful Path to Health and Happiness
+Target Path
+Stress Break Cycle of Stress and Anxiety
+Stress Feel Calm in Stressful Situations
+Stress Deal with Work Pressure
+Stress Learn to Reduce Feelings of Overwhelmed, Burned Out, Exhausted
+Stress 1) Mental Body Scan. 2) Stretch, Calm, Breath. 3) Relaxed Seat Breath. 4) Walk Feel.
+Stress Brain gamification - Health trackers only if alleviating stress or helping breathing, satisfaction
+Pain Relieve Stress, Build Support
+Pain 1) Relaxation Response. 2) Deep Breaths. 3) CBT- ID, Delete Not Helpful Thoughts, Strengthen Helpful. 4) Reprogram Pain Stress Reactions
+Sleep Sleep Better and Find Joy
+Sleep Yoga Sleep: 1) All Fours, Back Up Down. 2) Kneel Arms Overhead Down. 3) Feet Lift and Lower. 4) Side Hip Knee Raise. 5) Flat on Back
+Goals Being a Happier and Healthier Person
+Pain Relieve Pain
+Happiness Learn to Use Mindfulness to Affect Well Being
+Actions Build and Boost Mental Strength
+Actions Spending Time Outdoors
+Actions Daily Routine Tasks
+Actions Eating and Drinking - Find Healthy Nutrition Habits
+Actions Drinking - Find Reasons and Cut Back or Quit Entirely
+Actions Niksen, Do Nothing, To Just Be, Idle, Hang Around, Look Around, Listen Music, Without Purpose
+Actions Tai Chi
+Actions Yoga and Meditation
+Actions Take a Brain Break - Let Old Brain Amygdala Which Affects Feelings and Emotions, Settle and Be Quiet
+Actions Learn with Neocortex Tools and Techniques to Strengthen Attention
+Actions Infuse Mindfulness into Easy Daily Activities
+Actions 1) Sit. 2) Just Breath. 3) Thoughts Come, Let Go, Breath. 4) Wandering, Let Go, Breath. 5) Note Your Attention Strength Build
+Actions 1) Gratitude. 2) Escape Comfort Zone. 3) Be Alone w Thoughts. 4) Self Compassion. 5) Assert Power. 6) Label Emotions. 7) Use Mind Wisely
+Actions 1) Walk with Me. 2) Reflect on Day. 3) Learn to Meditate.
+Actions Work Smarter: 1) Cut Yourself Slack. 2) Set Goals. 3) Take Breaks. 4) Just Breath. 5) One Thing at a Time. 6) Schedule.
+Actions Mental Strength: 1) Your Friend You Talk. 2) Affirmation Mantra. 3) Future Self. 4) Name Feelings. 5) Coping Methods. 6) First Smart Step
+Physical Practices to Remedy Physical and Emotional Issues
+Physical Learn Minds Ability to Naturally Heal
+Physical Feel Better Physically
+Physical Manage Blood Sugar Levels Affecting Weight Gain and Muscles
+Physical Manage Heart and Cardiovascular Risk
+Practice Ways to Be Mindful Each Day
+Practice Define and Try Your Best Version of Meditation
+Practice Self Care and Community Care - Authentic Connection in RL and Online
+Practice 1) Stay Social. 2) Be Neighborly. 3) Hobbies. 4) Exercise. 5) Be Kinder than Necessary. 6) Inhale, Exhale.
+Practice Learning and Memory: 1) Work your Brain Muscle. 2) Delete and Clear Distractions. 3) Minimize Switching Attention Residue. 4) Completion and Save Points.
+Practice Focus: 1) Attentiveness. 2) Learning. 3) Mindfulness.
README.md
CHANGED
@@ -1,12 +1,13 @@
 ---
-title:
-emoji:
-colorFrom:
+title: 🗣️SpeakUp🙉 - NLP Speech 2 Text 2 Speech Generator AI Pipeline
+emoji: 🗣️🎤🙉
+colorFrom: red
 colorTo: purple
 sdk: gradio
-sdk_version: 3.
+sdk_version: 3.0.11
 app_file: app.py
 pinned: false
+license: mit
 ---

-Check out the configuration reference at https://huggingface.co/docs/hub/spaces
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
app.py
ADDED
@@ -0,0 +1,160 @@
+import streamlit as st
+import datetime
+from transformers import pipeline
+import gradio as gr
+
+import tempfile
+from typing import Optional
+import numpy as np
+from TTS.utils.manage import ModelManager
+from TTS.utils.synthesizer import Synthesizer
+
+# PersistDataset -----
+import os
+import csv
+import gradio as gr
+from gradio import inputs, outputs
+import huggingface_hub
+from huggingface_hub import Repository, hf_hub_download, upload_file
+from datetime import datetime
+
+# created new dataset as awacke1/MindfulStory.csv
+DATASET_REPO_URL = "https://huggingface.co/datasets/awacke1/MindfulStory.csv"
+DATASET_REPO_ID = "awacke1/MindfulStory.csv"
+DATA_FILENAME = "MindfulStory.csv"
+DATA_DIRNAME = "data"  # local cache directory (referenced below but previously undefined)
+DATA_FILE = os.path.join("data", DATA_FILENAME)
+HF_TOKEN = os.environ.get("HF_TOKEN")
+
+# Download dataset repo using hub download
+try:
+    hf_hub_download(
+        repo_id=DATASET_REPO_ID,
+        filename=DATA_FILENAME,
+        cache_dir=DATA_DIRNAME,
+        force_filename=DATA_FILENAME
+    )
+except Exception:
+    print("file not found")
+
+# Append a (name, message, time) row to the local CSV and push it to the dataset repo.
+def AIMemory(name: str, message: str):
+    if name and message:
+        with open(DATA_FILE, "a") as csvfile:
+            writer = csv.DictWriter(csvfile, fieldnames=["name", "message", "time"])
+            writer.writerow({"name": name, "message": message, "time": str(datetime.now())})
+        commit_url = repo.push_to_hub()
+    return {"name": name, "message": message, "time": str(datetime.now())}
+
+with open('Mindfulness.txt', 'r') as file:
+    context = file.read()  # loaded for reference; not referenced again below
+
+# Set up cloned dataset from repo for operations
+repo = Repository(local_dir="data", clone_from=DATASET_REPO_URL, use_auth_token=HF_TOKEN)
+
+# set up ASR
+asr = pipeline("automatic-speech-recognition", "facebook/wav2vec2-base-960h")
+
+# set up TTS
+MODEL_NAMES = [
+    "en/ljspeech/tacotron2-DDC",
+    "en/ljspeech/glow-tts",
+    "en/ljspeech/speedy-speech-wn",
+    "en/ljspeech/vits",
+    "en/sam/tacotron-DDC",
+    "fr/mai/tacotron2-DDC",
+    "de/thorsten/tacotron2-DCA",
+]
+
+# Use Model Manager to load vocoders
+MODELS = {}
+manager = ModelManager()
+for MODEL_NAME in MODEL_NAMES:
+    print(f"downloading {MODEL_NAME}")
+    model_path, config_path, model_item = manager.download_model(f"tts_models/{MODEL_NAME}")
+    vocoder_name: Optional[str] = model_item["default_vocoder"]
+    vocoder_path = None
+    vocoder_config_path = None
+    if vocoder_name is not None:
+        vocoder_path, vocoder_config_path, _ = manager.download_model(vocoder_name)
+
+    synthesizer = Synthesizer(
+        model_path, config_path, None, vocoder_path, vocoder_config_path,
+    )
+    MODELS[MODEL_NAME] = synthesizer
+
+# transcribe
+def transcribe(audio):
+    text = asr(audio)["text"]
+    return text
+
+# text classifier
+classifier = pipeline("text-classification")
+
+
+def speech_to_text(speech):
+    text = asr(speech)["text"]
+    #rMem = AIMemory("STT", text)
+    return text
+
+def text_to_sentiment(text):
+    sentiment = classifier(text)[0]["label"]
+    #rMem = AIMemory(text, sentiment)
+    return sentiment
+
+# NOTE: upsert/select/selectall expect a Firestore client `db`, which is never
+# initialized in this file; the buttons that call them are commented out below.
+def upsert(text):
+    date_time = str(datetime.today())
+    doc_ref = db.collection('Text2SpeechSentimentSave').document(date_time)
+    doc_ref.set({u'firefield': 'Recognize Speech', u'first': 'https://huggingface.co/spaces/awacke1/TTS-STT-Blocks/', u'last': text, u'born': date_time,})
+    saved = select('TTS-STT', date_time)
+    return saved
+
+def select(collection, document):
+    doc_ref = db.collection(collection).document(document)
+    doc = doc_ref.get()
+    docid = ("The id is: ", doc.id)
+    contents = ("The contents are: ", doc.to_dict())
+    return contents
+
+def selectall(text):
+    docs = db.collection('Text2SpeechSentimentSave').stream()
+    doclist = ''
+    for doc in docs:
+        r = (f'{doc.id} => {doc.to_dict()}')
+        doclist += r
+    return doclist
+
+def tts(text: str, model_name: str):
+    print(text, model_name)
+    synthesizer = MODELS.get(model_name, None)
+    if synthesizer is None:
+        raise NameError("model not found")
+    wavs = synthesizer.tts(text)
+    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as fp:
+        synthesizer.save_wav(wavs, fp)
+
+    #rMem = AIMemory("TTS", text + model_name)
+
+    return fp.name
+
+demo = gr.Blocks()
+with demo:
+    audio_file = gr.inputs.Audio(source="microphone", type="filepath")
+    text = gr.Textbox(label="Speech to Text")
+    #label = gr.Label()
+    #saved = gr.Textbox(label="Saved")
+    #savedAll = gr.Textbox(label="SavedAll")
+    TTSchoice = gr.inputs.Radio(label="Pick a Text to Speech Model", choices=MODEL_NAMES)
+    audio = gr.Audio(label="Output", interactive=False)
+
+    b1 = gr.Button("Recognize Speech")
+    #b2 = gr.Button("Classify Sentiment")
+    #b3 = gr.Button("Save Speech to Text")
+    #b4 = gr.Button("Retrieve All")
+    b5 = gr.Button("Read It Back Aloud")
+
+    b1.click(speech_to_text, inputs=audio_file, outputs=text)
+    #b2.click(text_to_sentiment, inputs=text, outputs=label)
+    #b3.click(upsert, inputs=text, outputs=saved)
+    #b4.click(selectall, inputs=text, outputs=savedAll)
+    b5.click(tts, inputs=[text, TTSchoice], outputs=audio)
+
+demo.launch(share=True)
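
Note: the Blocks UI above uses the gr.inputs / gr.outputs wrapper classes that shipped with the pinned sdk_version 3.0.11; those wrappers were later removed from Gradio. For reference only, here is a minimal sketch of the same STT-to-TTS wiring against a newer release (assuming Gradio 4.x and reusing the speech_to_text, tts, and MODEL_NAMES definitions from app.py above; this sketch is not part of the commit).

import gradio as gr

# Assumes speech_to_text, tts, and MODEL_NAMES are defined as in app.py above.
with gr.Blocks() as demo:
    audio_file = gr.Audio(sources=["microphone"], type="filepath", label="Record")
    text = gr.Textbox(label="Speech to Text")
    TTSchoice = gr.Radio(label="Pick a Text to Speech Model", choices=MODEL_NAMES)
    audio = gr.Audio(label="Output", interactive=False)

    b1 = gr.Button("Recognize Speech")
    b5 = gr.Button("Read It Back Aloud")

    b1.click(speech_to_text, inputs=audio_file, outputs=text)
    b5.click(tts, inputs=[text, TTSchoice], outputs=audio)

demo.launch()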
gitattributes.txt
ADDED
@@ -0,0 +1,27 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zstandard filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
requirements.txt
ADDED
@@ -0,0 +1,6 @@
+transformers
+streamlit
+Werkzeug==2.0.3
+huggingface_hub==0.4.0
+torch
+TTS==0.2.1
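
Note: requirements.txt pins huggingface_hub==0.4.0, the version whose Repository class app.py uses to clone the MindfulStory.csv dataset and push appended rows. Purely as an illustration (not part of the commit), the same append-and-push flow can be done with a single upload_file call on newer huggingface_hub releases; the sketch below assumes a current huggingface_hub, an HF_TOKEN with write access, and the same awacke1/MindfulStory.csv dataset, and the save_row helper name is hypothetical.

import csv
import os
from datetime import datetime
from huggingface_hub import upload_file

DATA_FILE = os.path.join("data", "MindfulStory.csv")

def save_row(name: str, message: str):
    # Append one row locally, then push the whole CSV to the dataset repo.
    os.makedirs("data", exist_ok=True)
    with open(DATA_FILE, "a", newline="") as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=["name", "message", "time"])
        writer.writerow({"name": name, "message": message, "time": str(datetime.now())})
    upload_file(
        path_or_fileobj=DATA_FILE,
        path_in_repo="MindfulStory.csv",
        repo_id="awacke1/MindfulStory.csv",
        repo_type="dataset",
        token=os.environ.get("HF_TOKEN"),
    )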
test.json
ADDED
@@ -0,0 +1,12 @@
+{
+"type": "service_account",
+"project_id": "clinical-nlp-b9117",
+"private_key_id": "6972d02311e8ee0c5b582551fbcf9c99b9169b58",
"private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCmrSoB92G/ihxL\nzIk7Y8RUNc6Iezr6pZ+eSz2RGxEz2qPMfWjNeOJEAlACYJp4aUwyX5IHGb8Eh/oj\nkr7nVsgvuDyrTWpCAv16AuRycKgxvqj0+uDaVrF0vLgTumy62x5QM7i+n2YTDXoP\nXHMHX7yXZ6zc9Ibmm065f2kgWyjmIZDt+flTBYeBS203ZIzMBHhN1e1jdtzR36z/\n1MBmLjpRKvmuHF2SnraVjoRh7Xe6R99K8DxRQ61TJt9xLukvLBYelnqf2/cK8bZM\n5p2pErR4FE7ki3MX7HWdMJQSe+Uj10hurjNBdHcCaNUou5EL5+NRgqLow0tfatWC\n+Jpiw3K9AgMBAAECggEAGpT7YhzmBfos0RnpuQMMSLHcIoAkw9yuPDybsQy0DaUN\nAovtrvdcfqQvxnFJsXJ5qH79dwxwHnThO9MnhxWcD6A+bMOH8scvTcowTOASsvxJ\nTejE+41f99IxOVQ+Cv7vMrNM/3nEeb1ofhKsdbybAzqRoxuMeDLEt2jOh06Ck1D8\n/YV8kavGYR/VNxO2l7C5DZJYXgcm18ZrTFEXZes8bydZesoHl+JRVO1utjR2IhAj\nnYqqNaf5RXruEzXWxP0+jjEgg4NLFfqVnQTZFrLwokwc8NEMXf3dZJ0k0cHHmxOB\n6BHuPZhMOZ56U74PyWgCmbPp9g/SLt3iInpZ4ahmAQKBgQDhQwdbUEQ1q+KSMsMm\ndJl+ghX/Ff3uaZ7LjdBiOgtmTaIVbuf/bw0V9x8GbRGdJJyp546R5vhUE0zKzkMt\nTNdDNrWk3Zh4tCRHvPEHiqmDn91pWFeDDQf/OjKz+SFV31mQ050BOatZ8dBEy+md\nvHG8yLTB7oJvSpviim4ty15wIQKBgQC9a5jsBFB0fltHNJ0lZp7I2hF+aOqOngJM\nqEipPjJABJ4izGTOK/KW8CyWEP82nb6p7u9v0f4sV8CFWXG178DMv1NlRYzom3CQ\nkXdx+nRgO4oX4eEfYuoP2PxF0hCOwbh55NgFdwTt/dExX6bau4d9yQMV7o0TXpRW\nZzygOOTfHQKBgQC7ayhwyfymZydwmjmSAks/XX5tqN+IgGo1U/1/7GlVqdvkV01B\nUiUiFGTE1PRluXN7TYRqUjBky1YGGsz7oMYtTxScYh6ctszEvygPLUhSki0GnBDb\noXj42nQbF3mr19POUrJ7tX6irDWrN7lcmtBK0PbLr+ToMbw3JRP8mAsv4QKBgEac\nC18/pHYofAIpHMNKY7pff9HtbjJHuHe2648bPkQa9I/oPVOVklKtqREvuNM1LlPO\nW7cFQohpFb0fwIGfo/EvCPlhWcuD1gwuDaaRRDxzNWD9tJusla/epPup+L4efJQD\nuHshCNdmnEqZa2tyKGm9Osc8K56izQ0AYtsfGkIJAoGAMtaXTA96OXUvpEm4waQX\nOTbuEZQEdntnYWHacNrGlvwnNmvNC9hXwB38ijxXHEn0j1QUcV3w5QXFupwzjpZ2\nlIp9vTq1mOTVhHzmQmOb9DKKAE/2pi2HnekItncoQCBtgJ7k6tIk1KEfvXuQS/oM\nh8qPMwuMcQ/vKGhl3xLYo9M=\n-----END PRIVATE KEY-----\n",
+"client_email": "firebase-adminsdk-qaxaj@clinical-nlp-b9117.iam.gserviceaccount.com",
+"client_id": "117623958723912081118",
+"auth_uri": "https://accounts.google.com/o/oauth2/auth",
+"token_uri": "https://oauth2.googleapis.com/token",
+"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
+"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/firebase-adminsdk-qaxaj%40clinical-nlp-b9117.iam.gserviceaccount.com"
+}
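
Note: test.json is a Firebase service-account credential (including its private key) for the clinical-nlp-b9117 project, which matches the Firestore-backed upsert/select/selectall helpers in app.py that reference an uninitialized db client. For context only, a Firestore client is typically created from such a file with the firebase-admin package (not listed in requirements.txt); the sketch below is an assumption about how db would be wired up, not part of the commit.

import firebase_admin
from firebase_admin import credentials, firestore

# Initialize the Admin SDK from the service-account file committed above.
cred = credentials.Certificate("test.json")
firebase_admin.initialize_app(cred)

# This is the `db` object that upsert/select/selectall in app.py expect.
db = firestore.client()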