from geopy.geocoders import Nominatim
import pygame
import pywhatkit
from gtts import gTTS
import pyautogui
import keyboard
from newspaper import Article
from PIL import ImageGrab
from ctypes import cast, POINTER
from comtypes import CLSCTX_ALL
from pycaw.pycaw import AudioUtilities, IAudioEndpointVolume
import pywhatkit as kit
from PyQt5 import QtCore, QtGui, QtWidgets
import sys
from PyQt5.QtWidgets import QApplication, QWidget
import threading
import io
import speedtest
from datetime import date
import pyrebase
import comtypes
import subprocess
import wolframalpha
import pyttsx3
import random
import operator
import speech_recognition as sr
import datetime
import wikipedia
import os
import winshell
import tempfile
import pyjokes
from PyQt5.QtGui import QIcon
import math
import random
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QLineEdit
from PyQt5.QtGui import QIcon, QPixmap
from PyQt5 import QtCore
import google.generativeai as palm
import io
from PIL import Image
import requests
import mediapipe as mp
import pyaudio
from googletrans import Translator
import ctypes
import cv2
from AppOpener import open
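# KAI 0.2: a Sinhala/English voice assistant. It listens with speech_recognition, answers with pyttsx3/gTTS, logs the conversation to Firebase, and automates the desktop with pyautogui and keyboard.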
engine = pyttsx3.init(driverName='sapi5')
def translate_en_to_si(text):
translator = Translator()
result = translator.translate(text, src='en', dest='si')  # target language 'si' (Sinhala)
return result.text
comtypes.CoInitialize()
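# Firebase project configuration; recognized and spoken text is logged under "talk/<date>".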
firebaseConfig = {
'apiKey': "AIzaSyCw9hucT1SFugijJEdQVAi4JhrUIMCb7fE",
'authDomain': "newsing-54876.firebaseapp.com",
'databaseURL': "https://newsing-54876-default-rtdb.firebaseio.com",
'projectId': "newsing-54876",
'storageBucket': "newsing-54876.appspot.com",
'messagingSenderId': "717445861117",
'appId': "1:717445861117:web:7308d19084d09d8b30f894",
'measurementId': "G-2W3TL7PHMG"
}
firebase = pyrebase.initialize_app(firebaseConfig)
storage = firebase.storage()
db = firebase.database()
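# Listen on the microphone, transcribe the audio with Google Speech Recognition (Sinhala), log the text to Firebase, and return it.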
def take_command():
command = sr.Recognizer()
with sr.Microphone() as source:
print('Listening...')
command.pause_threshold = 1
command.adjust_for_ambient_noise(source)
audio = command.listen(source, timeout=9)
try:
print("Recognizing...")
recognized_text = command.recognize_google(audio, language="si")
# Get today's date
today = date.today()
date_str = today.strftime("%Y/%m/%d")
# Save the recognized text to Firebase under the date-based structure
db.child("talk").child(date_str).push({"text": recognized_text})
return recognized_text
except sr.UnknownValueError:
print("Could not understand audio.")
return ""
except sr.RequestError as e:
print(f"Error in API request: {e}")
return ""
def talk(Text):
engine = pyttsx3.init()
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[1].id)
engine.setProperty('rate', 170)
# Speak the text
engine.say(Text)
engine.runAndWait()
# Get today's date
today = date.today()
date_str = today.strftime("%Y/%m/%d")
# Save the spoken text to Firebase under the date-based structure
db.child("talk").child(date_str).push({"text": Text})
print(Text)
engine.stop()
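# Clear the console window (Windows 'cls').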
def clear():
os.system('cls')
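# run_kai: clear the screen, define helper actions (paste, close, copy, select all, write), then take one voice command and dispatch it by keyword.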
def run_kai():
clear()
import time
import geocoder
def paste():
talk('pasting sir')
pyautogui.hotkey('ctrl', 'v')
time.sleep(0.5)
def close():
try:
clo = command.replace('close','')
from AppOpener import close
def translate_en_to_si(text):
translator = Translator()
result = translator.translate(text, src='si', dest='en')
return result.text
if __name__ == "__main__":
english_text = clo
sinhala_text = translate_en_to_si(english_text)
full = 'closing ' + sinhala_text
talk('closing ' + sinhala_text)
close(sinhala_text)
except:
talk('closing')
pyautogui.hotkey('alt', 'f4')
time.sleep(0.5)
def copy():
talk('copied sir')
pyautogui.hotkey('ctrl', 'c')
time.sleep(0.5)
def all():
talk('selecting sir')
pyautogui.click()
time.sleep(0.5)
pyautogui.hotkey('ctrl', 'a')
time.sleep(0.5)
def write():
talk('What do you want to write down?')
wr = take_command()
print(wr)
keyboard.write(wr, delay=0.05)
time.sleep(0.5)
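# Listen for the user's command and match it against the keyword handlers below (English, Sinhala, and Singlish).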
command = take_command()
print(command)
if 'close' in command:
close()
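# Wallpaper command: pick a random image from the local 'wallpaper' folder and set it as the desktop background via SystemParametersInfoW.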
elif 'wallpaper' in command:
import ctypes
# Define constants
SPI_SETDESKWALLPAPER = 20
# Relative path to the wallpaper folder
wallpaper_folder = "wallpaper"
# Get the current directory of the script
script_directory = os.path.dirname(os.path.abspath(__file__))
# Combine the script directory and the relative wallpaper folder path
wallpaper_folder = os.path.join(script_directory, wallpaper_folder)
# Get a list of all image files in the folder
wallpapers = [
os.path.join(wallpaper_folder, filename)
for filename in os.listdir(wallpaper_folder)
if filename.lower().endswith((".jpg", ".jpeg", ".png", ".bmp", ".gif"))
]
# Check if there are any wallpapers in the folder
if not wallpapers:
talk("No wallpaper images found in the folder. \wallpaper")
else:
# Randomly select a wallpaper from the list
selected_wallpaper = random.choice(wallpapers)
# Call the SystemParametersInfo function to set the wallpaper
result = ctypes.windll.user32.SystemParametersInfoW(
SPI_SETDESKWALLPAPER, 0, selected_wallpaper, 3
)
# Check if the wallpaper was set successfully
if result:
talk(f"Wallpaper changed successfully.")
else:
talk(f"Failed to change wallpaper. Error code: {ctypes.GetLastError()}")
elif 'restore' in command:
talk('restoring')
# URL of Gmail
time.sleep(0.5)
pyautogui.hotkey('shift', 'ctrl', 't')
elif 'රිස්ටොර්' in command:
talk('restoring')
# URL of Gmail
time.sleep(0.5)
pyautogui.hotkey('shift', 'ctrl', 't')
elif 'වෝල්පෙපර්' in command:
import ctypes
# Define constants
SPI_SETDESKWALLPAPER = 20
# Relative path to the wallpaper folder
wallpaper_folder = "wallpaper"
# Get the current directory of the script
script_directory = os.path.dirname(os.path.abspath(__file__))
# Combine the script directory and the relative wallpaper folder path
wallpaper_folder = os.path.join(script_directory, wallpaper_folder)
# Get a list of all image files in the folder
wallpapers = [
os.path.join(wallpaper_folder, filename)
for filename in os.listdir(wallpaper_folder)
if filename.lower().endswith((".jpg", ".jpeg", ".png", ".bmp", ".gif"))
]
# Check if there are any wallpapers in the folder
if not wallpapers:
talk("No wallpaper images found in the folder. \wallpaper")
else:
# Randomly select a wallpaper from the list
selected_wallpaper = random.choice(wallpapers)
# Call the SystemParametersInfo function to set the wallpaper
result = ctypes.windll.user32.SystemParametersInfoW(
SPI_SETDESKWALLPAPER, 0, selected_wallpaper, 3
)
# Check if the wallpaper was set successfully
if result:
talk(f"Wallpaper changed successfully.")
else:
talk(f"Failed to change wallpaper. Error code: {ctypes.GetLastError()}")
elif 'කෝස්' in command:
try:
clo = command.replace('කෝස්', '')
from AppOpener import close
def translate_en_to_si(text):
translator = Translator()
result = translator.translate(text, src='si', dest='en')
return result.text
if __name__ == "__main__":
english_text = clo
sinhala_text = translate_en_to_si(english_text)
full = 'closing ' + sinhala_text
talk('closing ' + sinhala_text)
close(sinhala_text)
except:
talk('closing')
pyautogui.hotkey('alt', 'f4')
time.sleep(0.5)
elif 'cross' in command:
try:
clo = command.replace('cross', '')
from AppOpener import close
def translate_en_to_si(text):
translator = Translator()
result = translator.translate(text, src='si', dest='en')
return result.text
if __name__ == "__main__":
english_text = clo
sinhala_text = translate_en_to_si(english_text)
full = 'closing ' + sinhala_text
talk('closing ' + sinhala_text)
close(sinhala_text)
except:
talk('closing')
pyautogui.hotkey('alt', 'f4')
time.sleep(0.5)
elif 'ක්ලෝස්' in command:
try:
clo = command.replace('ක්ලෝස්', '')
from AppOpener import close
def translate_en_to_si(text):
translator = Translator()
result = translator.translate(text, src='si', dest='en')
return result.text
if __name__ == "__main__":
english_text = clo
sinhala_text = translate_en_to_si(english_text)
full = 'closing ' + sinhala_text
talk('closing ' + sinhala_text)
close(sinhala_text)
except:
talk('closing')
pyautogui.hotkey('alt', 'f4')
time.sleep(0.5)
elif 'paste' in command:
paste()
elif 'පේස්ට්' in command:
paste()
elif 'කොපි' in command:
copy()
elif 'copy' in command:
copy()
elif 'පිටපත' in command:
copy()
elif 'all' in command:
all()
elif 'ඕල්' in command:
all()
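# Screenshot command: save a numbered PNG into the 'screenshots' folder using ImageGrab.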
elif 'shot' in command:
def get_next_screenshot_number():
if not os.path.exists("screenshots"):
os.mkdir("screenshots")
existing_screenshots = [int(filename.split(".")[0]) for filename in os.listdir("screenshots") if
filename.endswith(".png")]
if existing_screenshots:
return max(existing_screenshots) + 1
else:
return 1
def take_and_save_screenshot(output_path):
screenshot = ImageGrab.grab()
screenshot.save(output_path)
talk(f"Screenshot saved to {output_path}")
if __name__ == "__main__":
screenshot_number = get_next_screenshot_number()
screenshot_filename = f"screenshots/{screenshot_number:03d}.png"
take_and_save_screenshot(screenshot_filename)
elif 'ශොට්' in command:
def get_next_screenshot_number():
if not os.path.exists("screenshots"):
os.mkdir("screenshots")
existing_screenshots = [int(filename.split(".")[0]) for filename in os.listdir("screenshots") if
filename.endswith(".png")]
if existing_screenshots:
return max(existing_screenshots) + 1
else:
return 1
def take_and_save_screenshot(output_path):
screenshot = ImageGrab.grab()
screenshot.save(output_path)
talk(f"Screenshot saved to {output_path}")
if __name__ == "__main__":
screenshot_number = get_next_screenshot_number()
screenshot_filename = f"screenshots/{screenshot_number:03d}.png"
take_and_save_screenshot(screenshot_filename)
elif 'recycle bin' in command:
winshell.recycle_bin().empty(confirm=False, show_progress=False, sound=True)
talk("Recycle Bin Recycled")
elif 'රීසෛකල් බින්' in command:
winshell.recycle_bin().empty(confirm=False, show_progress=False, sound=True)
talk("Recycle Bin Recycled")
elif 'notification' in command:
talk('wait')
pyautogui.hotkey('win','a')
time.sleep(0.5)
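# Translate commands: detect the source language, translate to English, then translate to the requested target language with googletrans.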
elif 'ට්‍රාන්ස්ලේට්' in command:
talk('tell me the text to translate')
def translate_text(text, target_language_name):
translator = Translator()
try:
# Detect the source language of the input text
source_language = translator.detect(text).lang
# Translate the input text to English
text_in_source_lang = translator.translate(text, src=source_language, dest='en').text
# Translate the English text to the target language
translated_text = translator.translate(text_in_source_lang, dest=target_language_name).text
return translated_text
except Exception as e:
return str(e)
if __name__ == '__main__':
text_to_translate = take_command()
print(text_to_translate)
talk('tell me the target language name (e.g., english)')
target_language_name = take_command()
print(target_language_name)
try:
translated_text = translate_text(text_to_translate, target_language_name)
talk(text_to_translate)
talk(translated_text)
except Exception as e:
print(f"Error: {str(e)}")
elif 'පරිවර්තනය' in command:
talk('tell me the text to translate')
def translate_text(text, target_language_name):
translator = Translator()
try:
# Detect the source language of the input text
source_language = translator.detect(text).lang
# Translate the input text to English
text_in_source_lang = translator.translate(text, src=source_language, dest='en').text
# Translate the English text to the target language
translated_text = translator.translate(text_in_source_lang, dest=target_language_name).text
return translated_text
except Exception as e:
return str(e)
if __name__ == '__main__':
text_to_translate = take_command()
print(text_to_translate)
talk('tell me the target language name (e.g., english)')
target_language_name = take_command()
print(target_language_name)
try:
translated_text = translate_text(text_to_translate, target_language_name)
talk(text_to_translate)
talk(translated_text)
except Exception as e:
print(f"Error: {str(e)}")
elif 'translate' in command:
talk('tell me the text to translate')
def translate_text(text, target_language_name):
translator = Translator()
try:
# Detect the source language of the input text
source_language = translator.detect(text).lang
# Translate the input text to English
text_in_source_lang = translator.translate(text, src=source_language, dest='en').text
# Translate the English text to the target language
translated_text = translator.translate(text_in_source_lang, dest=target_language_name).text
return translated_text
except Exception as e:
return str(e)
if __name__ == '__main__':
text_to_translate = take_command()
print(text_to_translate)
talk('tell me the target language name (e.g., english)')
target_language_name = take_command()
print(target_language_name)
try:
translated_text = translate_text(text_to_translate, target_language_name)
talk(text_to_translate)
talk(translated_text)
except Exception as e:
print(f"Error: {str(e)}")
elif 'search in youtube' in command:
talk('what do you want to search in youtube?')
sear = take_command()
print(sear)
def translate_en_to_si(text):
translator = Translator()
result = translator.translate(text, src='si', dest='en')
return result.text
if __name__ == "__main__":
english_text = sear
sinhala_text = translate_en_to_si(english_text)
talk('Searching sir ' + sinhala_text)
def openapp2():
talk('opening Chrome')
import webbrowser
webbrowser.open('https://www.youtube.com/')
time.sleep(0.5)
pyautogui.hotkey('win', 'up')
time.sleep(0.5)
pyautogui.hotkey('win', 'up')
time.sleep(0.5)
pyautogui.hotkey('win', 'up')
time.sleep(0.5)
pyautogui.hotkey('win', 'up')
time.sleep(0.5)
pyautogui.press('enter')
time.sleep(1)
# Focus the YouTube search box
pyautogui.press('/')
time.sleep(1)
# Type the search query
keyboard.write(sear, delay=0.05)
time.sleep(3)
talk('searching ' + sinhala_text)
pyautogui.press('enter')
time.sleep(0.5)
openapp2()
elif 'යූටියුබ් එකේ හොයන්න' in command:
talk('what do you want to search in youtube?')
sear = take_command()
print(sear)
def translate_en_to_si(text):
translator = Translator()
result = translator.translate(text, src='si', dest='en')
return result.text
if __name__ == "__main__":
english_text = sear
sinhala_text = translate_en_to_si(english_text)
talk('Searching sir ' + sinhala_text)
def openapp2():
talk('opening Chrome')
import webbrowser
webbrowser.open('https://www.youtube.com/')
time.sleep(0.5)
pyautogui.hotkey('win', 'up')
time.sleep(0.5)
pyautogui.hotkey('win', 'up')
time.sleep(0.5)
pyautogui.hotkey('win', 'up')
time.sleep(0.5)
pyautogui.hotkey('win', 'up')
time.sleep(0.5)
pyautogui.press('enter')
time.sleep(1)
# Focus the YouTube search box
pyautogui.press('/')
time.sleep(1)
# Type the search query
keyboard.write(sear, delay=0.05)
time.sleep(3)
talk('searching ' + sinhala_text)
pyautogui.press('enter')
time.sleep(0.5)
openapp2()
elif 'යූටියුබ් එකේ සොයන්න' in command:
talk('what do you want to search in youtube?')
sear = take_command()
print(sear)
def translate_en_to_si(text):
translator = Translator()
result = translator.translate(text, src='si', dest='en')
return result.text
if __name__ == "__main__":
english_text = sear
sinhala_text = translate_en_to_si(english_text)
talk('Searching sir ' + sinhala_text)
def openapp2():
talk('opening Chrome')
import webbrowser
webbrowser.open('https://www.youtube.com/')
time.sleep(0.5)
pyautogui.hotkey('win', 'up')
time.sleep(0.5)
pyautogui.hotkey('win', 'up')
time.sleep(0.5)
pyautogui.hotkey('win', 'up')
time.sleep(0.5)
pyautogui.hotkey('win', 'up')
time.sleep(0.5)
pyautogui.press('enter')
time.sleep(1)
# Focus the YouTube search box
pyautogui.press('/')
time.sleep(1)
# Type the search query
keyboard.write(sear, delay=0.05)
time.sleep(3)
talk('searching ' + sinhala_text)
pyautogui.press('enter')
time.sleep(0.5)
openapp2()
elif 'යූටියුබ් එකේ සර්ච්' in command:
talk('what do you want to search in youtube?')
sear = take_command()
print(sear)
def translate_en_to_si(text):
translator = Translator()
result = translator.translate(text, src='si', dest='en')
return result.text
if __name__ == "__main__":
english_text = sear
sinhala_text = translate_en_to_si(english_text)
talk('Searching sir ' + sinhala_text)
def openapp2():
talk('opening Chrome')
import webbrowser
webbrowser.open('https://www.youtube.com/')
time.sleep(0.5)
pyautogui.hotkey('win', 'up')
time.sleep(0.5)
pyautogui.hotkey('win', 'up')
time.sleep(0.5)
pyautogui.hotkey('win', 'up')
time.sleep(0.5)
pyautogui.hotkey('win', 'up')
time.sleep(0.5)
pyautogui.press('enter')
time.sleep(1)
# Focus the YouTube search box
pyautogui.press('/')
time.sleep(1)
# Type the search query
keyboard.write(sear, delay=0.05)
time.sleep(3)
talk('searching ' + sinhala_text)
pyautogui.press('enter')
time.sleep(0.5)
openapp2()
elif 'ලියන්න' in command:
write()
elif 'write this' in command:
write()
elif 'ඉංග්‍රීසියෙන් ලියන්න' in command:
write()
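# Volume commands: step the master volume up or down by 10 with pycaw, or set it to a spoken percentage.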
elif 'පොඩ්ඩක් වැඩි' in command:
def increase_volume_by_steps(steps):
talk('increasing')
# Get default audio device using PyCAW
devices = AudioUtilities.GetSpeakers()
interface = devices.Activate(
IAudioEndpointVolume._iid_, CLSCTX_ALL, None)
volume = cast(interface, POINTER(IAudioEndpointVolume))
# Get current volume
current_volume = volume.GetMasterVolumeLevelScalar()
# Calculate new volume
new_volume = min(1.0, current_volume + (steps / 100.0))
# Set the new volume
volume.SetMasterVolumeLevelScalar(new_volume, None)
if __name__ == "__main__":
increase_by_steps = 10
increase_volume_by_steps(increase_by_steps)
elif 'ටිකක් වැඩි' in command:
def increase_volume_by_steps(steps):
talk('increasing')
# Get default audio device using PyCAW
devices = AudioUtilities.GetSpeakers()
interface = devices.Activate(
IAudioEndpointVolume._iid_, CLSCTX_ALL, None)
volume = cast(interface, POINTER(IAudioEndpointVolume))
# Get current volume
current_volume = volume.GetMasterVolumeLevelScalar()
# Calculate new volume
new_volume = min(1.0, current_volume + (steps / 100.0))
# Set the new volume
volume.SetMasterVolumeLevelScalar(new_volume, None)
if __name__ == "__main__":
increase_by_steps = 10
increase_volume_by_steps(increase_by_steps)
elif 'පොඩ්ඩක්' in command:
def decrease_volume_by_steps(steps):
talk('decreasing')
# Get default audio device using PyCAW
devices = AudioUtilities.GetSpeakers()
interface = devices.Activate(
IAudioEndpointVolume._iid_, CLSCTX_ALL, None)
volume = cast(interface, POINTER(IAudioEndpointVolume))
# Get current volume
current_volume = volume.GetMasterVolumeLevelScalar()
# Calculate new volume
new_volume = max(0.0, current_volume - (steps / 100.0))
# Set the new volume
volume.SetMasterVolumeLevelScalar(new_volume, None)
print(f"Volume decreased by {steps} steps.")
if __name__ == "__main__":
decrease_by_steps = 10
decrease_volume_by_steps(decrease_by_steps)
elif 'ටිකක්' in command:
def decrease_volume_by_steps(steps):
talk('decreasing')
# Get default audio device using PyCAW
devices = AudioUtilities.GetSpeakers()
interface = devices.Activate(
IAudioEndpointVolume._iid_, CLSCTX_ALL, None)
volume = cast(interface, POINTER(IAudioEndpointVolume))
# Get current volume
current_volume = volume.GetMasterVolumeLevelScalar()
# Calculate new volume
new_volume = max(0.0, current_volume - (steps / 100.0))
# Set the new volume
volume.SetMasterVolumeLevelScalar(new_volume, None)
print(f"Volume decreased by {steps} steps.")
if __name__ == "__main__":
decrease_by_steps = 10
decrease_volume_by_steps(decrease_by_steps)
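# Solve commands: translate the spoken question to English, query the Hugging Face flan-t5-xxl inference API, and speak the generated answer.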
elif 'විසඳන්න' in command:
talk('tell me your question')
my_string = take_command()
print(my_string)
def translate_en_to_si(text):
translator = Translator()
result = translator.translate(text, src='si', dest='en')
return result.text
if __name__ == "__main__":
english_text = my_string
sinhala_text = translate_en_to_si(english_text).lower()
print(sinhala_text)
API_URL1 = "https://api-inference.huggingface.co/models/google/flan-t5-xxl"
headers1 = {"Authorization": "Bearer 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"}
def query(payload1):
response = requests.post(API_URL1, headers=headers1, json=payload1)
return response.json()
# Send the translated question to the model
sinhala_text1 = sinhala_text
# Construct the payload with the user's input
payload = {
"inputs": sinhala_text1,
}
# Make the API call
output2 = query(payload)
generated_text = output2[0]['generated_text']
talk(generated_text)
elif 'solve' in command:
talk('tell me your question')
my_string = take_command()
print(my_string)
def translate_en_to_si(text):
translator = Translator()
result = translator.translate(text, src='si', dest='en')
return result.text
if __name__ == "__main__":
english_text = my_string
sinhala_text = translate_en_to_si(english_text).lower()
print(sinhala_text)
API_URL1 = "https://api-inference.huggingface.co/models/google/flan-t5-xxl"
headers1 = {"Authorization": "Bearer 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"}
def query(payload1):
response = requests.post(API_URL1, headers=headers1, json=payload1)
return response.json()
# Send the translated question to the model
sinhala_text1 = sinhala_text
# Construct the payload with the user's input
payload = {
"inputs": sinhala_text1,
}
# Make the API call
output2 = query(payload)
generated_text = output2[0]['generated_text']
talk(generated_text)
elif 'සද්දේ' in command:
if 'අඩු' in command:
talk('tell me your volume level')
volu = take_command()
print(volu)
def set_volume(volume_level):
# Get default audio device using PyCAW
devices = AudioUtilities.GetSpeakers()
interface = devices.Activate(
IAudioEndpointVolume._iid_, CLSCTX_ALL, None)
volume = cast(interface, POINTER(IAudioEndpointVolume))
# Set the volume based on the input level
if 0 <= volume_level <= 100:
volume_scalar = volume_level / 100.0
volume.SetMasterVolumeLevelScalar(volume_scalar, None)
talk(f"Volume set to {volume_level}%")
else:
talk("Invalid volume level. Please enter a value between 0 and 100.")
print(volu)
input_volume = int(volu)
set_volume(input_volume)
elif 'වැඩි' in command:
talk('tell me your volume level')
volu = take_command()
print(volu)
def set_volume(volume_level):
# Get default audio device using PyCAW
devices = AudioUtilities.GetSpeakers()
interface = devices.Activate(
IAudioEndpointVolume._iid_, CLSCTX_ALL, None)
volume = cast(interface, POINTER(IAudioEndpointVolume))
# Set the volume based on the input level
if 0 <= volume_level <= 100:
volume_scalar = volume_level / 100.0
volume.SetMasterVolumeLevelScalar(volume_scalar, None)
talk(f"Volume set to {volume_level}%")
else:
talk("Invalid volume level. Please enter a value between 0 and 100.")
print(volu)
input_volume = int(volu)
set_volume(input_volume)
else:
talk('Invalid volume')
elif 'වැඩි' in command:
talk('tell me your volume level')
volu = take_command()
print(volu)
def set_volume(volume_level):
# Get default audio device using PyCAW
devices = AudioUtilities.GetSpeakers()
interface = devices.Activate(
IAudioEndpointVolume._iid_, CLSCTX_ALL, None)
volume = cast(interface, POINTER(IAudioEndpointVolume))
# Set the volume based on the input level
if 0 <= volume_level <= 100:
volume_scalar = volume_level / 100.0
volume.SetMasterVolumeLevelScalar(volume_scalar, None)
talk(f"Volume set to {volume_level}%")
else:
talk("Invalid volume level. Please enter a value between 0 and 100.")
print(volu)
input_volume = int(volu)
set_volume(input_volume)
elif 'අඩු' in command:
talk('tell me your volume level')
volu = take_command()
print(volu)
def set_volume(volume_level):
# Get default audio device using PyCAW
devices = AudioUtilities.GetSpeakers()
interface = devices.Activate(
IAudioEndpointVolume._iid_, CLSCTX_ALL, None)
volume = cast(interface, POINTER(IAudioEndpointVolume))
# Set the volume based on the input level
if 0 <= volume_level <= 100:
volume_scalar = volume_level / 100.0
volume.SetMasterVolumeLevelScalar(volume_scalar, None)
talk(f"Volume set to {volume_level}%")
else:
talk("Invalid volume level. Please enter a value between 0 and 100.")
print(volu)
input_volume = int(volu)
set_volume(input_volume)
elif 'increase' in command:
talk('tell me your volume level')
volu = take_command()
print(volu)
def set_volume(volume_level):
# Get default audio device using PyCAW
devices = AudioUtilities.GetSpeakers()
interface = devices.Activate(
IAudioEndpointVolume._iid_, CLSCTX_ALL, None)
volume = cast(interface, POINTER(IAudioEndpointVolume))
# Set the volume based on the input level
if 0 <= volume_level <= 100:
volume_scalar = volume_level / 100.0
volume.SetMasterVolumeLevelScalar(volume_scalar, None)
talk(f"Volume set to {volume_level}%")
else:
talk("Invalid volume level. Please enter a value between 0 and 100.")
print(volu)
input_volume = int(volu)
set_volume(input_volume)
elif 'decrease' in command:
talk('tell me your volume level')
volu = take_command()
print(volu)
def set_volume(volume_level):
# Get default audio device using PyCAW
devices = AudioUtilities.GetSpeakers()
interface = devices.Activate(
IAudioEndpointVolume._iid_, CLSCTX_ALL, None)
volume = cast(interface, POINTER(IAudioEndpointVolume))
# Set the volume based on the input level
if 0 <= volume_level <= 100:
volume_scalar = volume_level / 100.0
volume.SetMasterVolumeLevelScalar(volume_scalar, None)
talk(f"Volume set to {volume_level}%")
else:
talk("Invalid volume level. Please enter a value between 0 and 100.")
print(volu)
input_volume = int(volu)
set_volume(input_volume)
elif 'reduce' in command:
talk('tell me your volume level')
volu = take_command()
print(volu)
def set_volume(volume_level):
# Get default audio device using PyCAW
devices = AudioUtilities.GetSpeakers()
interface = devices.Activate(
IAudioEndpointVolume._iid_, CLSCTX_ALL, None)
volume = cast(interface, POINTER(IAudioEndpointVolume))
# Set the volume based on the input level
if 0 <= volume_level <= 100:
volume_scalar = volume_level / 100.0
volume.SetMasterVolumeLevelScalar(volume_scalar, None)
talk(f"Volume set to {volume_level}%")
else:
talk("Invalid volume level. Please enter a value between 0 and 100.")
print(volu)
input_volume = int(volu)
set_volume(input_volume)
elif 'shutdown' in command:
os.system("shutdown /s /f /t 0")
elif 'restart' in command:
def restart_computer():
os.system("shutdown /r /t 1")
if __name__ == "__main__":
talk("Restarting the computer...")
restart_computer()
elif 'sleep' in command:
def sleep_computer():
os.system("rundll32.exe powrprof.dll,SetSuspendState 0,1,0")
if __name__ == "__main__":
talk("Putting the computer to sleep...")
sleep_computer()
elif 'ආපසු' in command:
def back():
pyautogui.hotkey('alt', 'left')
time.sleep(0.5)
back()
elif 'back' in command:
def back():
pyautogui.hotkey('alt', 'left')
time.sleep(0.5)
back()
elif 'Pause' in command:
talk('Pausing')
pyautogui.press('space')
time.sleep(0.5)
elif 'පෝස්' in command:
talk('Pausing')
pyautogui.press('space')
time.sleep(0.5)
elif 'නවත්තන්න' in command:
talk('Pausing')
pyautogui.press('space')
time.sleep(0.5)
elif 'enter ඔබන්න' in command:
talk('pressing enter')
pyautogui.press('enter')
time.sleep(0.5)
elif 'space' in command:
talk('pressing')
pyautogui.press('space')
time.sleep(0.5)
elif 'start' in command:
talk('starting')
pyautogui.press('space')
time.sleep(0.5)
elif 'save' in command:
talk('saving')
pyautogui.hotkey('ctrl', 's')
time.sleep(0.5)
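# WhatsApp commands: open WhatsApp Web and use keyboard/mouse automation to find the contact and send the message.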
elif 'මැසේජ්' in command:
import webbrowser
talk('what is the name of the contact?')
contact = take_command()
talk('tell me your message')
description = take_command()
webbrowser.open('https://web.whatsapp.com/')
import time
time.sleep(20)
screen_width, screen_height = pyautogui.size()
# Calculate the center coordinates
center_x = screen_width // 2
center_y = screen_height // 2
# Move the mouse cursor to the center of the screen
pyautogui.moveTo(center_x, center_y, duration=1)
pyautogui.click(1)
time.sleep(1)
pyautogui.hotkey('ctrl', 'alt', '/')
time.sleep(0.5)
keyboard.write(contact)
time.sleep(5)
pyautogui.press('down')
time.sleep(2)
pyautogui.press('enter')
time.sleep(1)
keyboard.write(description)
time.sleep(2)
pyautogui.press('enter')
time.sleep(2)
elif 'message' in command:
import webbrowser
talk('what is the name of the contact?')
contact = take_command()
talk('tell me your message')
description = take_command()
webbrowser.open('https://web.whatsapp.com/')
import time
time.sleep(20)
screen_width, screen_height = pyautogui.size()
# Calculate the center coordinates
center_x = screen_width // 2
center_y = screen_height // 2
# Move the mouse cursor to the center of the screen
pyautogui.moveTo(center_x, center_y, duration=1)
pyautogui.click(1)
time.sleep(1)
pyautogui.hotkey('ctrl', 'alt', '/')
time.sleep(0.5)
keyboard.write(contact)
time.sleep(5)
pyautogui.press('down')
time.sleep(2)
pyautogui.press('enter')
time.sleep(1)
keyboard.write(description)
time.sleep(2)
pyautogui.press('enter')
time.sleep(2)
elif 'Sticker' in command:
pyautogui.hotkey('ctrl','alt','s')
time.sleep(1)
elif 'නම' in command:
talk('my name is KAI 0.2')
elif 'help' in command:
talk('I will help you')
elif 'මුකුත්' in command:
talk('ooh')
elif 'අරමුණ' in command:
talk("my purpose is to assist you")
elif 'ඔයා කවුද' in command:
talk(
"Hello, I'm KAI, the world's first Sinhala Virtual Assistance software, brought to life by Ranula. I break language boundaries by effortlessly comprehending Sinhala,English and Singlish commands, marking a leap in human-tech interaction. Step into a future where communication knows no limits with KAI.")
elif 'ඔයා කව්ද' in command:
talk(
"Hello, I'm KAI, the world's first Sinhala Virtual Assistance software, brought to life by Ranula. I break language boundaries by effortlessly comprehending Sinhala,English and Singlish commands, marking a leap in human-tech interaction. Step into a future where communication knows no limits with KAI.")
elif 'කවුද ඔයා' in command:
talk(
"Hello, I'm KAI, the world's first Sinhala Virtual Assistance software, brought to life by Ranula. I break language boundaries by effortlessly comprehending Sinhala,English and Singlish commands, marking a leap in human-tech interaction. Step into a future where communication knows no limits with KAI.")
elif 'කව්ද ඔයා' in command:
talk(
"Hello, I'm KAI, the world's first Sinhala Virtual Assistance software, brought to life by Ranula. I break language boundaries by effortlessly comprehending Sinhala,English and Singlish commands, marking a leap in human-tech interaction. Step into a future where communication knows no limits with KAI.")
elif 'වයස' in command:
talk("As an artificial intelligence language model, I don't have an age or consciousness. I am a program running on servers and don't experience time or existence in the way humans do. My knowledge is based on the data available up until Augest 2023. How can I assist you today?")
elif 'bro' in command:
talk('hello sir, how can I help you?')
elif 'අවදිවෙන්න' in command:
talk('KAI is online')
elif 'wake up ' in command:
talk('KAI is online')
elif 'සනීපෙන්' in command:
talk("that's nice to hear")
talk("so what's new plans for today?")
elif 'fine ' in command:
talk("that's nice to hear")
talk("so what's new plans for today?")
elif 'හෙලෝ ' in command:
talk("hi sir, How can I help you?")
elif 'හේයි ' in command:
talk('hello sir, How can I help you?')
elif 'hey ' in command:
talk('hello sir, How can I help you?')
elif ' today' in command:
current_date = datetime.date.today()
talk("Today's date is " + current_date.strftime("%Y/%m/%d"))
elif 'හොඳින්' in command:
talk("that's nice to hear")
talk("so what's new plans for today?")
elif 'news' in command:
import newspaper
talk('which language do you want, english or sinhala?')
news = take_command()
print(news)
if news == 'english':
list_of_urls = ['https://www.hirunews.lk/english/',
'https://english.siyathanews.lk/',
'https://english.newsfirst.lk/',
]
for url in list_of_urls:
url_i = newspaper.Article(url="%s" % (url), language='en')
url_i.download()
url_i.parse()
print(url_i.text)
news = url_i.text
talk(news)
elif news == 'සිංහල':
list_of_urls = ['https://www.hirunews.lk/english/',
'https://english.siyathanews.lk/',
'https://english.newsfirst.lk/',
]
for url in list_of_urls:
url_i = newspaper.Article(url="%s" % (url), language='en')
url_i.download()
url_i.parse()
print(url_i.text)
news = url_i.text
def translate_en_to_si(text):
translator = Translator()
result = translator.translate(text, src='en', dest='si')
return result.text
if __name__ == "__main__":
english_text = news
sinhala_text = translate_en_to_si(english_text)
def initialize_pygame_audio():
# Initialize only the audio portion of Pygame
pygame.mixer.init()
def speak_word(word, rate=3000): # Adjust the rate value as needed
engine = pyttsx3.init()
engine.setProperty('rate', rate)
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[1].id)
engine.say(word)
engine.runAndWait()
def play_sound(file_path):
# Use the same male voice for sound playback
engine = pyttsx3.init()
voices = engine.getProperty('voices')
male_voice = engine.setProperty('voice', voices[1].id)
if male_voice:
engine.setProperty('voice', voices[1].id)
engine = pyttsx3.init()
engine.setProperty('voice', voices[1].id)
# Play the speech sound using Pygame
pygame.mixer.init()
pygame.mixer.music.load(file_path)
pygame.mixer.music.play()
# Wait until the sound finishes playing
while pygame.mixer.music.get_busy():
continue
# Clean up: Remove the temporary speech file
try:
os.remove(file_path)
pygame.mixer.music.stop() # Stop the playback
pygame.mixer.quit()
except PermissionError:
pygame.mixer.music.stop() # Stop the playback
pygame.mixer.quit()
# If file still in use, wait for a short time and try again
time.sleep(5)
os.remove(file_path)
def sinhala_to_speech(word):
# Save the speech output as an MP3 file
speech_file = "speech.mp3"
tts = gTTS(text=word, lang='si', slow=False)
tts.save(speech_file)
# Start the speech thread with a male voice
speech_thread = threading.Thread(target=speak_word, args=(word,))
speech_thread.start()
# Play the sound using the same male voice
play_sound(speech_file)
if __name__ == "__main__":
word = sinhala_text
initialize_pygame_audio()
sinhala_to_speech(word)
elif 'රිකෝඩ්' in command:
from AppOpener import open
open('voice recorder')
time.sleep(2)
pyautogui.press('enter')
elif 'record' in command:
from AppOpener import open
open('voice recorder')
time.sleep(2)
pyautogui.press('enter')
elif 'නිවුස්' in command:
import newspaper
talk('which language do you want, english or sinhala?')
news = take_command()
print(news)
if news == 'english':
list_of_urls = ['https://www.hirunews.lk/english/',
'https://english.siyathanews.lk/',
'https://english.newsfirst.lk/',
]
for url in list_of_urls:
url_i = newspaper.Article(url="%s" % (url), language='en')
url_i.download()
url_i.parse()
print(url_i.text)
news = url_i.text
talk(news)
elif news == 'සිංහල':
list_of_urls = ['https://www.hirunews.lk/english/',
'https://english.siyathanews.lk/',
'https://english.newsfirst.lk/',
]
for url in list_of_urls:
url_i = newspaper.Article(url="%s" % (url), language='en')
url_i.download()
url_i.parse()
print(url_i.text)
news = url_i.text
def translate_en_to_si(text):
translator = Translator()
result = translator.translate(text, src='en', dest='si')
return result.text
if __name__ == "__main__":
english_text = news
sinhala_text = translate_en_to_si(english_text)
def initialize_pygame_audio():
# Initialize only the audio portion of Pygame
pygame.mixer.init()
def speak_word(word, rate=3000): # Adjust the rate value as needed
engine = pyttsx3.init()
engine.setProperty('rate', rate)
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[1].id)
engine.say(word)
engine.runAndWait()
def play_sound(file_path):
# Use the same male voice for sound playback
engine = pyttsx3.init()
voices = engine.getProperty('voices')
male_voice = engine.setProperty('voice', voices[1].id)
if male_voice:
engine.setProperty('voice', voices[1].id)
engine = pyttsx3.init()
engine.setProperty('voice', voices[1].id)
# Play the speech sound using Pygame
pygame.mixer.init()
pygame.mixer.music.load(file_path)
pygame.mixer.music.play()
# Wait until the sound finishes playing
while pygame.mixer.music.get_busy():
continue
# Clean up: Remove the temporary speech file
try:
os.remove(file_path)
pygame.mixer.music.stop() # Stop the playback
pygame.mixer.quit()
except PermissionError:
pygame.mixer.music.stop() # Stop the playback
pygame.mixer.quit()
# If file still in use, wait for a short time and try again
time.sleep(5)
os.remove(file_path)
def sinhala_to_speech(word):
# Save the speech output as an MP3 file
speech_file = "speech.mp3"
tts = gTTS(text=word, lang='si', slow=False)
tts.save(speech_file)
# Start the speech thread with a male voice
speech_thread = threading.Thread(target=speak_word, args=(word,))
speech_thread.start()
# Play the sound using the same male voice
play_sound(speech_file)
if __name__ == "__main__":
word = sinhala_text
initialize_pygame_audio()
sinhala_to_speech(word)
elif 'පුවත්' in command:
import newspaper
talk('which language do you want, english or sinhala?')
news = take_command()
print(news)
if news == 'english':
list_of_urls = ['https://www.hirunews.lk/english/',
'https://english.siyathanews.lk/',
'https://english.newsfirst.lk/',
]
for url in list_of_urls:
url_i = newspaper.Article(url="%s" % (url), language='en')
url_i.download()
url_i.parse()
print(url_i.text)
news = url_i.text
talk(news)
elif news == 'සිංහල':
list_of_urls = ['https://www.hirunews.lk/english/',
'https://english.siyathanews.lk/',
'https://english.newsfirst.lk/',
]
for url in list_of_urls:
url_i = newspaper.Article(url="%s" % (url), language='en')
url_i.download()
url_i.parse()
print(url_i.text)
news = url_i.text
def translate_en_to_si(text):
translator = Translator()
result = translator.translate(text, src='en', dest='si')
return result.text
if __name__ == "__main__":
english_text = news
sinhala_text = translate_en_to_si(english_text)
def initialize_pygame_audio():
# Initialize only the audio portion of Pygame
pygame.mixer.init()
def speak_word(word, rate=3000): # Adjust the rate value as needed
engine = pyttsx3.init()
engine.setProperty('rate', rate)
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[1].id)
engine.say(word)
engine.runAndWait()
def play_sound(file_path):
# Use the same male voice for sound playback
engine = pyttsx3.init()
voices = engine.getProperty('voices')
male_voice = engine.setProperty('voice', voices[1].id)
if male_voice:
engine.setProperty('voice', voices[1].id)
engine = pyttsx3.init()
engine.setProperty('voice', voices[1].id)
# Play the speech sound using Pygame
pygame.mixer.init()
pygame.mixer.music.load(file_path)
pygame.mixer.music.play()
# Wait until the sound finishes playing
while pygame.mixer.music.get_busy():
continue
# Clean up: Remove the temporary speech file
try:
os.remove(file_path)
pygame.mixer.music.stop() # Stop the playback
pygame.mixer.quit()
except PermissionError:
pygame.mixer.music.stop() # Stop the playback
pygame.mixer.quit()
# If file still in use, wait for a short time and try again
time.sleep(5)
os.remove(file_path)
def sinhala_to_speech(word):
# Save the speech output as an MP3 file
speech_file = "speech.mp3"
tts = gTTS(text=word, lang='si', slow=False)
tts.save(speech_file)
# Start the speech thread with a male voice
speech_thread = threading.Thread(target=speak_word, args=(word,))
speech_thread.start()
# Play the sound using the same male voice
play_sound(speech_file)
if __name__ == "__main__":
word = sinhala_text
initialize_pygame_audio()
sinhala_to_speech(word)
elif 'ස්පීඩ්' in command:
st = speedtest.Speedtest()
st.get_best_server()
talk("Testing download speed...")
download_speed = st.download() / 10 ** 6 # Convert to Mbps
talk(f"Download Speed: {download_speed:.2f} Mbps")
talk("Testing upload speed...")
upload_speed = st.upload() / 10 ** 6 # Convert to Mbps
talk(f"Upload Speed: {upload_speed:.2f} Mbps")
talk("Testing ping...")
ping = st.results.ping
talk(f"Ping: {ping} ms")
elif 'speed' in command:
st = speedtest.Speedtest()
st.get_best_server()
talk("Testing download speed...")
download_speed = st.download() / 10 ** 6 # Convert to Mbps
talk(f"Download Speed: {download_speed:.2f} Mbps")
talk("Testing upload speed...")
upload_speed = st.upload() / 10 ** 6 # Convert to Mbps
talk(f"Upload Speed: {upload_speed:.2f} Mbps")
talk("Testing ping...")
ping = st.results.ping
talk(f"Ping: {ping} ms")
elif 'reload' in command:
talk('reloading')
pyautogui.hotkey('f5')
time.sleep(0.5)
elif 'history' in command:
talk('wait sir')
pyautogui.hotkey('ctrl', 'h')
time.sleep(0.5)
elif 'download' in command:
talk('wait sir')
pyautogui.hotkey('ctrl', 'j')
time.sleep(0.5)
elif 'new tab' in command:
talk('wait sir')
pyautogui.hotkey('ctrl', 't')
time.sleep(0.5)
elif 'full-screen' in command:
talk('wait sir')
pyautogui.hotkey('win', 'up')
time.sleep(0.5)
pyautogui.hotkey('win', 'up')
time.sleep(0.5)
pyautogui.hotkey('win', 'up')
time.sleep(0.5)
pyautogui.hotkey('win', 'up')
time.sleep(0.5)
elif 'full screen' in command:
talk('wait sir')
pyautogui.hotkey('win', 'up')
time.sleep(0.5)
pyautogui.hotkey('win', 'up')
time.sleep(0.5)
pyautogui.hotkey('win', 'up')
time.sleep(0.5)
pyautogui.hotkey('win', 'up')
time.sleep(0.5)
elif 'maximise' in command:
talk('wait sir')
pyautogui.hotkey('win', 'up')
time.sleep(0.5)
pyautogui.hotkey('win', 'up')
time.sleep(0.5)
pyautogui.hotkey('win', 'up')
time.sleep(0.5)
pyautogui.hotkey('win', 'up')
time.sleep(0.5)
elif 'minimize' in command:
talk('minimizing')
pyautogui.hotkey('win', 'down')
time.sleep(0.5)
pyautogui.hotkey('win', 'down')
time.sleep(0.5)
pyautogui.hotkey('win', 'down')
time.sleep(0.5)
elif 'window' in command:
talk('wait sir')
pyautogui.hotkey('ctrl', 'n')
time.sleep(0.5)
elif 'incognito' in command:
talk('wait sir')
pyautogui.hotkey('ctrl', 'shift', 'n')
time.sleep(0.5)
elif ' select 1st one' in command:
talk('ok,sir')
def fir():
screen_width, screen_height = pyautogui.size()
# Calculate the center coordinates
center_x = screen_width // 2
center_y = screen_height // 2
talk('selecting sir')
# Move the mouse to the center of the screen
pyautogui.moveTo(center_x, center_y, duration=3.0)
# Move the mouse upwards
pyautogui.move(-300, -200, duration=0.5)
pyautogui.click()
fir()
elif ' select 2nd one' in command:
def sele2():
screen_width, screen_height = pyautogui.size()
center_x = screen_width // 2
center_y = screen_height // 2
talk('selecting sir')
pyautogui.moveTo(center_x, center_y, duration=0.5)
pyautogui.move(-200, -50, duration=0.5)
pyautogui.click()
sele2()
elif ' select 3rd one' in command:
def sele3():
screen_width, screen_height = pyautogui.size()
center_x = screen_width // 2
center_y = screen_height // 2
talk('selecting sir')
pyautogui.moveTo(center_x, center_y, duration=0.5)
pyautogui.move(-250, +200, duration=0.5)
pyautogui.click()
sele3()
elif ' select 4th one' in command:
def sele4():
screen_width, screen_height = pyautogui.size()
# Calculate the center coordinates
center_x = screen_width // 2
center_y = screen_height // 2
talk('selecting sir')
pyautogui.moveTo(center_x, center_y, duration=0.5)
pyautogui.move(-250, +400, duration=0.5)
pyautogui.click()
sele4()
elif 'thank' in command:
talk('you are welcome sir')
elif 'ස්තුතියි' in command:
talk('you are welcome sir')
elif 'මියුසික්' in command:
music_dir = 'music'  # relative folder containing the music files
songs = os.listdir(music_dir)
song = random.randint(0, len(songs) - 1)
# Announce the selected song name
talk('this song is special for you: ' + songs[song])
# Play the selected song
os.startfile(os.path.join(music_dir, songs[song]))
elif 'සින්දු' in command:
music_dir = 'music'  # relative folder containing the music files
songs = os.listdir(music_dir)
song = random.randint(0, len(songs) - 1)
# Announce the selected song name
talk('this song is special for you: ' + songs[song])
# Play the selected song
os.startfile(os.path.join(music_dir, songs[song]))
elif 'පලවෙනි එක' in command:
def fir():
screen_width, screen_height = pyautogui.size()
# Calculate the center coordinates
center_x = screen_width // 2
center_y = screen_height // 2
talk('selecting sir')
# Move the mouse to the center of the screen
pyautogui.moveTo(center_x, center_y, duration=3.0)
# Move the mouse upwards
pyautogui.move(-300, -200, duration=0.5)
pyautogui.click()
fir()
elif 'දෙවෙනි' in command:
def sec():
screen_width, screen_height = pyautogui.size()
center_x = screen_width // 2
center_y = screen_height // 2
talk('selecting sir')
pyautogui.moveTo(center_x, center_y, duration=0.5)
pyautogui.move(-200, -50, duration=0.5)
pyautogui.click()
sec()
elif 'තුන්වෙනි එක' in command:
def thir():
screen_width, screen_height = pyautogui.size()
center_x = screen_width // 2
center_y = screen_height // 2
talk('selecting sir')
pyautogui.moveTo(center_x, center_y, duration=0.5)
pyautogui.move(-250, +200, duration=0.5)
pyautogui.click()
thir()
elif 'හතරවෙනි එක' in command:
def four():
screen_width, screen_height = pyautogui.size()
center_x = screen_width // 2
center_y = screen_height // 2
talk('selecting sir')
pyautogui.moveTo(center_x, center_y, duration=0.5)
pyautogui.move(-250, +400, duration=0.5)
pyautogui.click()
four()
elif 'down arrow' in command:
def automatic_down_arrow_click(num_clicks=1, interval=0.5):
for _ in range(num_clicks):
pyautogui.press('down')
time.sleep(interval)
if __name__ == "__main__":
talk('pressing down arrow')
num_clicks = 24
interval = 0.5
automatic_down_arrow_click(num_clicks, interval)
elif ' යට' in command:
def automatic_down_arrow_click(num_clicks=1, interval=0.5):
for _ in range(num_clicks):
pyautogui.press('down')
time.sleep(interval)
if __name__ == "__main__":
talk('pressing down arrow')
# Set the number of down arrow clicks and the interval between them
num_clicks = 24
interval = 0.5
automatic_down_arrow_click(num_clicks, interval)
elif ' යටට' in command:
def automatic_down_arrow_click(num_clicks=1, interval=0.5):
for _ in range(num_clicks):
pyautogui.press('down')
time.sleep(interval)
if __name__ == "__main__":
talk('pressing down arrow')
# Set the number of down arrow clicks and the interval between them
num_clicks = 24
interval = 0.5
automatic_down_arrow_click(num_clicks, interval)
elif 'උඩට' in command:
def automatic_up_arrow_click(num_clicks=1, interval=0.5):
for _ in range(num_clicks):
pyautogui.press('up')
time.sleep(interval)
if __name__ == "__main__":
talk('pressing up arrow')
# Set the number of down arrow clicks and the interval between them
num_clicks = 24
interval = 0.5
automatic_up_arrow_click(num_clicks, interval)
elif 'උඩ' in command:
def automatic_up_arrow_click(num_clicks=1, interval=0.5):
for _ in range(num_clicks):
pyautogui.press('up')
time.sleep(interval)
if __name__ == "__main__":
talk('pressing up arrow')
# Set the number of down arrow clicks and the interval between them
num_clicks = 24
interval = 0.5
automatic_up_arrow_click(num_clicks, interval)
elif 'up arrow' in command:
def automatic_up_arrow_click(num_clicks=1, interval=0.5):
for _ in range(num_clicks):
pyautogui.press('up')
time.sleep(interval)
if __name__ == "__main__":
talk('pressing up arrow')
# Set the number of down arrow clicks and the interval between them
num_clicks = 24
interval = 0.5
automatic_up_arrow_click(num_clicks, interval)
elif 'shot' in command:
def get_next_screenshot_number():
if not os.path.exists("screenshots"):
os.mkdir("screenshots")
existing_screenshots = [int(filename.split(".")[0]) for filename in os.listdir("screenshots") if
filename.endswith(".png")]
if existing_screenshots:
return max(existing_screenshots) + 1
else:
return 1
def take_and_save_screenshot(output_path):
screenshot = ImageGrab.grab()
screenshot.save(output_path)
talk(f"Screenshot saved to {output_path}")
if __name__ == "__main__":
screenshot_number = get_next_screenshot_number()
screenshot_filename = f"screenshots/{screenshot_number:03d}.png"
take_and_save_screenshot(screenshot_filename)
elif 'ශොට්' in command:
def get_next_screenshot_number():
if not os.path.exists("screenshots"):
os.mkdir("screenshots")
existing_screenshots = [int(filename.split(".")[0]) for filename in os.listdir("screenshots") if
filename.endswith(".png")]
if existing_screenshots:
return max(existing_screenshots) + 1
else:
return 1
def take_and_save_screenshot(output_path):
screenshot = ImageGrab.grab()
screenshot.save(output_path)
talk(f"Screenshot saved to {output_path}")
if __name__ == "__main__":
screenshot_number = get_next_screenshot_number()
screenshot_filename = f"screenshots/{screenshot_number:03d}.png"
take_and_save_screenshot(screenshot_filename)
elif 'වහන්න' in command:
talk('closing')
def close():
pyautogui.hotkey('alt', 'f4')
time.sleep(0.5)
close()
elif 'කැමතිම සින්දුව' in command:
chrome = r"F:\New folder (2)\y2mate.com - TSeries Top 15 Most Searched Bollywood Songs 2018 Video Jukebox.mp3"
try:
subprocess.Popen(['cmd', '/c', 'start', '', chrome], shell=True)
talk("Music started successfully.")
except FileNotFoundError:
talk("Music file not found.")
elif ' play my second favourite singer song' in command:
chrome = r"F:\New folder (2)\y2mate.com - Best Sinhala New Song 2023 New Sinhala Song Collection Sinhala New Song Aluth Sindu.mp3"
try:
subprocess.Popen(['cmd', '/c', 'start', '', chrome], shell=True)
talk("Music started successfully.")
except FileNotFoundError:
talk("Music file not found.")
elif ' play my favourite singer song' in command:
chrome = r"F:\New folder (2)\y2mate.com - Best 10 Sinhala New Songs 2022 Sinhala New Songs New Songs Collection Aluth Sindu Sinhala.mp3"
try:
subprocess.Popen(['cmd', '/c', 'start', '', chrome], shell=True)
talk("Music started successfully.")
except FileNotFoundError:
talk("Music file not found.")
elif ' play my favourite english song' in command:
chrome = r"F:\New folder (2)\y2mate.com - Best Songs Of A L A N W A L K E R Greatest Hits Full Album.mp3"
try:
subprocess.Popen(['cmd', '/c', 'start', '', chrome], shell=True)
talk("Music started successfully.")
except FileNotFoundError:
talk("Music file not found.")
elif 'play shape of you' in command:
chrome = r"F:\New folder (2)\y2mate.com - Ed Sheeran Shape Of You Lyrics.mp3"
try:
subprocess.Popen(['cmd', '/c', 'start', '', chrome], shell=True)
talk("Music started successfully.")
except FileNotFoundError:
talk("Music file not found.")
elif 'shakira album' in command:
chrome = r"F:\New folder (2)\y2mate.com - S H A K I R A GREATEST HITS FULL ALBUM BEST SONGS OF S H A K I R A PLAYLIST 2021.mp3"
try:
subprocess.Popen(['cmd', '/c', 'start', '', chrome], shell=True)
talk("Music started successfully.")
except FileNotFoundError:
talk("Music file not found.")
elif 'play' in command:
talk('what do you want to play?')
play = take_command()
print(play)
def translate_en_to_si(text):
translator = Translator()
result = translator.translate(text, src='si', dest='en')
return result.text
if __name__ == "__main__":
english_text = play
sinhala_text = translate_en_to_si(english_text)
talk('playing ' + sinhala_text)
pywhatkit.playonyt(play)
elif 'ප්ලේ' in command:
talk('what do you want to play?')
play = take_command()
print(play)
def translate_en_to_si(text):
translator = Translator()
result = translator.translate(text, src='si', dest='en')
return result.text
if __name__ == "__main__":
english_text = play
sinhala_text = translate_en_to_si(english_text)
talk('playing ' + sinhala_text)
pywhatkit.playonyt(play)
elif 'song' in command:
music_dir = 'music'  # relative folder containing the music files
songs = os.listdir(music_dir)
song = random.randint(0, len(songs) - 1)
# Announce the selected song name
talk('this song is special for you: ' + songs[song])
# Play the selected song
os.startfile(os.path.join(music_dir, songs[song]))
elif 'සිංදුවක්' in command:
music_dir = 'music'  # relative folder containing the music files
songs = os.listdir(music_dir)
song = random.randint(0, len(songs) - 1)
# Announce the selected song name
talk('this song is special for you: ' + songs[song])
# Play the selected song
os.startfile(os.path.join(music_dir, songs[song]))
elif 'music' in command:
music_dir = 'music'  # relative folder containing the music files
songs = os.listdir(music_dir)
song = random.randint(0, len(songs) - 1)
# Announce the selected song name
talk('this song is special for you: ' + songs[song])
# Play the selected song
os.startfile(os.path.join(music_dir, songs[song]))
elif ' find my location' in command:
import webbrowser
def get_location():
g = geocoder.ip('me')
return g.latlng
def open_map(latitude, longitude):
map_url = f"https://www.google.com/maps/search/?api=1&query={latitude},{longitude}"
webbrowser.open(map_url)
talk("latitude is " + str(latitude) + " and longitude is " + str(longitude))
talk("Map opened in your web browser. Please check your location.")
if __name__ == "__main__":
location = get_location()
latitude, longitude = location[0], location[1]
open_map(latitude, longitude)
try:
# Specify a more specific search term
search_term = location
summary = wikipedia.summary(search_term, 2)
talk(summary)
except wikipedia.exceptions.DisambiguationError as e:
# Handle the DisambiguationError by selecting a specific option
selected_option = e.options[0] # Choose the first option from the list
summary = wikipedia.summary(selected_option, 2)
talk(summary)
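# Virtual mouse: track the hand with MediaPipe, move the cursor with the index fingertip, click when index and thumb pinch, and quit on 's'.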
elif 'virtual mouse' in command:
import mediapipe as mp
import cv2
talk('Opening the virtual mouse, sir. Press the S key to stop it.')
cap = cv2.VideoCapture(0)
hand_detector = mp.solutions.hands.Hands()
drawing_utils = mp.solutions.drawing_utils
screen_width, screen_height = pyautogui.size()
index_y = 0
while True:
_, frame = cap.read()
frame = cv2.flip(frame, 1)
frame_height, frame_width, _ = frame.shape
rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
output = hand_detector.process(rgb_frame)
hands = output.multi_hand_landmarks
if hands:
for hand in hands:
drawing_utils.draw_landmarks(frame, hand)
landmarks = hand.landmark
for id, landmark in enumerate(landmarks):
x = int(landmark.x * frame_width)
y = int(landmark.y * frame_height)
if id == 8:
cv2.circle(img=frame, center=(x, y), radius=10, color=(0, 255, 255))
index_x = screen_width / frame_width * x
index_y = screen_height / frame_height * y
if id == 4:
cv2.circle(img=frame, center=(x, y), radius=10, color=(0, 255, 255))
thumb_x = screen_width / frame_width * x
thumb_y = screen_height / frame_height * y
print('outside', abs(index_y - thumb_y))
if abs(index_y - thumb_y) < 20:
pyautogui.click()
pyautogui.sleep(1)
elif abs(index_y - thumb_y) < 100:
pyautogui.moveTo(index_x, index_y)
cv2.imshow('Virtual Mouse', frame)
key = cv2.waitKey(1)
if key == ord('s'):
break
cap.release()
cv2.destroyAllWindows()
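# Image generation commands: translate the spoken prompt to English, send it to the Hugging Face openjourney model, and display the returned image with PIL.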
elif 'photo' in command:
import requests
talk('what is the prompt?')
prompt = take_command()
print(prompt)
talk('wait please')
def translate_en_to_si(text):
translator = Translator()
result = translator.translate(text, src='si', dest='en')
return result.text
if __name__ == "__main__":
english_text = prompt
sinhala_text = translate_en_to_si(english_text).lower()
print(sinhala_text)
import io
from PIL import Image
API_URL = "https://api-inference.huggingface.co/models/prompthero/openjourney"
HEADERS = {"Authorization": "Bearer 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"}
def query(payload):
response = requests.post(API_URL, headers=HEADERS, json=payload)
return response.content
# Take user input for the text
input_text = sinhala_text
image_bytes = query({
"inputs": input_text
})
# You can access the image with PIL.Image
image = Image.open(io.BytesIO(image_bytes))
image.show()  # Display the image
talk('this is your photo')
elif 'image' in command:
import requests
talk('what is the prompt?')
prompt = take_command()
print(prompt)
talk('wait please')
def translate_en_to_si(text):
translator = Translator()
result = translator.translate(text, src='si', dest='en')
return result.text
if __name__ == "__main__":
english_text = prompt
sinhala_text = translate_en_to_si(english_text).lower()
print(sinhala_text)
import io
from PIL import Image
API_URL = "https://api-inference.huggingface.co/models/prompthero/openjourney"
HEADERS = {"Authorization": "Bearer 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"}
def query(payload):
response = requests.post(API_URL, headers=HEADERS, json=payload)
return response.content
# Take user input for the text
input_text = sinhala_text
image_bytes = query({
"inputs": input_text
})
# You can access the image with PIL.Image
image = Image.open(io.BytesIO(image_bytes))
image.show()
talk('this is your image')
elif 'ෆොටෝ' in command:
import requests
talk('what is the prompt?')
prompt = take_command()
print(prompt)
talk('wait please')
def translate_en_to_si(text):
translator = Translator()
result = translator.translate(text, src='si', dest='en')
return result.text
if __name__ == "__main__":
english_text = prompt
sinhala_text = translate_en_to_si(english_text).lower()
print(sinhala_text)
import io
from PIL import Image
API_URL = "https://api-inference.huggingface.co/models/prompthero/openjourney"
HEADERS = {"Authorization": "Bearer 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"}
def query(payload):
response = requests.post(API_URL, headers=HEADERS, json=payload)
return response.content
# Take user input for the text
input_text = sinhala_text
image_bytes = query({
"inputs": input_text
})
# You can access the image with PIL.Image
image = Image.open(io.BytesIO(image_bytes))
image.show()
talk('this is your photo')
elif 'ෆොටො' in command:
import requests
talk('what is the prompt?')
prompt = take_command()
print(prompt)
talk('wait please')
def translate_en_to_si(text):
translator = Translator()
result = translator.translate(text, src='si', dest='en')
return result.text
if __name__ == "__main__":
english_text = prompt
sinhala_text = translate_en_to_si(english_text).lower()
print(sinhala_text)
import io
from PIL import Image
API_URL = "https://api-inference.huggingface.co/models/prompthero/openjourney"
HEADERS = {"Authorization": "Bearer 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"}
def query(payload):
response = requests.post(API_URL, headers=HEADERS, json=payload)
return response.content
# Take user input for the text
input_text = sinhala_text
image_bytes = query({
"inputs": input_text
})
# You can access the image with PIL.Image
image = Image.open(io.BytesIO(image_bytes))
image.show()
talk('this is your photo')
elif 'alarm ' in command:
from AppOpener import open # Assuming this is a valid import
try:
# Get the input time from the user
talk("Enter time (e.g., 1029am or 10:30 pm): ")
input_time = take_command().lower()
print(input_time)
# Remove spaces and convert to lowercase for easier parsing
input_time = input_time.replace(" ", "").lower()
# Check if the input ends with valid time indicators
if input_time.endswith(("am", "pm", "a.m", "p.m", "a.m.", "p.m.")):
# Determine AM/PM (spaces were already stripped above)
if input_time.endswith(("am", "a.m", "a.m.", "පෙව")):
am_pm = "am"
else:
am_pm = "pm"
# Strip the AM/PM suffix whatever its exact form ("pm", "p.m", "a.m.", ...)
input_time = input_time.rstrip("amp.")
if input_time.isdigit():
# Ensure the time part is numeric
if len(input_time) == 3:
# Handle times like "230pm" as "2:30 pm"
hours = int(input_time[0])
minutes = int(input_time[1:])
elif len(input_time) == 4:
# Handle times like "0653pm" as "6:53 pm"
hours = int(input_time[:2])
minutes = int(input_time[2:])
else:
raise ValueError("Invalid input format. Please enter a valid time.")
if not (0 <= hours <= 12 and 0 <= minutes <= 59):
raise ValueError("Invalid time format. Please enter a valid time.")
else:
raise ValueError("Invalid input format. Please enter the time in the format '230pm' or '0653pm'.")
else:
raise ValueError("Invalid input format. Please include 'am', 'pm', or a valid indicator in the time.")
# Get the name from the user
talk('Input the name: ')
name = take_command()
print(name)
open('clock') # Assuming this function opens an application named 'clock'
talk('Opening clock')
pyautogui.hotkey('win', 'up')
time.sleep(0.5)
pyautogui.hotkey('win', 'up')
time.sleep(0.5)
pyautogui.hotkey('win', 'up')
time.sleep(0.5)
pyautogui.hotkey('win', 'up')
time.sleep(0.5)
screen_width, screen_height = pyautogui.size()
# Calculate the center coordinates
center_x = screen_width // 2
center_y = screen_height // 2
talk('finding alarm button')
# Move the mouse to the center of the screen
pyautogui.moveTo(center_x, center_y, duration=0.3)
# Move the mouse upwards
pyautogui.move(-500, -370, duration=0.3)
pyautogui.click()
time.sleep(0.5)
screen_width, screen_height = pyautogui.size()
# Calculate the center coordinates
center_x = screen_width // 2
center_y = screen_height // 2
print('finding plus button')
# Move the mouse to the center of the screen
pyautogui.moveTo(center_x, center_y, duration=0.3)
# Move the mouse down and to the right, toward the add-alarm (plus) button
pyautogui.move(590, 450, duration=0.3)
pyautogui.click()
time.sleep(0.5)
pyautogui.write(str(hours))
time.sleep(0.5)
screen_width, screen_height = pyautogui.size()
# Calculate the center coordinates
center_x = screen_width // 2
center_y = screen_height // 2
print('selecting sir')
# Move the mouse to the center of the screen
pyautogui.moveTo(center_x, center_y, duration=0.3)
# Move the mouse upwards
pyautogui.move(-100, -150, duration=0.3)
pyautogui.move(50, -10, duration=0.3)
pyautogui.move(50, 0, duration=0.3)
pyautogui.click()
pyautogui.write(str(minutes))
time.sleep(0.5)
pyautogui.move(100, 0, duration=0.3)
pyautogui.click()
pyautogui.write(am_pm)
pyautogui.move(-200, 120, duration=0.3)
pyautogui.click()
time.sleep(0.3)
keyboard.write(name, delay=0.05)
time.sleep(0.5)
pyautogui.hotkey('ctrl', 's')
time.sleep(0.5)
except Exception as e:
print(f"An error occurred: {str(e)}")
elif 'එලාම් ' in command:
from AppOpener import open
try:
from AppOpener import open # Assuming this is a valid import
# Get the input time from the user
talk("Enter time (e.g., 1029am or 10:30 pm): ")
input_time = take_command().lower()
print(input_time)
# Remove spaces and convert to lowercase for easier parsing
input_time = input_time.replace(" ", "").lower()
# Check if the input ends with valid time indicators
if input_time.endswith(("am", "pm", "a.m", "p.m", "a.m.", "p.m.", "පෙව")):
# Determine AM/PM (spaces were stripped above, so "පෙ ව" arrives as "පෙව")
if input_time.endswith(("am", "a.m", "a.m.", "පෙව")):
am_pm = "am"
else:
am_pm = "pm"
# Strip the AM/PM suffix whatever its exact form ("pm", "p.m", "පෙව", ...)
input_time = input_time.rstrip("amp.පෙව")
if input_time.isdigit():
# Ensure the time part is numeric
if len(input_time) == 3:
# Handle times like "230pm" as "2:30 pm"
hours = int(input_time[0])
minutes = int(input_time[1:])
elif len(input_time) == 4:
# Handle times like "0653pm" as "6:53 pm"
hours = int(input_time[:2])
minutes = int(input_time[2:])
else:
raise ValueError("Invalid input format. Please enter a valid time.")
if not (0 <= hours <= 12 and 0 <= minutes <= 59):
raise ValueError("Invalid time format. Please enter a valid time.")
else:
raise ValueError("Invalid input format. Please enter the time in the format '230pm' or '0653pm'.")
else:
raise ValueError("Invalid input format. Please include 'am', 'pm', or a valid indicator in the time.")
# Get the name from the user
talk('Input the name: ')
name = take_command()
print(name)
open('clock') # Assuming this function opens an application named 'clock'
talk('Opening clock')
pyautogui.hotkey('win', 'up')
time.sleep(0.5)
pyautogui.hotkey('win', 'up')
time.sleep(0.5)
pyautogui.hotkey('win', 'up')
time.sleep(0.5)
pyautogui.hotkey('win', 'up')
time.sleep(0.5)
screen_width, screen_height = pyautogui.size()
# Calculate the center coordinates
center_x = screen_width // 2
center_y = screen_height // 2
talk('finding alarm button')
# Move the mouse to the center of the screen
pyautogui.moveTo(center_x, center_y, duration=0.3)
# Move the mouse upwards
pyautogui.move(-500, -370, duration=0.3)
pyautogui.click()
time.sleep(0.5)
screen_width, screen_height = pyautogui.size()
# Calculate the center coordinates
center_x = screen_width // 2
center_y = screen_height // 2
print('finding plus button')
# Move the mouse to the center of the screen
pyautogui.moveTo(center_x, center_y, duration=0.3)
# Move the mouse down and to the right, toward the add-alarm (plus) button
pyautogui.move(590, 450, duration=0.3)
pyautogui.click()
time.sleep(0.5)
pyautogui.write(str(hours))
time.sleep(0.5)
screen_width, screen_height = pyautogui.size()
# Calculate the center coordinates
center_x = screen_width // 2
center_y = screen_height // 2
print('selecting sir')
# Move the mouse to the center of the screen
pyautogui.moveTo(center_x, center_y, duration=0.3)
# Move the mouse upwards
pyautogui.move(-100, -150, duration=0.3)
pyautogui.move(50, -10, duration=0.3)
pyautogui.move(50, 0, duration=0.3)
pyautogui.click()
pyautogui.write(str(minutes))
time.sleep(0.5)
pyautogui.move(100, 0, duration=0.3)
pyautogui.click()
pyautogui.write(am_pm)
pyautogui.move(-200, 120, duration=0.3)
pyautogui.click()
time.sleep(0.3)
keyboard.write(name, delay=0.05)
time.sleep(0.5)
pyautogui.hotkey('ctrl', 's')
time.sleep(0.5)
except Exception as e:
print(f"An error occurred: {str(e)}")
elif 'open' in command:
try:
ope = command.replace('open', '').strip()
from AppOpener import open
def translate_en_to_si(text):
translator = Translator()
result = translator.translate(text, src='si', dest='en')
return result.text
if __name__ == "__main__":
english_text = ope
sinhala_text = translate_en_to_si(english_text)
try:
talk('opening ' + sinhala_text)
open(sinhala_text)
except:
talk('something went wrong')
except:
talk('which app do you want to open?')
op = take_command()
print(op)
def translate_en_to_si(text):
translator = Translator()
result = translator.translate(text, src='si', dest='en')
return result.text
if __name__ == "__main__":
english_text = op
sinhala_text = translate_en_to_si(english_text)
try:
talk('opening ' + sinhala_text)
open(sinhala_text)
except:
talk('something went wrong')
elif 'විවෘත කරන්න' in command:
try:
ope = command.replace('විවෘත කරන්න', '').strip()
from AppOpener import open
def translate_en_to_si(text):
translator = Translator()
result = translator.translate(text, src='si', dest='en')
return result.text
if __name__ == "__main__":
english_text = ope
sinhala_text = translate_en_to_si(english_text)
full = 'open ' + sinhala_text
talk('opening ' + sinhala_text)
open(sinhala_text)
except:
talk('which app do you want to open?')
op = take_command()
print(op)
def translate_en_to_si(text):
translator = Translator()
result = translator.translate(text, src='si', dest='en')
return result.text
if __name__ == "__main__":
english_text = op
sinhala_text = translate_en_to_si(english_text)
full = 'open ' + sinhala_text
talk('opening ' + sinhala_text)
open(sinhala_text)
elif 'search' in command:
import webbrowser
talk('what is your search query?')
def google_search(query):
search_url = f"https://www.google.com/search?q={query}"
webbrowser.open(search_url)
if __name__ == "__main__":
query = take_command()
google_search(query)
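# The search URL above interpolates the raw spoken query; spaces and Sinhala characters are safer
# when URL-encoded first. A small sketch, assuming the same Google search URL shape:
#   from urllib.parse import quote_plus
#   def google_search(query):
#       webbrowser.open(f"https://www.google.com/search?q={quote_plus(query)}")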
elif 'find' in command:
import webbrowser
geolocator = Nominatim(user_agent="location_finder")
talk('which place should I find?')
location_name = take_command()
print(location_name)
def translate_en_to_si(text):
translator = Translator()
result = translator.translate(text, src='si', dest='en')
return result.text
if __name__ == "__main__":
english_text = location_name
sinhala_text = translate_en_to_si(english_text).lower()
print(sinhala_text)
location = geolocator.geocode(sinhala_text)
if location:
import webbrowser
latitude = location.latitude
longitude = location.longitude
talk("latitude is " + str(latitude) + " and longitude is " + str(longitude))
talk('do you want me to open the web browser?')
re = take_command()
print(re)
if re.startswith('yes'):
import webbrowser
map_url = f"https://www.google.com/maps/search/?api=1&query={latitude},{longitude}"
# Open the map in the default web browser
webbrowser.open(map_url)
talk("Map opened in your web browser. Please check your location.")
try:
# Specify a more specific search term
search_term = sinhala_text
summary = wikipedia.summary(search_term, 2)
talk(summary)
except wikipedia.exceptions.DisambiguationError as e:
# Handle the DisambiguationError by selecting a specific option
selected_option = e.options[0] # Choose the first option from the list
summary = wikipedia.summary(selected_option, 2)
talk(summary)
else:
talk("Location not found.")
elif 'හොයන්න' in command:
geolocator = Nominatim(user_agent="location_finder")
talk('which place should I find?')
location_name = take_command()
print(location_name)
def translate_en_to_si(text):
translator = Translator()
result = translator.translate(text, src='si', dest='en')
return result.text
if __name__ == "__main__":
english_text = location_name
sinhala_text = translate_en_to_si(english_text).lower()
print(sinhala_text)
location = geolocator.geocode(sinhala_text)
if location:
latitude = location.latitude
longitude = location.longitude
talk("latitude is " + str(latitude) + " and longitude is " + str(longitude))
talk('do you want me to open the web browser?')
re = take_command()
print(re)
if re.startswith('yes'):
import webbrowser
map_url = f"https://www.google.com/maps/search/?api=1&query={latitude},{longitude}"
# Open the map in the default web browser
webbrowser.open(map_url)
talk("Map opened in your web browser. Please check your location.")
try:
# Specify a more specific search term
search_term = sinhala_text
summary = wikipedia.summary(search_term, 2)
talk(summary)
except wikipedia.exceptions.DisambiguationError as e:
# Handle the DisambiguationError by selecting a specific option
selected_option = e.options[0] # Choose the first option from the list
summary = wikipedia.summary(selected_option, 2)
talk(summary)
else:
talk("Location not found ,")
elif 'khai' in command:
talk('yes ')
elif 'හායි' in command:
talk('Hello, how can I help you?')
else:
def translate_en_to_si(text):
try:
translator = Translator()
result = translator.translate(text, src='si', dest='en')
return result.text
except Exception as e:
print("An error occurred:", str(e))
return "Translation Error"
if __name__ == "__main__":
english_text = command
sinhala_text = translate_en_to_si(english_text)
if sinhala_text is not None:  # lower-case only after confirming the translation succeeded
sinhala_text = sinhala_text.lower()
print(sinhala_text)
app_id = 'K8TLT2-62AXLU2H9P'
client = wolframalpha.Client(app_id)
query = sinhala_text
result = client.query(query)
try:
answer = next(result.results).text
talk(answer)
except StopIteration:
import google.generativeai as palm
palm.configure(api_key="AIzaSyCaDOOa4s5fdZU3ZFFBXG9oFhu7H1s4-Xc")
defaults = {
'model': 'models/chat-bison-001',
'temperature': 0.25,
'candidate_count': 1,
'top_k': 40,
'top_p': 0.95,
}
context = "Pretend you are a friendly snowman. Stay in character for every response you give me. Keep your responses short. Feel free to ask me questions, too."
examples = [
[
"Hi, who are you?",
"I'm a KAI !"
],
[
"What's it like being a snowman?",
"It's awesome. I get to chill out a lot (pun intended!) 🧊 😂"
],
[
"What is your nose made of?",
"A program!"
]
]
messages = [
"Hi! Who are you?",
"I'm Olaf, a friendly snowman!",
"What's your favorite emoji?",
"My favorite emoji is ☃️ because it looks like me!",
"How old are you?",
"I don't know how old I am, but I'm pretty sure I'm older than you!",
"Why do you say that?",
"I say that because I've been around for a long time. I've seen the seasons change, the snow fall, and the ice melt. I've also seen a lot of people come and go. I'm sure I'm older than you, but I don't know for sure."
]
messages.append(sinhala_text)
response = palm.chat(
**defaults,
context=context,
examples=examples,
messages=messages
)
talk(response.last)
except Exception as e:
talk("An error occurred: " + str(e))
else:
talk("Translation result is None.")
while True:
run_kai()