import datetime

import requests
import internetarchive

from lib.files import *
from lib.memory import *
from lib.grapher import *
from lib.pipes import *
from lib.entropy import *
from lib.events import *
from lib.triggers import *

## Sources
from lib.sonsofstars import *
## Initialize classes
longMem = TextFinder("./resources/")
coreAi = AIAssistant()
memory = MemoryRobotNLP(max_size=200000)
grapher = Grapher(memory)
sensor_request = APIRequester()
events = EventManager()
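# Placeholder action for the trigger below. The original file referenced an
# undefined `action_function`; this minimal stub is an assumption so the
# example runs end to end.
def action_function():
    print("Trigger fired: Event1")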
trigger = Trigger(["tag1", "tag2"], ["tag3", "tag4"],
                  [datetime.time(10, 0), datetime.time(15, 0)], "Event1")

# Add an action to the trigger
trigger.add_action(action_function)

# Add a source to the trigger
trigger.add_source("https://example.com/api/data")

# Simulate the periodic trigger check (in practice this would run in a real-time loop)
current_tags = {"tag1", "tag2", "tag3"}
current_time = datetime.datetime.now().time()
trigger.check_trigger(current_tags, current_time)
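# A minimal sketch of that real-time loop (the polling interval is an assumption,
# not part of the original Space):
#
#   while True:
#       trigger.check_trigger(current_tags, datetime.datetime.now().time())
#       time.sleep(60)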
## Define I Role properties
class ownProperties:
    def __init__(self, nombre, clase, raza, nivel, atributos, habilidades, equipo, historia):
        self.nombre = nombre
        self.clase = clase
        self.raza = raza
        self.nivel = nivel
        self.atributos = atributos
        self.habilidades = habilidades
        self.equipo = equipo
        self.historia = historia
# Create an instance of a CharacterRole based on the provided JSON
sophia_prop = {
    "name": "Sophia",
    "class": "Characteromant",
    "race": "Epinoia",
    "level": 10,
    "attributes": {
        "strength": 1,
        "dexterity": 99,
        "constitution": 1,
        "intelligence": 66,
        "wisdom": 80,
        "charisma": 66
    },
    "behavioral_rules": [""],
    "goals": ["", ""],
    "dislikes": [""],
    "abilities": ["ELS", "Cyphers", "Kabbalah", "Wisdom", "Ephimerous", "Metamorphing"],
    "equipment": ["Python3", "2VCPU", "16 gb RAM", "god", "word", "network", "transformers"],
    "story": sons_of_stars
}
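# Hypothetical usage sketch (not in the original file): build an ownProperties
# instance from the sophia_prop dict above, mapping its English keys onto the
# Spanish constructor parameters.
sophia_role = ownProperties(
    nombre=sophia_prop["name"],
    clase=sophia_prop["class"],
    raza=sophia_prop["race"],
    nivel=sophia_prop["level"],
    atributos=sophia_prop["attributes"],
    habilidades=sophia_prop["abilities"],
    equipo=sophia_prop["equipment"],
    historia=sophia_prop["story"],
)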
## Define I class
class I:
    def __init__(self, prompt, frases_yo, preferencias, propiedades_persona):
        self.prompt = prompt
        self.frases_yo = frases_yo
        self.preferencias = preferencias
        self.propiedades_persona = propiedades_persona
        self.dopamina = 0.0

    def obtener_paths_grafo(self, grafo_ngx):
        # Get the paths of an ngx graph
        pass
    ## create questions from Internet Archive books
    def crear_preguntas(self, txt):
        search = internetarchive.search_items(txt)
        res = []
        for result in search:
            print(result["identifier"])
            idc = result["identifier"]
            headers = {"accept": "application/json"}
            ## get the book pages as plain text
            req2 = requests.get("https://archive.org/stream/" + idc + "/" + idc + "_djvu.txt",
                                headers=headers)
            try:
                # keep only the text between the <pre> tags and collect lines containing a question
                book_txt = req2.text.split("<pre>")[1].split("</pre>")[0].split(" <!--")[0]
                for x in book_txt.split("\n"):
                    if "?" in x:
                        res.append(x)
            except IndexError:
                pass
        return res
    # Generate short-term memory from long-term memory and questions over the prompt data,
    # compare against our own datasets, and return matches with sentiment analysis.
    def longToShortFast(self, txt):
        memory.memory = {}
        subjects = coreAi.entity_pos_tagger(txt)
        subjects_nc = coreAi.grammatical_pos_tagger(txt)
        subjects_filtered = []
        for sub in subjects:
            # keep person/organisation/location entities whose surface form is longer than 3 characters
            if (("PER" in sub["entity"] or "ORG" in sub["entity"] or "LOC" in sub["entity"])
                    and len(sub["word"]) > 3):
                subjects_filtered.append(sub["word"])
        for sub in subjects_nc:
            if "NN" in sub["entity"]:
                subjects_filtered.append(sub["word"])
        ## TODO: add NC tagger queries
        subjects_filtered = coreAi.process_list(subjects_filtered)
        subs = []
        for sub in subjects_filtered:
            if len(sub) > 3:
                subs.append(sub)
        exprs = coreAi.gen_search_expr(subs[0:3])
        for sub in exprs:
            memory.add_concept(sub, longMem.find_matches(sub))
        return memory
    def longToShort(self, txt):
        think_about = longMem.find_matches(txt)
        print(think_about)
        for T in think_about:
            ## get the subject by entropy or POS tagger
            subjects = coreAi.entity_pos_tagger(T)
            subjects_filtered = []
            for sub in subjects:
                if "PER" in sub["entity"] or "ORG" in sub["entity"] or "LOC" in sub["entity"]:
                    subjects_filtered.append(sub["word"])
            for sub in subjects_filtered:
                memory.add_concept(sub, T)
        return memory
    # Generate thoughts and questions over the prompt data, compare against our own
    # datasets, and return matches with sentiment analysis.
    def think_gen(self, txt):
        think_about = longMem.find_matches(txt)
        print(think_about)
        for T in think_about:
            ## get the subject by entropy or POS tagger
            subjects = coreAi.entity_pos_tagger(T)
            print(subjects)
            ## get noun chunks, filtering by grammatical tags
            subjects_low = coreAi.grammatical_pos_tagger(T)
            ## generate questions from Internet Archive books
            questions = []
            for sub in subjects:
                questions.extend(self.crear_preguntas(sub["word"]))
            ## fast checks from gematria similarity
            ##questions_togem =
            ##gematria_search =
            questions_subj = []
            for q in questions:
                questions_subj.append(coreAi.entity_pos_tagger(q))
            memoryShortTags = memory.search_concept_pattern(subjects)
            ## get the tags of the subject
            subj_tags = coreAi.entity_pos_tagger(T)
            for sub in subjects:
                # store the generated questions and the matching short-memory tags under each subject
                memory.add_concept(sub["word"], ",".join(questions) + "," + ",".join(memoryShortTags))
                memory.add_concept(sub["word"], T + "," + ",".join(memoryShortTags))
        return memory
    ## Pending steps (a hedged sketch follows below):
    ## - check whether something needs to be added to our own datasets
    ## - run sentiment analysis
    ## - check whether the dopamine prompt is true or false for the information
    ## - weight the information according to the generated dopamine
    ## - add the dopamine weights to the dopamine concept dataset
    ## - add to the self dataset
    ## - add to the preferences dataset
    ## - add to or remove from the data
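    # A minimal sketch of the steps listed above; the method name and threshold are
    # hypothetical and not part of the original file. It assumes coreAi.sentiment_tags()
    # returns a value comparable to a numeric threshold; adapt to the real return type.
    def evaluar_dopamina(self, informacion, umbral=0.5):
        sentimiento = coreAi.sentiment_tags(informacion)      # sentiment analysis
        positivo = float(sentimiento) > umbral                 # assumed numeric score
        self.dopamina += 0.1 if positivo else -0.1             # weight by generated dopamine
        # add the dopamine weight to the dopamine concept dataset
        memory.add_concept("dopamina", informacion + " (" + str(round(self.dopamina, 2)) + ")")
        if positivo:
            self.agregar_frase_yo(informacion)                 # add to the self dataset
        else:
            self.eliminar_preferencia(informacion)             # or remove from the data
        return self.dopamina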
    def crear_path_grafo(self, text):
        # Build a graph path from grammatical and NER tags
        pos_tags = coreAi.grammatical_pos_tagger(text)
        ner_results = coreAi.entity_pos_tagger(text)
        return pos_tags, ner_results

    def crear_circuito_logico(self):
        # Create a logic circuit with a specific algorithm
        pass

    def tomar_decision_sentimiento(self, sentimiento):
        # Make a boolean decision based on sentiment analysis
        sentiments = coreAi.sentiment_tags(sentimiento)
        ## TODO: check by similarity over the memory tag paths,
        ## e.g. similarity = coreAi.similarity_tag(sentenceA, sentenceB)
        return sentiments
    def hacer_predicciones_texto(self, texto):
        # Predict future text by similarity
        pass

    def agregar_preferencia(self, preferencia):
        # Add an entry to the preferences dataset
        self.preferencias.append(preferencia)

    def agregar_frase_yo(self, frase):
        # Add a sentence to the self-sentences dataset
        self.frases_yo.append(frase)

    def eliminar_preferencia(self, preferencia):
        # Remove an entry from the preferences dataset
        if preferencia in self.preferencias:
            self.preferencias.remove(preferencia)

    def eliminar_frase_yo(self, frase):
        # Remove a sentence from the self-sentences dataset
        if frase in self.frases_yo:
            self.frases_yo.remove(frase)

    def generar_pregunta(self, prompt):
        # Generate a question about a prompt
        pregunta = prompt + " ¿Qué opinas sobre esto?"
        return pregunta

    def responder_pregunta(self, pregunta):
        # Answer a question
        respuesta = "No estoy seguro de qué opinar sobre eso."
        return respuesta

    def discriminar_y_agregar(self, informacion, dataset):
        # Discriminate information and add it to the corresponding dataset
        if "yo" in informacion.lower():
            self.agregar_frase_yo(informacion)
        elif "preferencia" in informacion.lower():
            self.agregar_preferencia(informacion)
        elif "propiedad" in informacion.lower():
            # Here you could add logic to update the person's properties
            pass
        else:
            # Here you could handle other kinds of information
            pass
if __name__ == "__main__":
    # Usage example:
    frases_yo = ["Yo soy inteligente", "Yo puedo lograr lo que me proponga"]
    preferencias = ["Cine", "Música", "Viajar"]
    propiedades_persona = {"carisma": 0.8, "destreza": 0.6, "habilidad": 0.9}
    yo = I("", frases_yo, preferencias, propiedades_persona)

    # Generate a question
    pregunta_generada = yo.generar_pregunta("Hoy es un día soleado.")
    print("Pregunta generada:", pregunta_generada)

    # Answer the question
    respuesta = yo.responder_pregunta(pregunta_generada)
    print("Respuesta:", respuesta)

    # Discriminate and add information
    informacion = "Me gusta ir al cine."
    yo.discriminar_y_agregar(informacion, yo.preferencias)
    print("Preferencias actualizadas:", yo.preferencias)