Update app.py
app.py CHANGED
@@ -401,64 +401,122 @@ class NeuralNetworkSimulator:
 
 
 
-#
-
-
-    for y in range(height):
-        for x in range(width):
-            base_sensitivities = np.random.rand(12) * 0.5 + 0.5
-
-            # Enhance certain areas
-            if 250 < x < 350 and 50 < y < 150: # Head
-                base_sensitivities *= 1.5
-            elif 275 < x < 325 and 80 < y < 120: # Eyes
-                base_sensitivities[0] *= 2 # More sensitive to pain
-            elif 290 < x < 310 and 100 < y < 120: # Nose
-                base_sensitivities[4] *= 2 # More sensitive to texture
-            elif 280 < x < 320 and 120 < y < 140: # Mouth
-                base_sensitivities[1] *= 2 # More sensitive to pleasure
-            elif 250 < x < 350 and 250 < y < 550: # Torso
-                base_sensitivities[2:6] *= 1.3 # Enhance pressure, temp, texture, em
-            elif (150 < x < 250 or 350 < x < 450) and 250 < y < 600: # Arms
-                base_sensitivities[0:2] *= 1.2 # Enhance pain and pleasure
-            elif 200 < x < 400 and 600 < y < 800: # Legs
-                base_sensitivities[6:8] *= 1.4 # Enhance tickle and itch
-            elif (140 < x < 160 or 440 < x < 460) and 390 < y < 410: # Hands
-                base_sensitivities *= 2 # Highly sensitive overall
-            elif (220 < x < 240 or 360 < x < 380) and 770 < y < 790: # Feet
-                base_sensitivities[6] *= 2 # Very ticklish
-
-            sensation_map[y, x] = base_sensitivities
-
-    return sensation_map
+# Set up MediaPipe Pose
+mp_pose = mp.solutions.pose
+pose = mp_pose.Pose(static_image_mode=True, min_detection_confidence=0.5)
 
+def detect_humanoid(image):
+    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+    results = pose.process(image_rgb)
+
+    if results.pose_landmarks:
+        landmarks = results.pose_landmarks.landmark
+        image_height, image_width, _ = image.shape
+        keypoints = []
+        for landmark in landmarks:
+            x = int(landmark.x * image_width)
+            y = int(landmark.y * image_height)
+            keypoints.append((x, y))
+        return keypoints
+    return []
+
+def apply_touch_points(image, keypoints):
+    draw = ImageDraw.Draw(image)
+    for point in keypoints:
+        draw.ellipse([point[0]-5, point[1]-5, point[0]+5, point[1]+5], fill='red')
+    return image
+
+def create_sensation_map(width, height, keypoints):
+    sensation_map = np.zeros((height, width, 12))
+    for y in range(height):
+        for x in range(width):
+            base_sensitivities = np.random.rand(12) * 0.5 + 0.5
+
+            # Enhance sensitivities near keypoints
+            for kp in keypoints:
+                distance = np.sqrt((x - kp[0])**2 + (y - kp[1])**2)
+                if distance < 30: # Adjust this value to change the area of influence
+                    base_sensitivities *= 1.5
+
+            sensation_map[y, x, 0] = base_sensitivities[0] * np.random.rand() # Pain
+            sensation_map[y, x, 1] = base_sensitivities[1] * np.random.rand() # Pleasure
+            sensation_map[y, x, 2] = base_sensitivities[2] * np.random.rand() # Pressure
+            sensation_map[y, x, 3] = base_sensitivities[3] * (np.random.rand() * 10 + 30) # Temperature
+            sensation_map[y, x, 4] = base_sensitivities[4] * np.random.rand() # Texture
+            sensation_map[y, x, 5] = base_sensitivities[5] * np.random.rand() # EM field
+            sensation_map[y, x, 6] = base_sensitivities[6] * np.random.rand() # Tickle
+            sensation_map[y, x, 7] = base_sensitivities[7] * np.random.rand() # Itch
+            sensation_map[y, x, 8] = base_sensitivities[8] * np.random.rand() # Quantum
+            sensation_map[y, x, 9] = base_sensitivities[9] * np.random.rand() # Neural
+            sensation_map[y, x, 10] = base_sensitivities[10] * np.random.rand() # Proprioception
+            sensation_map[y, x, 11] = base_sensitivities[11] * np.random.rand() # Synesthesia
+
+    return sensation_map
+
+def create_heatmap(sensation_map, sensation_type):
+    plt.figure(figsize=(10, 15))
+    sns.heatmap(sensation_map[:, :, sensation_type], cmap='viridis')
+    plt.title(f'{["Pain", "Pleasure", "Pressure", "Temperature", "Texture", "EM Field", "Tickle", "Itch", "Quantum", "Neural", "Proprioception", "Synesthesia"][sensation_type]} Sensation Map')
+    plt.axis('off')
+
+    buf = io.BytesIO()
+    plt.savefig(buf, format='png')
+    buf.seek(0)
+
+    data = base64.b64encode(buf.getvalue()).decode('utf-8')
+
+    plt.close()
+
+    return f'data:image/png;base64,{data}'
 
+# Streamlit app
+st.title("NeuraSense AI - Humanoid Touch Point Detection")
 
-
+uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
 
-
-
-
-
+if uploaded_file is not None:
+    # Read the image
+    image = Image.open(uploaded_file)
+    image_np = np.array(image)
 
-#
-
+    # Detect humanoid keypoints
+    keypoints = detect_humanoid(image_np)
 
-#
-
+    # Apply touch points to the image
+    processed_image = apply_touch_points(image.copy(), keypoints)
 
-#
-
+    # Display the processed image
+    st.image(processed_image, caption='Processed Image with Touch Points', use_column_width=True)
 
-#
-
-
-
-
+    # Create sensation map
+    sensation_map = create_sensation_map(image.width, image.height, keypoints)
+
+    # Display heatmaps for different sensations
+    sensation_types = ["Pain", "Pleasure", "Pressure", "Temperature", "Texture", "EM Field",
+                       "Tickle", "Itch", "Quantum", "Neural", "Proprioception", "Synesthesia"]
+
+    selected_sensation = st.selectbox("Select a sensation to view:", sensation_types)
+    heatmap = create_heatmap(sensation_map, sensation_types.index(selected_sensation))
+    st.image(heatmap, use_column_width=True)
+
+    # Generate AI response based on the image and sensations
+    if st.button("Generate AI Response"):
+        # You can customize this part to generate more specific responses based on the detected keypoints and sensations
+        response = generate_ai_response(keypoints, sensation_map)
+        st.write("AI Response:", response)
+
+def generate_ai_response(keypoints, sensation_map):
+    # This is a simple example. You can make this more sophisticated based on your needs.
+    num_keypoints = len(keypoints)
+    avg_sensations = np.mean(sensation_map, axis=(0, 1))
 
+    response = f"I detect {num_keypoints} key points on the humanoid figure. "
+    response += "The average sensations across the body are:\n"
+    for i, sensation in enumerate(["Pain", "Pleasure", "Pressure", "Temperature", "Texture", "EM Field",
+                                   "Tickle", "Itch", "Quantum", "Neural", "Proprioception", "Synesthesia"]):
+        response += f"{sensation}: {avg_sensations[i]:.2f}\n"
 
-    return
+    return response
 
 
 # Create futuristic human-like avatar
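Note on the added code: `generate_ai_response` is defined after the `if st.button(...)` block that calls it. Streamlit re-executes app.py from top to bottom on every interaction, so when the button is pressed the call runs before the `def` statement has been reached and raises a `NameError`. Relatedly, `detect_humanoid` applies `cv2.cvtColor(image, cv2.COLOR_BGR2RGB)` to an array created from a `PIL.Image`, which is already RGB, so the conversion swaps the red and blue channels before MediaPipe sees the frame. Below is a minimal, self-contained sketch of the ordering fix, not the committed code; the `keypoints` and `sensation_map` values are dummy stand-ins for the detection pipeline:

import numpy as np
import streamlit as st

# Define the helper BEFORE the widget code that calls it, so the name
# exists by the time Streamlit's top-to-bottom rerun reaches the call.
def generate_ai_response(keypoints, sensation_map):
    avg_sensations = np.mean(sensation_map, axis=(0, 1))
    response = f"I detect {len(keypoints)} key points on the humanoid figure. "
    response += "The average sensations across the body are:\n"
    for i, sensation in enumerate(["Pain", "Pleasure", "Pressure", "Temperature",
                                   "Texture", "EM Field", "Tickle", "Itch",
                                   "Quantum", "Neural", "Proprioception", "Synesthesia"]):
        response += f"{sensation}: {avg_sensations[i]:.2f}\n"
    return response

# Stand-in inputs so the sketch runs on its own (hypothetical values).
keypoints = [(100, 120), (140, 180)]
sensation_map = np.random.rand(64, 64, 12)

if st.button("Generate AI Response"):
    # The function is already defined above, so this call resolves.
    st.write("AI Response:", generate_ai_response(keypoints, sensation_map))

An equivalent fix that keeps the commit's layout is to move every `def` above the `st.title(...)` line. The hunk also assumes `mp`, `cv2`, `np`, `plt`, `sns`, `st`, `io`, `base64`, `Image`, and `ImageDraw` are imported earlier in app.py, which this diff does not show.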