import streamlit as st
import numpy as np
import pandas as pd
import random
import matplotlib.pyplot as plt
from scipy.spatial import distance
from scipy.signal import convolve2d
from sklearn.cluster import KMeans
import networkx as nx
from collections import deque, Counter
# Constants
GRID_SIZE = 200
FOOD_SOURCES = [(20, 20), (80, 80), (150, 150), (40, 160), (180, 30)]
OBSTACLES = [(50, 50), (100, 100), (150, 50), (70, 130), (120, 80)]
PHEROMONE_DECAY_RATE = 0.02
PHEROMONE_DIFFUSION_RATE = 0.05  # Currently unused: diffusion strength is set by the kernel in diffuse_pheromones
MAX_ANTS = 100
MUTATION_RATE = 0.01
# Pheromone Grid
pheromone_grid = np.zeros((GRID_SIZE, GRID_SIZE, 3)) # 3 channels: food, danger, exploration
# Graph representation of the environment
env_graph = nx.grid_2d_graph(GRID_SIZE, GRID_SIZE)
# Remove obstacle nodes (and their incident edges) so shortest paths route around them
for obstacle in OBSTACLES:
env_graph.remove_node(obstacle)
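# The HiveMind is the colony-level coordinator: it merges individual ant memories,
# averages the genomes of the best performers into a shared strategy, and groups
# ants into spatial clusters that act as task teams.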
class HiveMind:
def __init__(self):
self.collective_memory = {}
self.global_strategy = {}
self.task_allocation = {}
self.pheromone_importance = {'food': 0.5, 'danger': 0.3, 'exploration': 0.2}
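    # Merge each ant's memory into the shared map. Repeated sightings of the
    # same cell are blended by averaging the old and new pheromone readings.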
def update_collective_memory(self, ant_memories):
for ant_memory in ant_memories:
for position, info in ant_memory:
if position not in self.collective_memory:
self.collective_memory[position] = info
else:
old_info = self.collective_memory[position]
new_info = tuple((old + new) / 2 for old, new in zip(old_info, info))
self.collective_memory[position] = new_info
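    # Elitist strategy update: average the genome parameters of the five
    # best-performing ants and broadcast them as the colony-wide defaults.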
    def update_global_strategy(self, ant_performances):
        best_ants = sorted(ant_performances, key=lambda x: x[1], reverse=True)[:5]
        if not best_ants:
            return  # Nothing to learn from an empty colony
        self.global_strategy = {
            'exploration_rate': np.mean([ant.genome['exploration_rate'] for ant, _ in best_ants]),
            'learning_rate': np.mean([ant.genome['learning_rate'] for ant, _ in best_ants]),
            'discount_factor': np.mean([ant.genome['discount_factor'] for ant, _ in best_ants])
        }
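    # Spatial task allocation: cluster ant positions with k-means (at most 5
    # clusters) and treat each cluster as a work group.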
    def allocate_tasks(self, ants):
        self.task_allocation = {}  # Rebuild from scratch so dead ants are dropped
        if not ants:
            return np.empty((0, 2))
        ant_positions = [ant.position for ant in ants]
        clusters = KMeans(n_clusters=min(5, len(ants)), n_init=10).fit(ant_positions)
        for i, ant in enumerate(ants):
            cluster = int(clusters.labels_[i])
            ant.cluster = cluster  # Ants use their cluster id when polling neighbours
            self.task_allocation.setdefault(cluster, []).append(ant)
        return clusters.cluster_centers_  # Consumed by plot_environment via the main loop
def get_swarm_decision(self, decisions):
return Counter(decisions).most_common(1)[0][0]
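# An individual agent. Each ant senses local pheromones, picks moves with a small
# tabular Q-learning policy (occasionally deferring to a majority vote of nearby
# cluster mates), and deposits pheromones while foraging between the food sources
# and the nest at (0, 0).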
class Ant:
    def __init__(self, position, genome, hivemind):
        self.position = position
        self.genome = genome
        self.hivemind = hivemind
        self.carrying_food = False
        self.energy = 100
        self.memory = deque(maxlen=20)
        self.path_home = []
        self.role = "explorer"
        self.communication_range = 10
        self.q_table = {}
        self.performance = 0
        self.cluster = None
        # Sensible defaults so attributes read by the UI exist before the first update
        self.nearby_ants = []
        self.food_pheromone = 0.0
        self.danger_pheromone = 0.0
        self.exploration_pheromone = 0.0
def perceive_environment(self, pheromone_grid, ants):
self.food_pheromone = pheromone_grid[self.position[0], self.position[1], 0]
self.danger_pheromone = pheromone_grid[self.position[0], self.position[1], 1]
self.exploration_pheromone = pheromone_grid[self.position[0], self.position[1], 2]
        self.nearby_ants = [ant for ant in ants if ant is not self
                            and distance.euclidean(self.position, ant.position) <= self.communication_range]
    def act(self, pheromone_grid):
        possible_actions = self.get_possible_actions()
        if random.random() < self.genome['exploration_rate']:
            action = random.choice(possible_actions)
        else:
            nearby_ants = [ant for ant in self.hivemind.task_allocation.get(self.cluster, [])
                           if ant is not self and distance.euclidean(self.position, ant.position) <= self.communication_range]
            action = None
            if nearby_ants:
                swarm_decisions = [ant.decide(pheromone_grid) for ant in nearby_ants]
                candidate = self.hivemind.get_swarm_decision(swarm_decisions)
                # Neighbours vote with their own next positions, which may not be
                # adjacent to this ant; only adopt the vote if it is reachable from here.
                if candidate in possible_actions:
                    action = candidate
            if action is None:
                q_values = [self.get_q_value(a) for a in possible_actions]
                action = possible_actions[np.argmax(q_values)]
        reward = self.calculate_reward()
        self.update_q_table(action, reward)
        self.performance += reward
        return action
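    # Greedy policy readout used when neighbours poll this ant for a swarm vote:
    # no exploration, just the highest-valued reachable cell.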
def decide(self, pheromone_grid):
possible_actions = self.get_possible_actions()
q_values = [self.get_q_value(action) for action in possible_actions]
return possible_actions[np.argmax(q_values)]
def get_q_value(self, action):
return self.q_table.get((self.position, action), 0)
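    # Tabular Q-learning update: Q(s,a) <- (1-lr)*Q(s,a) + lr*(r + gamma*max_a' Q(s,a')).
    # Note: the max is taken over actions from the *current* position, since
    # self.position only advances after act() returns; this is a simplification
    # of the textbook rule, which maxes over the successor state.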
def update_q_table(self, action, reward):
current_q = self.get_q_value(action)
max_future_q = max([self.get_q_value(future_action) for future_action in self.get_possible_actions()])
new_q = (1 - self.genome['learning_rate']) * current_q + \
self.genome['learning_rate'] * (reward + self.genome['discount_factor'] * max_future_q)
self.q_table[(self.position, action)] = new_q
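    # Reward shaping: a small living cost, bonuses for carrying/finding food,
    # a penalty for obstacles, plus a weighted blend of the local pheromone levels.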
def calculate_reward(self):
base_reward = -1 # Cost of living
if self.carrying_food:
base_reward += 10
if self.position in FOOD_SOURCES:
base_reward += 20
if self.position in OBSTACLES:
base_reward -= 10
pheromone_reward = (
self.hivemind.pheromone_importance['food'] * self.food_pheromone +
self.hivemind.pheromone_importance['danger'] * -self.danger_pheromone +
self.hivemind.pheromone_importance['exploration'] * self.exploration_pheromone
)
return base_reward + pheromone_reward
def get_possible_actions(self):
x, y = self.position
possible_actions = []
for dx, dy in [(0, 1), (1, 0), (0, -1), (-1, 0)]: # right, down, left, up
new_x, new_y = x + dx, y + dy
if 0 <= new_x < GRID_SIZE and 0 <= new_y < GRID_SIZE and (new_x, new_y) not in OBSTACLES:
possible_actions.append((new_x, new_y))
return possible_actions
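    # One simulation step for this ant: sense, move, pay energy, deposit
    # pheromones, record the step in memory, and refresh role and homeward path.
    # Returns False when the ant starves, True otherwise.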
def update(self, pheromone_grid, ants):
self.perceive_environment(pheromone_grid, ants)
action = self.act(pheromone_grid)
self.position = action
self.energy -= 1
if self.energy <= 0:
return False # Ant dies
if self.carrying_food:
pheromone_grid[self.position[0], self.position[1], 0] += 5
if self.position == (0, 0): # Drop food at nest
self.carrying_food = False
self.energy = min(100, self.energy + 50)
return True # Food collected successfully
if self.position in FOOD_SOURCES and not self.carrying_food:
self.carrying_food = True
pheromone_grid[self.position[0], self.position[1], 0] += 10
if self.position in OBSTACLES:
pheromone_grid[self.position[0], self.position[1], 1] += 5
pheromone_grid[self.position[0], self.position[1], 2] += 1 # Exploration pheromone
self.memory.append((self.position, (self.food_pheromone, self.danger_pheromone, self.exploration_pheromone)))
        # Update role: task_allocation maps cluster ids to ants, not role names,
        # so derive the role from the ant's current behaviour instead
        self.role = "carrier" if self.carrying_food else "explorer"
        # Path planning: cache a shortest path back to the nest while carrying food
        if self.carrying_food and not self.path_home:
            self.path_home = nx.shortest_path(env_graph, self.position, (0, 0))
        elif not self.carrying_food:
            self.path_home = []
return True # Ant survives
# Pheromone Diffusion using Convolution
def diffuse_pheromones(pheromone_grid):
kernel = np.array([[0.05, 0.1, 0.05],
[0.1, 0.4, 0.1],
[0.05, 0.1, 0.05]])
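    # The kernel sums to 1.0, so diffusion spreads pheromone to neighbouring
    # cells without creating or destroying any.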
for i in range(3): # For each pheromone type
pheromone_grid[:,:,i] = convolve2d(pheromone_grid[:,:,i], kernel, mode='same', boundary='wrap')
# Genetic Algorithm
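# Uniform crossover: each genome key is inherited from either parent with equal probability.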
def crossover(parent1, parent2):
child = {}
for key in parent1.keys():
if random.random() < 0.5:
child[key] = parent1[key]
else:
child[key] = parent2[key]
return child
def mutate(genome):
for key in genome.keys():
if random.random() < MUTATION_RATE:
genome[key] += random.uniform(-0.1, 0.1)
genome[key] = max(0, min(1, genome[key]))
return genome
# Simulation Loop
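# One colony tick: reallocate tasks, step every ant (removing the dead), decay and
# diffuse pheromones, then fold the survivors' experience back into the hivemind.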
def simulate(ants, hivemind):
    global pheromone_grid
    food_collected = 0
    hivemind.allocate_tasks(ants)
    survivors = []
    for ant in ants:
        was_carrying = ant.carrying_food
        if ant.update(pheromone_grid, ants):
            survivors.append(ant)
            # Count a delivery only when this ant actually dropped food at the nest this step
            if was_carrying and not ant.carrying_food and ant.position == (0, 0):
                food_collected += 1
    ants = survivors  # Ants that ran out of energy die and are removed
    pheromone_grid *= (1 - PHEROMONE_DECAY_RATE)
    diffuse_pheromones(pheromone_grid)
    hivemind.update_collective_memory([ant.memory for ant in ants])
    hivemind.update_global_strategy([(ant, ant.performance) for ant in ants])
# Genetic Algorithm and Swarm Adaptation
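    # NOTE: the population never grows in this app and the UI caps it at MAX_ANTS,
    # so this cull-and-breed branch only fires if ants are spawned elsewhere or
    # MAX_ANTS is lowered; it is kept as written for that case.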
if len(ants) > MAX_ANTS:
ants.sort(key=lambda x: x.performance, reverse=True)
survivors = ants[:MAX_ANTS//2]
new_ants = []
while len(new_ants) < MAX_ANTS//2:
parent1, parent2 = random.sample(survivors, 2)
child_genome = crossover(parent1.genome, parent2.genome)
child_genome = mutate(child_genome)
child_genome = {**child_genome, **hivemind.global_strategy} # Incorporate global strategy
new_ant = Ant((random.randint(0, GRID_SIZE-1), random.randint(0, GRID_SIZE-1)), child_genome, hivemind)
new_ants.append(new_ant)
ants = survivors + new_ants
return ants, food_collected
# Visualization Functions
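# Draw the world: summed pheromone channels as the background, ants coloured by
# role, food in green, obstacles in red, cluster centers in magenta. Positions are
# (row, col), so plotting uses position[1] as x and position[0] as y to match imshow.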
def plot_environment(pheromone_grid, ants, cluster_centers):
fig, ax = plt.subplots(figsize=(10, 10))
ax.imshow(np.sum(pheromone_grid, axis=2), cmap='viridis', alpha=0.7)
for ant in ants:
color = 'blue' if ant.role == 'explorer' else 'red' if ant.role == 'carrier' else 'green'
ax.plot(ant.position[1], ant.position[0], 'o', color=color, markersize=4)
for food_x, food_y in FOOD_SOURCES:
ax.plot(food_y, food_x, 'go', markersize=10)
for obstacle_x, obstacle_y in OBSTACLES:
ax.plot(obstacle_y, obstacle_x, 'ro', markersize=10)
for center in cluster_centers:
ax.plot(center[1], center[0], 'mo', markersize=15, alpha=0.7)
ax.set_xlim([0, GRID_SIZE])
ax.set_ylim([GRID_SIZE, 0])
return fig
# Streamlit App
st.title("Advanced Ant Hivemind Simulation")
# Sidebar controls
st.sidebar.header("Simulation Parameters")
num_ants = st.sidebar.slider("Number of Ants", 10, MAX_ANTS, 50)
exploration_rate = st.sidebar.slider("Exploration Rate", 0.0, 1.0, 0.2)
learning_rate = st.sidebar.slider("Learning Rate", 0.0, 1.0, 0.1)
discount_factor = st.sidebar.slider("Discount Factor", 0.0, 1.0, 0.9)
# Initialize hivemind and ants
hivemind = HiveMind()
ants = [Ant((random.randint(0, GRID_SIZE-1), random.randint(0, GRID_SIZE-1)),
{'exploration_rate': exploration_rate,
'learning_rate': learning_rate,
'discount_factor': discount_factor},
hivemind)
for _ in range(num_ants)]
# Simulation control
start_simulation = st.sidebar.button("Start Simulation")
stop_simulation = st.sidebar.button("Stop Simulation")
reset_simulation = st.sidebar.button("Reset Simulation")
# Initialize variables
total_food_collected = 0
iterations = 0
# Main simulation loop
if start_simulation:
    cluster_centers = hivemind.allocate_tasks(ants)  # Initial clustering so the first plots show real centers
progress_bar = st.progress(0)
stats_placeholder = st.empty()
plot_placeholder = st.empty()
    # Streamlit reruns the whole script on any button click, so stop_simulation
    # cannot change mid-loop; cap the run at 1000 iterations (matching the
    # progress bar) so the loop always terminates.
    while not stop_simulation and iterations < 1000:
        if not ants:
            stats_placeholder.write("All ants have died. Simulation over.")
            break
        ants, food_collected = simulate(ants, hivemind)
        total_food_collected += food_collected
        iterations += 1
        if iterations % 10 == 0:
            cluster_centers = hivemind.allocate_tasks(ants)
        if iterations % 5 == 0:
            progress_bar.progress(min(iterations / 1000, 1.0))
            stats_placeholder.write(f"Iterations: {iterations}, Total Food Collected: {total_food_collected}")
            fig = plot_environment(pheromone_grid, ants, cluster_centers)
            plot_placeholder.pyplot(fig)
            plt.close(fig)
if reset_simulation:
pheromone_grid = np.zeros((GRID_SIZE, GRID_SIZE, 3))
hivemind = HiveMind()
ants = [Ant((random.randint(0, GRID_SIZE-1), random.randint(0, GRID_SIZE-1)),
{'exploration_rate': exploration_rate,
'learning_rate': learning_rate,
'discount_factor': discount_factor},
hivemind)
for _ in range(num_ants)]
total_food_collected = 0
iterations = 0
# Display final statistics only if simulation has run
if iterations > 0:
st.write("## Final Statistics")
st.write(f"Total Food Collected: {total_food_collected}")
st.write(f"Average Food per Iteration: {total_food_collected / iterations}")
# Display heatmap of pheromone concentration
st.write("## Pheromone Concentration Heatmap")
fig, ax = plt.subplots(figsize=(10, 10))
heatmap = ax.imshow(np.sum(pheromone_grid, axis=2), cmap='hot', interpolation='nearest')
plt.colorbar(heatmap)
st.pyplot(fig)
# Display ant role distribution
roles = [ant.role for ant in ants]
role_counts = {role: roles.count(role) for role in set(roles)}
st.write("## Ant Role Distribution")
st.bar_chart(role_counts)
# Display network graph of ant communication
st.write("## Ant Communication Network")
G = nx.Graph()
for ant in ants:
G.add_node(ant.position)
for nearby_ant in ant.nearby_ants:
G.add_edge(ant.position, nearby_ant.position)
fig, ax = plt.subplots(figsize=(10, 10))
pos = nx.spring_layout(G)
nx.draw(G, pos, with_labels=False, node_size=30, node_color='skyblue', edge_color='gray', ax=ax)
st.pyplot(fig)
# Display hivemind collective memory heatmap
st.write("## Hivemind Collective Memory")
memory_grid = np.zeros((GRID_SIZE, GRID_SIZE))
for pos, info in hivemind.collective_memory.items():
memory_grid[pos[0], pos[1]] = np.mean(info)
fig, ax = plt.subplots(figsize=(10, 10))
heatmap = ax.imshow(memory_grid, cmap='viridis', interpolation='nearest')
plt.colorbar(heatmap)
st.pyplot(fig)
# Display the latest global strategy (only the most recent snapshot is stored,
# so a bar chart of the current values is more honest than a one-point line chart)
st.write("## Global Strategy")
if hivemind.global_strategy:
    st.bar_chart(pd.DataFrame(hivemind.global_strategy, index=[0]))
else:
    st.write("No global strategy computed yet.")
# Display performance distribution of ants
st.write("## Ant Performance Distribution")
performances = [ant.performance for ant in ants]
fig, ax = plt.subplots()
ax.hist(performances, bins=20)
ax.set_xlabel('Performance')
ax.set_ylabel('Number of Ants')
st.pyplot(fig)
# Display task allocation (number of ants per cluster)
st.write("## Task Allocation")
task_counts = {f"cluster {c}": len(members) for c, members in hivemind.task_allocation.items()}
st.bar_chart(task_counts)
# Add some final notes about the simulation
st.write("""
## About this Simulation
This advanced ant hivemind simulation demonstrates several key concepts in swarm intelligence and collective behavior:
1. **Collective Decision Making**: Ants make decisions based on both individual and swarm intelligence.
2. **Adaptive Strategies**: The hivemind evolves its strategy based on the performance of the best ants.
3. **Distributed Task Allocation**: Ants are dynamically assigned to different tasks based on their location and the colony's needs.
4. **Emergent Behavior**: Complex colony-level behaviors emerge from simple individual ant rules.
5. **Information Sharing**: Ants share information through pheromones and direct communication.
6. **Collective Memory**: The hivemind maintains a collective memory of the environment.
This simulation showcases how simple agents, when working together with the right rules, can exhibit complex and intelligent behavior at the group level.
""")