import gradio as gr
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.animation import FuncAnimation
import networkx as nx
import time
import random
import json
from datetime import datetime
import threading
import queue


class Neuron:
    def __init__(self, neuron_id, x, y, z=0):
        self.id = neuron_id
        self.x = x
        self.y = y
        self.z = z
        self.activation = random.random() * 0.1
        self.specialization = random.choice([
            "visual", "semantic", "temporal", "spatial", "abstract",
            "linguistic", "logical", "creative", "memory", "learning"
        ])
        self.knowledge = set()
        self.connections = []
        self.quantum_state = [random.random() for _ in range(4)]
        self.learning_rate = 0.1 + random.random() * 0.9
        self.age = 0
        self.experience = 0
        self.fitness = 0
        self.energy = random.random()
        self.bias = random.uniform(-0.1, 0.1)
        self.weights = {}
        self.memory = []


class AdvancedNeuralNetwork:
    def __init__(self):
        self.neurons = []
        self.connections = []
        self.metrics = {
            'loss': 1.0,
            'efficiency': 0,
            'convergence': 0,
            'global_fitness': 0,
            'learning_rate': 0.1,
            'knowledge_growth': 0,
            'reasoning_capability': 0
        }
        self.specializations = [
            "visual", "semantic", "temporal", "spatial", "abstract",
            "linguistic", "logical", "creative", "memory", "learning"
        ]
        self.initialize_network()

    def initialize_network(self, num_neurons=30):
        """Initialize the network with randomly placed, specialized neurons."""
        self.neurons = []
        self.connections = []

        # Create specialized neurons at random 2D positions
        for i in range(num_neurons):
            x = random.uniform(0.1, 0.9)
            y = random.uniform(0.1, 0.9)
            neuron = Neuron(i, x, y)
            self.neurons.append(neuron)

        # Create the initial connectivity
        self.create_initial_connections()

    def create_initial_connections(self):
        """Create initial connections between nearby neurons."""
        for i, neuron1 in enumerate(self.neurons):
            for j, neuron2 in enumerate(self.neurons[i+1:], i+1):
                distance = self.calculate_distance(neuron1, neuron2)
                # Connect only close pairs, with some randomness
                if distance < 0.3 and random.random() < 0.3:
                    weight = random.random()
                    neuron1.connections.append(j)
                    neuron2.connections.append(i)
                    neuron1.weights[j] = weight
                    neuron2.weights[i] = weight
                    self.connections.append({'from': i, 'to': j, 'weight': weight})

    def calculate_distance(self, neuron1, neuron2):
        """Euclidean distance between two neurons."""
        dx = neuron1.x - neuron2.x
        dy = neuron1.y - neuron2.y
        dz = neuron1.z - neuron2.z
        return np.sqrt(dx*dx + dy*dy + dz*dz)

    def sigmoid(self, x):
        """Sigmoid activation function (input clipped to avoid overflow)."""
        return 1 / (1 + np.exp(-np.clip(x, -500, 500)))

    def step(self):
        """Run one simulation step."""
        # Compute new activations from weighted, distance-attenuated inputs
        new_activations = []
        for j, neuron in enumerate(self.neurons):
            input_sum = 0
            for i, source_neuron in enumerate(self.neurons):
                if i == j:
                    continue
                weight = source_neuron.weights.get(j, 0)
                distance = self.calculate_distance(source_neuron, neuron)
                attenuation = 1 / (1 + distance * 5)
                input_sum += source_neuron.activation * weight * attenuation

            # Quantum-inspired perturbation
            quantum_influence = (neuron.quantum_state[0] - 0.5) * 0.8
            new_activation = self.sigmoid(input_sum + neuron.bias + quantum_influence)
            new_activations.append(new_activation)

        # Apply the new activations with decay
        for i, neuron in enumerate(self.neurons):
            neuron.activation = new_activations[i] * 0.96
            neuron.energy = neuron.activation
            neuron.age += 0.01
            neuron.experience += neuron.activation * 0.1

            # Update the quantum state
            for j in range(4):
                neuron.quantum_state[j] = np.sin(time.time() * 0.001 + j) * neuron.activation

        # Hebbian learning
        self.hebbian_learning()

        # Update network metrics
        self.update_metrics()
    def hebbian_learning(self):
        """Apply Hebbian learning: co-active pairs strengthen their connection."""
        learning_rate = self.metrics['learning_rate'] * 0.01
        for neuron in self.neurons:
            for connected_id in neuron.connections:
                if connected_id < len(self.neurons):
                    connected_neuron = self.neurons[connected_id]
                    delta = learning_rate * neuron.activation * connected_neuron.activation
                    current_weight = neuron.weights.get(connected_id, 0)
                    # Slight decay plus Hebbian increment, clamped to [0, 4]
                    neuron.weights[connected_id] = max(0, min(4, current_weight * 0.999 + delta))

    def update_metrics(self):
        """Update the aggregate network metrics."""
        active_neurons = sum(1 for n in self.neurons if n.activation > 0.1)
        total_knowledge = sum(len(n.knowledge) for n in self.neurons)
        total_energy = sum(n.activation for n in self.neurons)
        max_energy = len(self.neurons)

        self.metrics['efficiency'] = active_neurons / len(self.neurons) if self.neurons else 0
        self.metrics['knowledge_growth'] = total_knowledge
        self.metrics['global_fitness'] = total_energy / max_energy if max_energy > 0 else 0
        self.metrics['convergence'] = min(self.metrics['efficiency'] * self.metrics['global_fitness'], 1)
        self.metrics['reasoning_capability'] = len([
            n for n in self.neurons
            if n.specialization == "logical" and n.activation > 0.3
        ]) / 10

    def add_neuron(self):
        """Add a new neuron and connect it to nearby neurons."""
        new_id = len(self.neurons)
        x = random.uniform(0.1, 0.9)
        y = random.uniform(0.1, 0.9)
        new_neuron = Neuron(new_id, x, y)

        # Connect to nearby neurons
        for existing_neuron in self.neurons:
            distance = self.calculate_distance(new_neuron, existing_neuron)
            if distance < 0.3:
                weight = np.exp(-distance / 0.4) * (0.5 + random.random() * 0.9)
                new_neuron.weights[existing_neuron.id] = weight
                existing_neuron.weights[new_id] = weight
                new_neuron.connections.append(existing_neuron.id)
                existing_neuron.connections.append(new_id)
                # Register the connection so the visualization can draw it
                self.connections.append({'from': existing_neuron.id, 'to': new_id, 'weight': weight})

        self.neurons.append(new_neuron)

    def remove_neuron(self):
        """Remove the most recently added neuron (keeping at least 5)."""
        if len(self.neurons) <= 5:
            return

        removed = self.neurons.pop()

        # Clean up references to the removed neuron
        for neuron in self.neurons:
            if removed.id in neuron.weights:
                del neuron.weights[removed.id]
            if removed.id in neuron.connections:
                neuron.connections.remove(removed.id)

        # Drop its entries from the connection list
        self.connections = [conn for conn in self.connections
                            if conn['from'] != removed.id and conn['to'] != removed.id]

    def teach_concept(self, concept):
        """Teach a concept to the network via its 'learning' neurons."""
        learning_neurons = [n for n in self.neurons if n.specialization == "learning"]

        if learning_neurons:
            best_learner = max(learning_neurons, key=lambda n: n.activation)
            best_learner.knowledge.add(concept)
            best_learner.activation += 0.5
            best_learner.memory.append({
                'pattern': concept,
                'timestamp': time.time(),
                'strength': 1.0
            })

            # Propagate the knowledge to some connected neurons
            for connected_id in best_learner.connections:
                if connected_id < len(self.neurons) and random.random() < 0.3:
                    connected_neuron = self.neurons[connected_id]
                    connected_neuron.knowledge.add(concept)
                    connected_neuron.activation += 0.2

    def reset(self):
        """Reinitialize the network."""
        self.initialize_network()


# Global network instance shared by the Gradio callbacks
global_network = AdvancedNeuralNetwork()
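

# --- Minimal headless usage sketch (not part of the original script) ---
# Shows how the simulation core can be exercised without the Gradio UI:
# step the network a few times, teach it a concept, and read the metrics.
# The helper name and the demo values below are illustrative assumptions only.
def _headless_smoke_test(steps=10):
    demo_net = AdvancedNeuralNetwork()
    for _ in range(steps):
        demo_net.step()
    demo_net.teach_concept("gravity")
    demo_net.add_neuron()
    print(f"neurons={len(demo_net.neurons)}, connections={len(demo_net.connections)}")
    print(f"efficiency={demo_net.metrics['efficiency']:.2%}, "
          f"global_fitness={demo_net.metrics['global_fitness']:.2%}")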
def create_network_visualization():
    """Render the current network state as a matplotlib figure."""
    fig, ax = plt.subplots(figsize=(12, 8))
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    ax.set_aspect('equal')
    ax.set_facecolor('#0f172a')
    fig.patch.set_facecolor('#0f172a')

    # Color per specialization
    colors = {
        'visual': '#ef4444', 'semantic': '#22c55e', 'temporal': '#3b82f6',
        'spatial': '#eab308', 'abstract': '#a855f7', 'linguistic': '#06b6d4',
        'logical': '#f97316', 'creative': '#84cc16',
        'memory': '#f59e0b', 'learning': '#ec4899'
    }

    # Draw connections
    for conn in global_network.connections:
        from_neuron = global_network.neurons[conn['from']]
        to_neuron = global_network.neurons[conn['to']]
        ax.plot([from_neuron.x, to_neuron.x], [from_neuron.y, to_neuron.y],
                'b-', alpha=0.3, linewidth=0.5)

    # Draw neurons
    for neuron in global_network.neurons:
        color = colors.get(neuron.specialization, '#ffffff')
        alpha = 0.3 + neuron.activation * 0.7
        # Node radius grows with activation
        radius = 0.015 + neuron.activation * 0.015
        circle = plt.Circle((neuron.x, neuron.y), radius, color=color, alpha=alpha, zorder=10)
        ax.add_patch(circle)

        # Label highly active neurons with their ID
        if neuron.activation > 0.7:
            ax.text(neuron.x, neuron.y + 0.03, str(neuron.id),
                    ha='center', va='bottom', color='white', fontsize=8)

        # Knowledge ring around neurons that have learned something
        if neuron.knowledge:
            knowledge_circle = plt.Circle((neuron.x, neuron.y), 0.025, fill=False,
                                          edgecolor='#22c55e', linewidth=2,
                                          alpha=0.6, zorder=11)
            ax.add_patch(knowledge_circle)

    ax.set_title('Advanced AI Neural Network - Real-Time Visualization',
                 color='white', fontsize=16, pad=20)
    ax.set_xlabel('')
    ax.set_ylabel('')
    ax.tick_params(colors='white')

    # Legend
    legend_elements = []
    for spec, color in colors.items():
        legend_elements.append(plt.Line2D([0], [0], marker='o', color='w',
                                          markerfacecolor=color, markersize=8,
                                          label=spec.capitalize()))
    ax.legend(handles=legend_elements, loc='upper left', bbox_to_anchor=(1, 1),
              facecolor='#1e293b', edgecolor='#475569', labelcolor='white')

    plt.tight_layout()
    return fig


def step_simulation():
    """Run one simulation step and refresh the outputs."""
    global_network.step()
    return create_network_visualization(), get_metrics_display()


def get_metrics_display():
    """Format the network metrics as Markdown."""
    metrics = global_network.metrics
    return f"""
## 📊 Neural Network Metrics

- **Efficiency**: {metrics['efficiency']:.1%}
- **Convergence**: {metrics['convergence']:.1%}
- **Global Fitness**: {metrics['global_fitness']:.1%}
- **Neurons**: {len(global_network.neurons)}
- **Connections**: {len(global_network.connections)}
- **Total Knowledge**: {metrics['knowledge_growth']}
- **Reasoning Capability**: {metrics['reasoning_capability']:.1%}
"""


def teach_concept_to_network(concept):
    """Teach a concept to the network from the UI."""
    if concept.strip():
        global_network.teach_concept(concept.strip())
        return (create_network_visualization(), get_metrics_display(),
                f"✅ Concept '{concept}' successfully taught to the network", "")
    return (create_network_visualization(), get_metrics_display(),
            "❌ Please enter a valid concept", concept)


def add_neuron_to_network():
    """Add a neuron to the network."""
    global_network.add_neuron()
    return create_network_visualization(), get_metrics_display(), "➕ New neuron added"


def remove_neuron_from_network():
    """Remove a neuron from the network."""
    global_network.remove_neuron()
    return create_network_visualization(), get_metrics_display(), "➖ Neuron removed"


def reset_network():
    """Reset the neural network."""
    global_network.reset()
    return create_network_visualization(), get_metrics_display(), "🔄 Neural network reset"


def auto_simulation_steps():
    """Run several simulation steps automatically."""
    for _ in range(5):
        global_network.step()
        time.sleep(0.1)
    return create_network_visualization(), get_metrics_display()
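

# --- Hedged wiring sketch (illustrative; not the original interface) ---
# Each callback above returns the figure plus a Markdown metrics block (and,
# for some, a status string / cleared textbox), so event wiring maps outputs
# positionally. The component names below are assumptions; the actual layout
# is built inside create_gradio_interface() further down.
def _build_minimal_demo():
    with gr.Blocks() as demo:
        plot = gr.Plot(value=create_network_visualization())
        metrics_md = gr.Markdown(value=get_metrics_display())
        status_md = gr.Markdown()
        concept_box = gr.Textbox(label="Concept")
        step_btn = gr.Button("Step")
        teach_btn = gr.Button("Teach")

        step_btn.click(step_simulation, outputs=[plot, metrics_md])
        teach_btn.click(teach_concept_to_network, inputs=[concept_box],
                        outputs=[plot, metrics_md, status_md, concept_box])
    return demo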

# Build the Gradio interface
def create_gradio_interface():
    with gr.Blocks(
        theme=gr.themes.Soft(
            primary_hue="blue",
            secondary_hue="slate",
            neutral_hue="slate",
        ).set(
            body_background_fill="#0f172a",
            block_background_fill="#1e293b",
            block_border_color="#475569",
            input_background_fill="#334155",
            button_primary_background_fill="#3b82f6",
            button_primary_text_color="white",
        ),
        css="""
        .gradio-container {
            background: linear-gradient(135deg, #0f172a 0%, #1e3a8a 50%, #0f172a 100%);
            min-height: 100vh;
        }
        .gr-button {
            border-radius: 8px;
            font-weight: 600;
        }
        .gr-panel {
            border-radius: 12px;
            border: 1px solid #475569;
        }
        """,
        title="🧠 NEBULA - Advanced AI Neural Network"
    ) as iface:
        # Header
        gr.HTML("""
            <div style="text-align: center; color: white;">
                <p>Interactive simulation with machine learning and AI supervision</p>
                <p>An advanced demonstration of neural networks with functional specialization and adaptive learning</p>
                <p>🚀 Developed by Agnuxo | 💡 Advanced neural network simulation with AI supervision</p>
                <p>This demonstration showcases concepts from computational neuroscience, machine learning, and complex adaptive systems.</p>
            </div>