/**
* ArgOS - A Reality-Bending Agent Simulation Framework
* Built for Project 89 using BitECS
*/
import {
addComponent,
addEntity,
createWorld,
defineComponent,
defineQuery,
defineSystem,
hasComponent,
IWorld,
Types
} from 'bitecs';
// ============================================================
// Core Components
// ============================================================
// Basic Components
export const Position = defineComponent({
x: Types.f32,
y: Types.f32
});
export const Velocity = defineComponent({
x: Types.f32,
y: Types.f32
});
// Agent-specific Components
export const SensoryData = defineComponent({
radius: Types.f32, // How far the agent can perceive
entitiesDetected: [Types.eid, 10] // Array of entity IDs detected
});
export const Learning = defineComponent({
qValues: [Types.f32, 4 * 10], // Q-values for 4 actions across 10 state bins
learningRate: Types.f32, // How fast to update Q-values
explorationRate: Types.f32, // Probability of random action vs. learned action
lastState: Types.ui8, // Previous state for learning updates
lastAction: Types.ui8, // Previous action for learning updates
rewardAccumulator: Types.f32 // Total reward accumulated
});
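// Q-table layout note: qValues is a flat table of 10 states x 4 actions; the decision
// system below indexes it as state * 4 + action.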
export const Memory = defineComponent({
// Short-term memory capacity - stores recent events
capacity: Types.ui8,
// Current memory pointer
currentIndex: Types.ui8,
// Entity IDs stored in memory
entityIds: [Types.eid, 20],
// Types of entities remembered, mirroring Environmental.type (0: resource, 1: obstacle, 2: hazard);
// other agents are currently also recorded as type 0
entityTypes: [Types.ui8, 20],
// Positions of remembered entities
positionsX: [Types.f32, 20],
positionsY: [Types.f32, 20],
// Timestamps of memories
timestamps: [Types.ui32, 20]
});
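// Memory behaves as a fixed-size ring buffer: currentIndex advances on every write and
// wraps at capacity, so the oldest entries are overwritten first.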
export const Goals = defineComponent({
// Primary goal type (0: explore, 1: collect, 2: avoid, 3: communicate)
primaryType: Types.ui8,
// Target entity for the goal (if applicable)
targetEntity: Types.eid,
// Target position
targetX: Types.f32,
targetY: Types.f32,
// Priority of this goal (higher = more important)
priority: Types.ui8
});
export const Actions = defineComponent({
// Current action type (0: none, 1: move, 2: collect, 3: communicate)
currentAction: Types.ui8,
// Cooldown timer for actions
cooldown: Types.ui8,
// Success rate of previous actions (0-100)
successRate: Types.ui8
});
export const CognitiveState = defineComponent({
// Represents emotional state (0-100)
// 0-30: cautious, 31-70: neutral, 71-100: bold
emotionalState: Types.ui8,
// Focus level (0-100)
// Low focus means more distracted, high focus means more determined
focus: Types.ui8,
// Learning rate (0-100)
// How quickly agent adapts to new situations
adaptability: Types.ui8
});
// Environment Components
export const Environmental = defineComponent({
// Type of environmental entity (0: resource, 1: obstacle, 2: hazard)
type: Types.ui8,
// Quantity/health/strength of the entity
value: Types.ui8,
// Can this be interacted with?
interactive: Types.ui8
});
export const RealityFlux = defineComponent({
// How stable is this entity's reality? (0-100)
// Lower values mean more susceptible to reality-bending
stability: Types.ui8,
// The type of reality effect applied (0: none, 1: teleport, 2: phase, 3: transform)
effectType: Types.ui8,
// Duration of the effect
duration: Types.ui16
});
export const Communication = defineComponent({
// Is this entity currently sending a message?
sending: Types.ui8,
// Target entity to communicate with (0 for broadcast)
target: Types.eid,
// Message type (0: location, 1: warning, 2: request, 3: cooperation)
messageType: Types.ui8,
// Additional data slots for message content
data1: Types.i32,
data2: Types.i32,
data3: Types.i32
});
export const Social = defineComponent({
allies: [Types.eid, 5], // Up to 5 allied agents
rivals: [Types.eid, 5], // Up to 5 rival agents
trustLevel: Types.ui8, // Overall trust in others (0-100)
cooperationCount: Types.ui8, // Number of successful cooperations
groupId: Types.ui8 // Group identifier for cooperation clusters
});
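// For orientation: bitecs stores each scalar component field as a flat typed array indexed
// by entity id, so reads and writes are plain array accesses. A minimal sketch, assuming a
// world created with createWorld():
//
//   const eid = addEntity(world);
//   addComponent(world, Position, eid);
//   Position.x[eid] = 10;
//   Position.y[eid] = 20;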
// ============================================================
// Core Systems
// ============================================================
/**
* Perception System - Updates sensory data for agents based on environment
*/
export const createPerceptionSystem = () => {
const agentQuery = defineQuery([Position, SensoryData]);
const environmentalQuery = defineQuery([Position, Environmental]);
return defineSystem((world: IWorld) => {
const agents = agentQuery(world);
const environmentalEntities = environmentalQuery(world);
// For each agent, detect nearby entities
for (let i = 0; i < agents.length; i++) {
const agent = agents[i];
const agentPos = {
x: Position.x[agent],
y: Position.y[agent]
};
const senseRadius = SensoryData.radius[agent];
let detectedCount = 0;
// Clear previous detections
for (let j = 0; j < 10; j++) {
SensoryData.entitiesDetected[agent * 10 + j] = 0;
}
// Detect environmental entities
for (let j = 0; j < environmentalEntities.length; j++) {
const entity = environmentalEntities[j];
if (entity === agent) continue;
const entityPos = {
x: Position.x[entity],
y: Position.y[entity]
};
// Calculate distance
const dx = entityPos.x - agentPos.x;
const dy = entityPos.y - agentPos.y;
const distance = Math.sqrt(dx * dx + dy * dy);
// If within perception radius and we have space to store it
if (distance <= senseRadius && detectedCount < 10) {
SensoryData.entitiesDetected[agent * 10 + detectedCount] = entity;
detectedCount++;
}
}
}
return world;
});
};
/**
* Memory System - Records and manages agent memories
*/
export const createMemorySystem = () => {
const memoryQuery = defineQuery([Memory, SensoryData]);
return defineSystem((world: IWorld) => {
const timestamp = world.time; // Current simulation time
const agents = memoryQuery(world);
for (let i = 0; i < agents.length; i++) {
const agent = agents[i];
const capacity = Memory.capacity[agent];
let currentIndex = Memory.currentIndex[agent];
// Process detected entities and store in memory
for (let j = 0; j < 10; j++) {
const detectedEntity = SensoryData.entitiesDetected[agent * 10 + j];
if (detectedEntity === 0) continue; // No entity detected in this slot
// Store entity in memory
Memory.entityIds[agent * 20 + currentIndex] = detectedEntity;
Memory.timestamps[agent * 20 + currentIndex] = timestamp;
// Store entity type
if (hasComponent(world, Environmental, detectedEntity)) {
Memory.entityTypes[agent * 20 + currentIndex] = Environmental.type[detectedEntity];
} else {
// Assume it's another agent
Memory.entityTypes[agent * 20 + currentIndex] = 0;
}
// Store position
Memory.positionsX[agent * 20 + currentIndex] = Position.x[detectedEntity];
Memory.positionsY[agent * 20 + currentIndex] = Position.y[detectedEntity];
// Increment memory index, wrap around if needed
currentIndex = (currentIndex + 1) % capacity;
}
// Update current index
Memory.currentIndex[agent] = currentIndex;
}
return world;
});
};
/**
* Decision-Making System - Processes inputs to determine the next action with learning
*/
export const createDecisionSystem = () => {
const decisionQuery = defineQuery([Goals, Memory, SensoryData, CognitiveState, Learning, Actions, Social]);
return defineSystem((world: IWorld) => {
const agents = decisionQuery(world);
for (let i = 0; i < agents.length; i++) {
const agent = agents[i];
// Get agent's cognitive and learning attributes
const adaptability = CognitiveState.adaptability[agent];
const learningRate = Learning.learningRate[agent];
const explorationRate = Learning.explorationRate[agent];
// Calculate simple state based on environment
// We'll use distance to nearest resource/hazard as our state
let currentState = 0;
let nearestResourceDist = Infinity;
let nearestHazardDist = Infinity;
let nearestAgentDist = Infinity;
let nearestResourcePos = {x: 0, y: 0};
let nearestHazardPos = {x: 0, y: 0};
let nearestAgentId = 0;
// Process sensory data to determine state
for (let j = 0; j < 10; j++) {
const detectedEntity = SensoryData.entitiesDetected[agent * 10 + j];
if (detectedEntity === 0) continue;
const dx = Position.x[detectedEntity] - Position.x[agent];
const dy = Position.y[detectedEntity] - Position.y[agent];
const distance = Math.sqrt(dx * dx + dy * dy);
// Track nearest resource
if (hasComponent(world, Environmental, detectedEntity) && Environmental.type[detectedEntity] === 0) {
if (distance < nearestResourceDist) {
nearestResourceDist = distance;
nearestResourcePos = {x: Position.x[detectedEntity], y: Position.y[detectedEntity]};
currentState = Math.min(9, Math.floor(distance / 3)); // Bin into state 0-9
}
}
// Track nearest hazard
if (hasComponent(world, Environmental, detectedEntity) && Environmental.type[detectedEntity] === 2) {
if (distance < nearestHazardDist) {
nearestHazardDist = distance;
nearestHazardPos = {x: Position.x[detectedEntity], y: Position.y[detectedEntity]};
// If hazard is very close, override state to represent danger
if (distance < 5) {
currentState = 9; // Danger state
}
}
}
// Track nearest agent (for social dynamics)
if (hasComponent(world, SensoryData, detectedEntity) && hasComponent(world, Memory, detectedEntity)) {
if (distance < nearestAgentDist) {
nearestAgentDist = distance;
nearestAgentId = detectedEntity;
}
}
}
// Calculate reward for previous action based on success rate
// and whether agent achieved its goal
const previousAction = Learning.lastAction[agent];
const previousState = Learning.lastState[agent];
let reward = -1; // Base cost for actions
// Higher reward for successful actions
if (Actions.successRate[agent] > 60) {
reward += 2;
}
// Extra reward for collecting resources (getting close to goal)
if (previousAction === 1 && nearestResourceDist < 2) {
reward += 5;
}
// Extra reward for successfully avoiding hazards
if (previousAction === 2 && nearestHazardDist > 10) {
reward += 3;
}
// Punishment for getting too close to hazards
if (nearestHazardDist < 3) {
reward -= 4;
}
// Reward cooperative behavior with allies
if (previousAction === 3) {
let isAlly = false;
for (let k = 0; k < 5; k++) {
if (Social.allies[agent * 5 + k] === nearestAgentId) {
isAlly = true;
break;
}
}
if (isAlly && nearestAgentDist < 5) {
reward += 2;
Social.cooperationCount[agent]++;
}
}
// Accumulate total reward (for analytics)
Learning.rewardAccumulator[agent] += reward;
// Update Q-value for previous action
if (previousAction < 4) {
const stateActionIndex = previousState * 4 + previousAction;
const oldQValue = Learning.qValues[agent * 40 + stateActionIndex];
// Q-learning update formula
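// For reference, this is the standard tabular update:
//   Q(s, a) ← Q(s, a) + α · (r + γ · max_a' Q(s', a') − Q(s, a))
// with α = learningRate and a fixed discount factor γ = 0.8 (hard-coded below).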
const bestNextQValue = Math.max(
Learning.qValues[agent * 40 + currentState * 4],
Learning.qValues[agent * 40 + currentState * 4 + 1],
Learning.qValues[agent * 40 + currentState * 4 + 2],
Learning.qValues[agent * 40 + currentState * 4 + 3]
);
// Update Q-value
const newQValue = oldQValue + learningRate * (reward + 0.8 * bestNextQValue - oldQValue);
Learning.qValues[agent * 40 + stateActionIndex] = newQValue;
}
// Choose next action: explore or exploit
let goalType, targetX, targetY, highestPriority = 0, targetEntity = 0;
// Exploration: random action
if (Math.random() < explorationRate) {
goalType = Math.floor(Math.random() * 4); // Random action (0-3)
}
// Exploitation: choose best action based on Q-values
else {
// Find action with highest Q-value for current state
let maxQValue = -Infinity;
for (let action = 0; action < 4; action++) {
const qValue = Learning.qValues[agent * 40 + currentState * 4 + action];
if (qValue > maxQValue) {
maxQValue = qValue;
goalType = action;
}
}
}
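// Together, the two branches above implement an epsilon-greedy policy: act randomly with
// probability explorationRate, otherwise pick the action with the highest learned Q-value
// for the current state.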
// Set target based on chosen action type
switch (goalType) {
case 0: // Explore
const angle = Math.random() * Math.PI * 2;
const distance = 10;
targetX = Position.x[agent] + Math.cos(angle) * distance;
targetY = Position.y[agent] + Math.sin(angle) * distance;
highestPriority = 20;
break;
case 1: // Collect resource
if (nearestResourceDist < Infinity) {
targetX = nearestResourcePos.x;
targetY = nearestResourcePos.y;
highestPriority = 50;
} else {
// No resource detected, check memory
let foundInMemory = false;
const capacity = Memory.capacity[agent];
for (let j = 0; j < capacity; j++) {
if (Memory.entityTypes[agent * 20 + j] === 0) {
targetEntity = Memory.entityIds[agent * 20 + j];
targetX = Memory.positionsX[agent * 20 + j];
targetY = Memory.positionsY[agent * 20 + j];
highestPriority = 30;
foundInMemory = true;
break;
}
}
if (!foundInMemory) {
// Default to exploration if no resource found
goalType = 0;
const angle = Math.random() * Math.PI * 2;
const distance = 10;
targetX = Position.x[agent] + Math.cos(angle) * distance;
targetY = Position.y[agent] + Math.sin(angle) * distance;
highestPriority = 20;
}
}
break;
case 2: // Avoid hazard
if (nearestHazardDist < Infinity) {
// Move away from hazard
const dx = Position.x[agent] - nearestHazardPos.x;
const dy = Position.y[agent] - nearestHazardPos.y;
const dist = Math.sqrt(dx*dx + dy*dy);
if (dist > 0) {
targetX = Position.x[agent] + (dx/dist) * 15; // Move directly away from the hazard
targetY = Position.y[agent] + (dy/dist) * 15;
} else {
// If directly on hazard, move in random direction
const angle = Math.random() * Math.PI * 2;
targetX = Position.x[agent] + Math.cos(angle) * 15;
targetY = Position.y[agent] + Math.sin(angle) * 15;
}
highestPriority = 70; // High priority for avoiding danger
} else {
// No hazard detected, default to exploration
goalType = 0;
const angle = Math.random() * Math.PI * 2;
const distance = 10;
targetX = Position.x[agent] + Math.cos(angle) * distance;
targetY = Position.y[agent] + Math.sin(angle) * distance;
highestPriority = 20;
}
break;
case 3: // Communicate/Cooperate
if (nearestAgentId !== 0 && nearestAgentDist < 10) {
// Try to cooperate with nearby agent
targetEntity = nearestAgentId;
targetX = Position.x[nearestAgentId];
targetY = Position.y[nearestAgentId];
highestPriority = 40;
// Initiate communication
if (hasComponent(world, Communication, agent)) {
Communication.sending[agent] = 1;
Communication.target[agent] = nearestAgentId;
Communication.messageType[agent] = 3; // Cooperation
Communication.data1[agent] = Social.groupId[agent];
}
} else {
// No nearby agent, default to exploration
goalType = 0;
const angle = Math.random() * Math.PI * 2;
const distance = 10;
targetX = Position.x[agent] + Math.cos(angle) * distance;
targetY = Position.y[agent] + Math.sin(angle) * distance;
highestPriority = 20;
}
break;
}
// Store current state and action for next learning update
Learning.lastState[agent] = currentState;
Learning.lastAction[agent] = goalType;
// Set the goal
Goals.primaryType[agent] = goalType;
Goals.targetEntity[agent] = targetEntity;
Goals.targetX[agent] = targetX;
Goals.targetY[agent] = targetY;
Goals.priority[agent] = highestPriority;
}
return world;
});
};
/**
* Action System - Executes chosen actions
*/
export const createActionSystem = () => {
const actionQuery = defineQuery([Position, Velocity, Goals, Actions]);
return defineSystem((world: IWorld) => {
const agents = actionQuery(world);
for (let i = 0; i < agents.length; i++) {
const agent = agents[i];
// Skip if on cooldown
if (Actions.cooldown[agent] > 0) {
Actions.cooldown[agent]--;
continue;
}
const goalType = Goals.primaryType[agent];
const targetX = Goals.targetX[agent];
const targetY = Goals.targetY[agent];
// Set action based on goal
Actions.currentAction[agent] = goalType === 2 ? 1 : goalType; // Convert avoid to move
// Execute the action
switch (Actions.currentAction[agent]) {
case 0: // Explore
case 1: // Move
case 2: // Avoid
// Calculate direction to target
const dx = targetX - Position.x[agent];
const dy = targetY - Position.y[agent];
const distance = Math.sqrt(dx * dx + dy * dy);
if (distance > 0.1) {
const speed = 0.5; // Movement speed
Velocity.x[agent] = (dx / distance) * speed;
Velocity.y[agent] = (dy / distance) * speed;
} else {
Velocity.x[agent] = 0;
Velocity.y[agent] = 0;
Actions.successRate[agent] = 100; // Arrived at destination
}
break;
case 3: // Communicate
// Get target entity for communication
const targetEntity = Goals.targetEntity[agent];
if (targetEntity && hasComponent(world, Communication, agent)) {
Communication.sending[agent] = 1;
Communication.target[agent] = targetEntity;
Communication.messageType[agent] = 0; // Location sharing
Communication.data1[agent] = Math.floor(Position.x[agent]);
Communication.data2[agent] = Math.floor(Position.y[agent]);
Actions.cooldown[agent] = 5; // Communication takes time
}
break;
}
}
return world;
});
};
/**
* Physics System - Basic movement and collision
*/
export const createPhysicsSystem = () => {
const movementQuery = defineQuery([Position, Velocity]);
const obstacleQuery = defineQuery([Position, Environmental]);
return defineSystem((world: IWorld) => {
const entities = movementQuery(world);
const obstacles = obstacleQuery(world)
.filter(e => Environmental.type[e] === 1); // Type 1 = obstacle
for (let i = 0; i < entities.length; i++) {
const entity = entities[i];
// Update position based on velocity
Position.x[entity] += Velocity.x[entity];
Position.y[entity] += Velocity.y[entity];
// Simple boundary check
const boundarySize = 100;
if (Position.x[entity] < 0) Position.x[entity] = 0;
if (Position.y[entity] < 0) Position.y[entity] = 0;
if (Position.x[entity] > boundarySize) Position.x[entity] = boundarySize;
if (Position.y[entity] > boundarySize) Position.y[entity] = boundarySize;
// Simple collision detection with obstacles
for (let j = 0; j < obstacles.length; j++) {
const obstacle = obstacles[j];
const obstacleRadius = 1; // Assuming obstacles have unit radius
const dx = Position.x[obstacle] - Position.x[entity];
const dy = Position.y[obstacle] - Position.y[entity];
const distance = Math.sqrt(dx * dx + dy * dy);
if (distance < obstacleRadius + 0.5) { // Agent radius = 0.5
// Basic collision response - push away
const pushDistance = (obstacleRadius + 0.5) - distance;
const angle = Math.atan2(dy, dx);
Position.x[entity] -= Math.cos(angle) * pushDistance;
Position.y[entity] -= Math.sin(angle) * pushDistance;
}
}
}
return world;
});
};
/**
* Reality-Bending System - Introduces unpredictable changes to the environment
* Enhanced with wave effects that propagate across the environment
*/
export const createRealityBendingSystem = () => {
const fluxQuery = defineQuery([RealityFlux, Position]);
return defineSystem((world: IWorld) => {
const time = world.time;
// Initialize reality wave properties if not present
if (!world.realityWave) {
world.realityWave = {
active: false,
x: 0,
y: 0,
direction: 'horizontal',
speed: 1,
amplitude: 3,
frequency: 0.1,
particleCount: 20,
particles: []
};
}
// Major reality shift every 150 ticks
if (time % 150 === 0) {
console.log("Major reality shift triggered");
// Initiate a reality wave
const randomDirection = Math.random() > 0.5 ? 'horizontal' : 'vertical';
world.realityWave = {
active: true,
x: randomDirection === 'horizontal' ? 0 : Math.random() * 100,
y: randomDirection === 'vertical' ? 0 : Math.random() * 100,
direction: randomDirection,
speed: 0.5 + Math.random() * 1.5, // 0.5 to 2.0
amplitude: 2 + Math.random() * 3, // 2 to 5
frequency: 0.05 + Math.random() * 0.1, // 0.05 to 0.15
particleCount: 10 + Math.floor(Math.random() * 20), // 10 to 30
particles: []
};
// Initialize wave particles
for (let i = 0; i < world.realityWave.particleCount; i++) {
world.realityWave.particles.push({
x: world.realityWave.x,
y: world.realityWave.y,
size: 1 + Math.random() * 3,
speed: 0.8 + Math.random() * 1.5,
angle: Math.random() * Math.PI * 2
});
}
// Full environment distortion during shifts
const fluxEntities = fluxQuery(world);
for (let i = 0; i < fluxEntities.length; i++) {
const entity = fluxEntities[i];
const stability = RealityFlux.stability[entity];
// Lower stability means higher chance of being affected
if (Math.random() * 100 > stability) {
const effectType = Math.floor(Math.random() * 3) + 1; // 1-3
RealityFlux.effectType[entity] = effectType;
RealityFlux.duration[entity] = 20 + Math.floor(Math.random() * 20); // 20-40 ticks
// Apply immediate effect
switch (effectType) {
case 1: // Teleport
const boundarySize = 100;
Position.x[entity] = Math.random() * boundarySize;
Position.y[entity] = Math.random() * boundarySize;
break;
case 2: // Phase (temporarily remove)
// Just mark as phased, handled elsewhere
break;
case 3: // Transform
if (hasComponent(world, Environmental, entity)) {
// Change type randomly but ensure it's still 0-2
const currentType = Environmental.type[entity];
let newType;
do {
newType = Math.floor(Math.random() * 3); // 0-2
} while(newType === currentType);
Environmental.type[entity] = newType;
}
break;
}
}
}
}
// Update reality wave position and effect
if (world.realityWave && world.realityWave.active) {
// Update wave position
if (world.realityWave.direction === 'horizontal') {
world.realityWave.x += world.realityWave.speed;
if (world.realityWave.x > 100) {
world.realityWave.active = false;
}
} else {
world.realityWave.y += world.realityWave.speed;
if (world.realityWave.y > 100) {
world.realityWave.active = false;
}
}
// Update wave particles
for (let i = 0; i < world.realityWave.particles.length; i++) {
const particle = world.realityWave.particles[i];
particle.x += Math.cos(particle.angle) * particle.speed;
particle.y += Math.sin(particle.angle) * particle.speed;
// Regenerate particles that go out of bounds
if (particle.x < 0 || particle.x > 100 || particle.y < 0 || particle.y > 100) {
particle.x = world.realityWave.x;
particle.y = world.realityWave.y;
particle.angle = Math.random() * Math.PI * 2;
}
}
// Apply wave effects to nearby entities
const fluxEntities = fluxQuery(world);
for (let i = 0; i < fluxEntities.length; i++) {
const entity = fluxEntities[i];
const entityPos = { x: Position.x[entity], y: Position.y[entity] };
const wavePos = world.realityWave;
// Calculate distance to wave front
let distanceToWave;
if (wavePos.direction === 'horizontal') {
distanceToWave = Math.abs(entityPos.x - wavePos.x);
} else {
distanceToWave = Math.abs(entityPos.y - wavePos.y);
}
// If entity is close to the wave front
if (distanceToWave < 5) {
const stability = RealityFlux.stability[entity];
// Higher chance of effect when stability is low
if (Math.random() * 100 > stability * 1.5) {
// Apply mild reality distortion effect
// No effect if already under a stronger effect
if (RealityFlux.effectType[entity] === 0) {
const effectType = Math.floor(Math.random() * 3) + 1; // 1-3
RealityFlux.effectType[entity] = effectType;
RealityFlux.duration[entity] = 10 + Math.floor(Math.random() * 15); // 10-25 ticks
// Apply immediate effect (milder than full shift)
switch (effectType) {
case 1: // Mild Teleport (shorter distance)
Position.x[entity] += (Math.random() - 0.5) * 20;
Position.y[entity] += (Math.random() - 0.5) * 20;
// Keep within bounds
Position.x[entity] = Math.max(0, Math.min(100, Position.x[entity]));
Position.y[entity] = Math.max(0, Math.min(100, Position.y[entity]));
break;
case 2: // Phase (handled in visualization)
break;
case 3: // Transform (for environmental only)
if (hasComponent(world, Environmental, entity) && Math.random() < 0.3) {
// 30% chance to transform
const currentType = Environmental.type[entity];
let newType;
do {
newType = Math.floor(Math.random() * 3); // 0-2
} while(newType === currentType);
Environmental.type[entity] = newType;
}
break;
}
}
}
}
}
}
// Update ongoing reality effects
const fluxEntities = fluxQuery(world);
for (let i = 0; i < fluxEntities.length; i++) {
const entity = fluxEntities[i];
if (RealityFlux.duration[entity] > 0) {
RealityFlux.duration[entity]--;
// When effect expires
if (RealityFlux.duration[entity] === 0) {
RealityFlux.effectType[entity] = 0; // No effect
}
}
}
return world;
});
};
/**
* Communication System - Handles information exchange between agents
* Enhanced with social dynamics and cooperation
*/
export const createCommunicationSystem = () => {
const senderQuery = defineQuery([Communication, Position, Social]);
const receiverQuery = defineQuery([Memory, Position, SensoryData, Social, Goals]);
return defineSystem((world: IWorld) => {
const senders = senderQuery(world);
const receivers = receiverQuery(world);
const timestamp = world.time;
for (let i = 0; i < senders.length; i++) {
const sender = senders[i];
// Skip if not sending
if (Communication.sending[sender] === 0) continue;
const messageType = Communication.messageType[sender];
const targetEntity = Communication.target[sender];
const data1 = Communication.data1[sender];
const data2 = Communication.data2[sender];
const data3 = Communication.data3[sender];
const senderGroupId = Social.groupId[sender];
// Process targeted message
if (targetEntity !== 0) {
// Ensure target is a valid receiver
if (!receivers.includes(targetEntity)) continue;
// Handle cooperation request (message type 3)
if (messageType === 3) {
const receiverGroupId = Social.groupId[targetEntity];
const receiverTrust = Social.trustLevel[targetEntity];
// Check if receiver is willing to cooperate based on trust and group
const willCooperate =
(receiverGroupId === senderGroupId && Math.random() < 0.8) || // Same group: high chance
(receiverTrust > 60 && Math.random() < 0.5) || // High trust: medium chance
(Math.random() < 0.2); // Random chance
if (willCooperate) {
// Add sender as ally if not already
let alreadyAlly = false;
let emptySlot = -1;
for (let j = 0; j < 5; j++) {
if (Social.allies[targetEntity * 5 + j] === sender) {
alreadyAlly = true;
break;
}
if (Social.allies[targetEntity * 5 + j] === 0 && emptySlot === -1) {
emptySlot = j;
}
}
if (!alreadyAlly && emptySlot !== -1) {
Social.allies[targetEntity * 5 + emptySlot] = sender;
Social.cooperationCount[targetEntity]++;
// Reciprocate by adding target as ally to sender if possible
let senderEmptySlot = -1;
for (let j = 0; j < 5; j++) {
if (Social.allies[sender * 5 + j] === 0) {
senderEmptySlot = j;
break;
}
}
if (senderEmptySlot !== -1) {
Social.allies[sender * 5 + senderEmptySlot] = targetEntity;
}
// Increase trust level
Social.trustLevel[targetEntity] = Math.min(100, Social.trustLevel[targetEntity] + 5);
// Share current goal (cooperative behavior)
if (hasComponent(world, Goals, sender) && hasComponent(world, Goals, targetEntity)) {
// Only share high-priority goals from trusted allies
if (Goals.priority[sender] > 40) {
Goals.primaryType[targetEntity] = Goals.primaryType[sender];
Goals.targetX[targetEntity] = Goals.targetX[sender];
Goals.targetY[targetEntity] = Goals.targetY[sender];
}
}
}
} else {
// Refuse cooperation - potentially add as rival if repeated rejections
let rejectionCount = 0;
for (let j = 0; j < 5; j++) {
if (Social.rivals[targetEntity * 5 + j] === sender) {
rejectionCount++;
}
}
// Add as rival if repeatedly trying to cooperate against will
if (rejectionCount > 2) {
let emptySlot = -1;
for (let j = 0; j < 5; j++) {
if (Social.rivals[targetEntity * 5 + j] === 0) {
emptySlot = j;
break;
}
}
if (emptySlot !== -1) {
Social.rivals[targetEntity * 5 + emptySlot] = sender;
Social.trustLevel[targetEntity] = Math.max(10, Social.trustLevel[targetEntity] - 10);
}
}
}
}
// Handle other message types
else {
// Store message in receiver's memory
processMessage(targetEntity, sender, messageType, data1, data2, data3, timestamp);
}
}
// Process broadcast message
else {
const senderPos = { x: Position.x[sender], y: Position.y[sender] };
const commRadius = 10; // Communication radius
// Send to all agents in range
for (let j = 0; j < receivers.length; j++) {
const receiver = receivers[j];
if (receiver === sender) continue;
const receiverPos = { x: Position.x[receiver], y: Position.y[receiver] };
const dx = receiverPos.x - senderPos.x;
const dy = receiverPos.y - senderPos.y;
const distance = Math.sqrt(dx * dx + dy * dy);
if (distance <= commRadius) {
// For cooperation messages, prioritize same group members
if (messageType === 3) {
if (Social.groupId[receiver] === senderGroupId) {
// Higher chance of cooperation with same group
if (Math.random() < 0.7) {
// Add to allies if not already and there's space
let alreadyAlly = false;
let emptySlot = -1;
for (let k = 0; k < 5; k++) {
if (Social.allies[receiver * 5 + k] === sender) {
alreadyAlly = true;
break;
}
if (Social.allies[receiver * 5 + k] === 0 && emptySlot === -1) {
emptySlot = k;
}
}
if (!alreadyAlly && emptySlot !== -1) {
Social.allies[receiver * 5 + emptySlot] = sender;
Social.trustLevel[receiver] += 5;
}
}
}
}
// Process the message normally
processMessage(receiver, sender, messageType, data1, data2, data3, timestamp);
}
}
}
// Reset sending flag
Communication.sending[sender] = 0;
}
return world;
});
};
/**
* Helper function to process received messages
*/
function processMessage(receiver: number, sender: number, messageType: number,
data1: number, data2: number, data3: number, timestamp: number) {
// Get current memory index
const memIndex = Memory.currentIndex[receiver];
const capacity = Memory.capacity[receiver];
// Store message data in memory
switch (messageType) {
case 0: // Location information
// Store in memory as if it were directly observed
Memory.entityIds[receiver * 20 + memIndex] = sender;
Memory.entityTypes[receiver * 20 + memIndex] = 0; // Agent type
Memory.positionsX[receiver * 20 + memIndex] = data1;
Memory.positionsY[receiver * 20 + memIndex] = data2;
Memory.timestamps[receiver * 20 + memIndex] = timestamp;
// Update memory index
Memory.currentIndex[receiver] = (memIndex + 1) % capacity;
break;
case 1: // Warning about hazard
// data1, data2 = hazard position, data3 = hazard type
Memory.entityIds[receiver * 20 + memIndex] = 0; // Unknown entity ID
Memory.entityTypes[receiver * 20 + memIndex] = 2; // Hazard type
Memory.positionsX[receiver * 20 + memIndex] = data1;
Memory.positionsY[receiver * 20 + memIndex] = data2;
Memory.timestamps[receiver * 20 + memIndex] = timestamp;
// Update memory index
Memory.currentIndex[receiver] = (memIndex + 1) % capacity;
break;
}
}
// ============================================================
// Factory Functions for Entity Creation
// ============================================================
/**
* Creates an agent entity with cognitive capabilities
*/
export function createAgent(world: IWorld, x: number, y: number) {
const entity = addEntity(world);
// Add basic components
addComponent(world, Position, entity);
Position.x[entity] = x;
Position.y[entity] = y;
addComponent(world, Velocity, entity);
Velocity.x[entity] = 0;
Velocity.y[entity] = 0;
// Add cognitive components
addComponent(world, SensoryData, entity);
SensoryData.radius[entity] = 10; // Can perceive in a radius of 10 units
addComponent(world, Memory, entity);
Memory.capacity[entity] = 20;
Memory.currentIndex[entity] = 0;
addComponent(world, Goals, entity);
Goals.primaryType[entity] = 0; // Start with exploration
Goals.priority[entity] = 50;
addComponent(world, Actions, entity);
Actions.currentAction[entity] = 0; // No action
Actions.cooldown[entity] = 0;
Actions.successRate[entity] = 50; // Average success
addComponent(world, CognitiveState, entity);
CognitiveState.emotionalState[entity] = 50; // Neutral
CognitiveState.focus[entity] = 70; // Good focus
CognitiveState.adaptability[entity] = 50; // Average adaptability
// Add learning
addComponent(world, Learning, entity);
Learning.learningRate[entity] = 0.1;
Learning.explorationRate[entity] = 0.2;
Learning.lastState[entity] = 0;
Learning.lastAction[entity] = 0;
Learning.rewardAccumulator[entity] = 0;
// Initialize Q-values
for (let i = 0; i < 40; i++) {
Learning.qValues[entity * 40 + i] = 0;
}
// Add social capabilities
addComponent(world, Social, entity);
Social.trustLevel[entity] = 70; // Start with moderate trust
Social.cooperationCount[entity] = 0;
Social.groupId[entity] = Math.floor(Math.random() * 3); // Random group assignment
for (let i = 0; i < 5; i++) {
Social.allies[entity * 5 + i] = 0;
Social.rivals[entity * 5 + i] = 0;
}
// Add communication
addComponent(world, Communication, entity);
Communication.sending[entity] = 0; // Not sending
// Add reality flux
addComponent(world, RealityFlux, entity);
RealityFlux.stability[entity] = 70; // Fairly stable
RealityFlux.effectType[entity] = 0; // No effect
return entity;
}
/**
* Creates an environmental entity (resource, obstacle, hazard)
*/
export function createEnvironmentalEntity(
world: IWorld,
x: number,
y: number,
type: number, // 0: resource, 1: obstacle, 2: hazard
value: number = 50
) {
const entity = addEntity(world);
// Add basic components
addComponent(world, Position, entity);
Position.x[entity] = x;
Position.y[entity] = y;
// Add environmental component
addComponent(world, Environmental, entity);
Environmental.type[entity] = type;
Environmental.value[entity] = value;
Environmental.interactive[entity] = type === 0 ? 1 : 0; // Resources are interactive
// Add reality flux
addComponent(world, RealityFlux, entity);
RealityFlux.stability[entity] = type === 1 ? 90 : 40; // Obstacles are stable, resources/hazards are not
RealityFlux.effectType[entity] = 0; // No effect
return entity;
}
// ============================================================
// ArgOS Simulation
// ============================================================
/**
* Creates and configures the ArgOS simulation world
*/
export function createArgOSWorld() {
const world = createWorld();
// Register all systems
const perceptionSystem = createPerceptionSystem();
const memorySystem = createMemorySystem();
const decisionSystem = createDecisionSystem();
const actionSystem = createActionSystem();
const physicsSystem = createPhysicsSystem();
const realityBendingSystem = createRealityBendingSystem();
const communicationSystem = createCommunicationSystem();
// Return world and systems
return {
world,
systems: [
perceptionSystem,
memorySystem,
decisionSystem,
actionSystem,
physicsSystem,
realityBendingSystem,
communicationSystem
]
};
}
/**
* Main simulation loop
*/
export function runArgOSSimulation(
worldData: { world: IWorld, systems: any[] },
numAgents: number = 10,
numResources: number = 20,
numObstacles: number = 10,
numHazards: number = 5,
steps: number = 1000
) {
const { world, systems } = worldData;
const boundarySize = 100;
// Create agents
for (let i = 0; i < numAgents; i++) {
createAgent(
world,
Math.random() * boundarySize,
Math.random() * boundarySize
);
}
// Create resources
for (let i = 0; i < numResources; i++) {
createEnvironmentalEntity(
world,
Math.random() * boundarySize,
Math.random() * boundarySize,
0, // Resource type
50 + Math.floor(Math.random() * 50) // Value between 50-100
);
}
// Create obstacles
for (let i = 0; i < numObstacles; i++) {
createEnvironmentalEntity(
world,
Math.random() * boundarySize,
Math.random() * boundarySize,
1, // Obstacle type
100 // Full strength obstacles
);
}
// Create hazards
for (let i = 0; i < numHazards; i++) {
createEnvironmentalEntity(
world,
Math.random() * boundarySize,
Math.random() * boundarySize,
2, // Hazard type
70 + Math.floor(Math.random() * 30) // Danger level between 70-100
);
}
// Run simulation for specified number of steps
for (let step = 0; step < steps; step++) {
// Update world time first so systems see the current tick
world.time = step;
// Run all systems
for (const system of systems) {
system(world);
}
// Here you would add code to collect data, visualize, etc.
}
return world;
}
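/**
* Example entry point - a minimal sketch (not part of the original framework) showing how
* the factories and simulation loop above fit together. Counts and step totals are
* illustrative defaults.
*/
export function runExampleSimulation(steps: number = 1000) {
const sim = createArgOSWorld();
// 10 agents, 20 resources, 10 obstacles, 5 hazards
return runArgOSSimulation(sim, 10, 20, 10, 5, steps);
}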