diff --git a/examples/epstein_civil_violence/epstein_civil_violence/epstein_civil_violence.py b/examples/epstein_civil_violence/epstein_civil_violence/epstein_civil_violence.py
new file mode 100644
index 00000000..9a7314e0
--- /dev/null
+++ b/examples/epstein_civil_violence/epstein_civil_violence/epstein_civil_violence.py
@@ -0,0 +1,273 @@
+import enum
+import math
+
+from mesa import Agent, Model
+from mesa.experimental.devs.simulator import ABMSimulator
+from mesa.space import SingleGrid
+
+
+class EpsteinAgent(Agent):
+    def __init__(self, unique_id, model, vision, movement):
+        super().__init__(unique_id, model)
+        self.vision = vision
+        self.movement = movement
+
+
+class AgentState(enum.IntEnum):
+    QUIESCENT = enum.auto()
+    ARRESTED = enum.auto()
+    ACTIVE = enum.auto()
+
+
+class Citizen(EpsteinAgent):
+    """
+    A member of the general population, may or may not be in active rebellion.
+    Summary of rule: If grievance - risk > threshold, rebel.
+
+    Attributes:
+        unique_id: unique int
+        model: model instance
+        hardship: Agent's 'perceived hardship (i.e., physical or economic
+            privation).' Exogenous, drawn from U(0,1).
+        regime_legitimacy: Agent's perception of regime legitimacy, equal
+            across agents. Exogenous.
+        risk_aversion: Exogenous, drawn from U(0,1).
+        threshold: if (grievance - (risk_aversion * arrest_probability)) >
+            threshold, go/remain Active
+        vision: number of cells in each direction (N, S, E and W) that agent
+            can inspect
+        condition: Can be "Quiescent" or "Active;" deterministic function of
+            grievance, perceived risk, and threshold
+        grievance: deterministic function of hardship and regime_legitimacy;
+            how aggrieved the agent is at the regime
+        arrest_probability: agent's assessment of arrest probability, given
+            rebellion
+    """
+
+    def __init__(
+        self,
+        unique_id,
+        model,
+        vision,
+        movement,
+        hardship,
+        regime_legitimacy,
+        risk_aversion,
+        threshold,
+        arrest_prob_constant,
+    ):
+        """
+        Create a new Citizen.
+        Args:
+            unique_id: unique int
+            model: model instance
+            hardship: Agent's 'perceived hardship (i.e., physical or economic
+                privation).' Exogenous, drawn from U(0,1).
+            regime_legitimacy: Agent's perception of regime legitimacy, equal
+                across agents. Exogenous.
+            risk_aversion: Exogenous, drawn from U(0,1).
+            threshold: if (grievance - (risk_aversion * arrest_probability)) >
+                threshold, go/remain Active
+            vision: number of cells in each direction (N, S, E and W) that
+                agent can inspect. Exogenous.
+        """
+        super().__init__(unique_id, model, vision, movement)
+        self.hardship = hardship
+        self.regime_legitimacy = regime_legitimacy
+        self.risk_aversion = risk_aversion
+        self.threshold = threshold
+        self.condition = AgentState.QUIESCENT
+        self.grievance = self.hardship * (1 - self.regime_legitimacy)
+        self.arrest_probability = None
+        self.arrest_prob_constant = arrest_prob_constant
+
+    def step(self):
+        """
+        Decide whether to activate, then move if applicable.
+ """ + self.update_neighbors() + self.update_estimated_arrest_probability() + net_risk = self.risk_aversion * self.arrest_probability + if self.grievance - net_risk > self.threshold: + self.condition = AgentState.ACTIVE + else: + self.condition = AgentState.QUIESCENT + if self.movement and self.empty_neighbors: + new_pos = self.random.choice(self.empty_neighbors) + self.model.grid.move_agent(self, new_pos) + + def update_neighbors(self): + """ + Look around and see who my neighbors are + """ + self.neighborhood = self.model.grid.get_neighborhood( + self.pos, moore=True, radius=self.vision + ) + self.neighbors = self.model.grid.get_cell_list_contents(self.neighborhood) + self.empty_neighbors = [ + c for c in self.neighborhood if self.model.grid.is_cell_empty(c) + ] + + def update_estimated_arrest_probability(self): + """ + Based on the ratio of cops to actives in my neighborhood, estimate the + p(Arrest | I go active). + """ + cops_in_vision = len([c for c in self.neighbors if isinstance(c, Cop)]) + actives_in_vision = 1.0 # citizen counts herself + for c in self.neighbors: + if isinstance(c, Citizen) and c.condition == AgentState.ACTIVE: + actives_in_vision += 1 + self.arrest_probability = 1 - math.exp( + -1 * self.arrest_prob_constant * (cops_in_vision / actives_in_vision) + ) + + def sent_to_jail(self, value): + self.model.active_agents.remove(self) + self.condition = AgentState.ARRESTED + self.model.simulator.schedule_event_relative(self.release_from_jail, value) + + def release_from_jail(self): + self.model.active_agents.add(self) + self.condition = AgentState.QUIESCENT + + +class Cop(EpsteinAgent): + """ + A cop for life. No defection. + Summary of rule: Inspect local vision and arrest a random active agent. + + Attributes: + unique_id: unique int + x, y: Grid coordinates + vision: number of cells in each direction (N, S, E and W) that cop is + able to inspect + """ + + def __init__(self, unique_id, model, vision, movement, max_jail_term): + super().__init__(unique_id, model, vision, movement) + self.max_jail_term = max_jail_term + + def step(self): + """ + Inspect local vision and arrest a random active agent. Move if + applicable. + """ + self.update_neighbors() + active_neighbors = [] + for agent in self.neighbors: + if isinstance(agent, Citizen) and agent.condition == "Active": + active_neighbors.append(agent) + if active_neighbors: + arrestee = self.random.choice(active_neighbors) + arrestee.sent_to_jail(self.random.randint(0, self.max_jail_term)) + if self.movement and self.empty_neighbors: + new_pos = self.random.choice(self.empty_neighbors) + self.model.grid.move_agent(self, new_pos) + + def update_neighbors(self): + """ + Look around and see who my neighbors are. + """ + self.neighborhood = self.model.grid.get_neighborhood( + self.pos, moore=True, radius=self.vision + ) + self.neighbors = self.model.grid.get_cell_list_contents(self.neighborhood) + self.empty_neighbors = [ + c for c in self.neighborhood if self.model.grid.is_cell_empty(c) + ] + + +class EpsteinCivilViolence(Model): + """ + Model 1 from "Modeling civil violence: An agent-based computational + approach," by Joshua Epstein. + http://www.pnas.org/content/99/suppl_3/7243.full + Attributes: + height: grid height + width: grid width + citizen_density: approximate % of cells occupied by citizens. + cop_density: approximate % of cells occupied by cops. 
+
+
+class EpsteinCivilViolence(Model):
+    """
+    Model 1 from "Modeling civil violence: An agent-based computational
+    approach," by Joshua Epstein.
+    http://www.pnas.org/content/99/suppl_3/7243.full
+    Attributes:
+        height: grid height
+        width: grid width
+        citizen_density: approximate % of cells occupied by citizens.
+        cop_density: approximate % of cells occupied by cops.
+        citizen_vision: number of cells in each direction (N, S, E and W) that
+            citizen can inspect
+        cop_vision: number of cells in each direction (N, S, E and W) that cop
+            can inspect
+        legitimacy: (L) citizens' perception of regime legitimacy, equal
+            across all citizens
+        max_jail_term: (J_max) maximum jail sentence handed out on arrest
+        active_threshold: if (grievance - (risk_aversion * arrest_probability))
+            > threshold, citizen rebels
+        arrest_prob_constant: set to ensure agents make plausible arrest
+            probability estimates
+        movement: binary, whether agents try to move at step end
+        max_iters: model may not have a natural stopping point, so we set a
+            max.
+    """
+
+    def __init__(
+        self,
+        width=40,
+        height=40,
+        citizen_density=0.7,
+        cop_density=0.074,
+        citizen_vision=7,
+        cop_vision=7,
+        legitimacy=0.8,
+        max_jail_term=1000,
+        active_threshold=0.1,
+        arrest_prob_constant=2.3,
+        movement=True,
+        max_iters=1000,
+        seed=None,
+    ):
+        super().__init__(seed=seed)
+        if cop_density + citizen_density > 1:
+            raise ValueError("Cop density + citizen density must not exceed 1")
+
+        self.width = width
+        self.height = height
+        self.citizen_density = citizen_density
+        self.cop_density = cop_density
+
+        self.max_iters = max_iters
+
+        self.grid = SingleGrid(self.width, self.height, torus=True)
+
+        for _, pos in self.grid.coord_iter():
+            if self.random.random() < self.cop_density:
+                agent = Cop(
+                    self.next_id(),
+                    self,
+                    cop_vision,
+                    movement,
+                    max_jail_term,
+                )
+            elif self.random.random() < (self.cop_density + self.citizen_density):
+                agent = Citizen(
+                    self.next_id(),
+                    self,
+                    citizen_vision,
+                    movement,
+                    hardship=self.random.random(),
+                    regime_legitimacy=legitimacy,
+                    risk_aversion=self.random.random(),
+                    threshold=active_threshold,
+                    arrest_prob_constant=arrest_prob_constant,
+                )
+            else:
+                continue
+            self.grid.place_agent(agent, pos)
+
+        self.active_agents = self.agents
+
+    def step(self):
+        self.active_agents.shuffle(inplace=True).do("step")
+
+
+if __name__ == "__main__":
+    model = EpsteinCivilViolence(seed=15)
+    simulator = ABMSimulator()
+
+    simulator.setup(model)
+
+    simulator.run(time_delta=100)
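The `__main__` block above only runs the model; nothing is reported. A minimal sketch of how this example could be driven and inspected, assuming the module is importable as `epstein_civil_violence` (the tallying code is illustrative, not part of the PR):

from mesa.experimental.devs.simulator import ABMSimulator

from epstein_civil_violence import AgentState, Citizen, EpsteinCivilViolence

model = EpsteinCivilViolence(seed=15)
simulator = ABMSimulator()
simulator.setup(model)
simulator.run(time_delta=100)

# Count citizens by state after 100 units of simulated time.
counts = dict.fromkeys(AgentState, 0)
for agent in model.agents:
    if isinstance(agent, Citizen):
        counts[agent.condition] += 1
print(counts)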
diff --git a/examples/wolf_sheep/wolf_sheep.py b/examples/wolf_sheep/wolf_sheep.py
new file mode 100644
index 00000000..9fa6b3d9
--- /dev/null
+++ b/examples/wolf_sheep/wolf_sheep.py
@@ -0,0 +1,250 @@
+"""
+Wolf-Sheep Predation Model
+================================
+
+Replication of the model found in NetLogo:
+    Wilensky, U. (1997). NetLogo Wolf Sheep Predation model.
+    http://ccl.northwestern.edu/netlogo/models/WolfSheepPredation.
+    Center for Connected Learning and Computer-Based Modeling,
+    Northwestern University, Evanston, IL.
+"""
+
+import mesa
+from mesa.experimental.devs.simulator import ABMSimulator
+
+
+class Animal(mesa.Agent):
+    def __init__(self, unique_id, model, moore, energy, p_reproduce, energy_from_food):
+        super().__init__(unique_id, model)
+        self.energy = energy
+        self.p_reproduce = p_reproduce
+        self.energy_from_food = energy_from_food
+        self.moore = moore
+
+    def random_move(self):
+        next_moves = self.model.grid.get_neighborhood(self.pos, self.moore, True)
+        next_move = self.random.choice(next_moves)
+        # Now move:
+        self.model.grid.move_agent(self, next_move)
+
+    def spawn_offspring(self):
+        self.energy /= 2
+        offspring = self.__class__(
+            self.model.next_id(),
+            self.model,
+            self.moore,
+            self.energy,
+            self.p_reproduce,
+            self.energy_from_food,
+        )
+        self.model.grid.place_agent(offspring, self.pos)
+
+    def feed(self): ...  # overridden by Sheep and Wolf
+
+    def die(self):
+        self.model.grid.remove_agent(self)
+        self.remove()
+
+    def step(self):
+        self.random_move()
+        self.energy -= 1
+
+        self.feed()
+
+        if self.energy < 0:
+            self.die()
+        elif self.random.random() < self.p_reproduce:
+            self.spawn_offspring()
+
+
+class Sheep(Animal):
+    """
+    A sheep that walks around, reproduces (asexually) and gets eaten.
+
+    Its __init__ is inherited unchanged from Animal.
+    """
+
+    def feed(self):
+        # If there is grass available, eat it
+        agents = self.model.grid.get_cell_list_contents(self.pos)
+        grass_patch = next(obj for obj in agents if isinstance(obj, GrassPatch))
+        if grass_patch.fully_grown:
+            self.energy += self.energy_from_food
+            grass_patch.fully_grown = False
+
+
+class Wolf(Animal):
+    """
+    A wolf that walks around, reproduces (asexually) and eats sheep.
+    """
+
+    def feed(self):
+        agents = self.model.grid.get_cell_list_contents(self.pos)
+        sheep = [obj for obj in agents if isinstance(obj, Sheep)]
+        if len(sheep) > 0:
+            sheep_to_eat = self.random.choice(sheep)
+            self.energy += self.energy_from_food
+
+            # Kill the sheep
+            sheep_to_eat.die()
+
+
+class GrassPatch(mesa.Agent):
+    """
+    A patch of grass that regrows at a fixed rate and is eaten by sheep
+    """
+
+    @property
+    def fully_grown(self) -> bool:
+        return self._fully_grown
+
+    @fully_grown.setter
+    def fully_grown(self, value: bool):
+        self._fully_grown = value
+
+        if not value:
+            self.model.simulator.schedule_event_relative(
+                setattr,
+                self.grass_regrowth_time,
+                function_args=[self, "fully_grown", True],
+            )
+
+    def __init__(self, unique_id, model, fully_grown, countdown, grass_regrowth_time):
+        """
+        Creates a new patch of grass
+
+        Args:
+            fully_grown: (boolean) Whether the patch of grass is fully grown or not
+            countdown: Time for the patch of grass to be fully grown again
+        """
+        super().__init__(unique_id, model)
+        self._fully_grown = fully_grown
+        self.grass_regrowth_time = grass_regrowth_time
+
+        if not self.fully_grown:
+            self.model.simulator.schedule_event_relative(
+                setattr, countdown, function_args=[self, "fully_grown", True]
+            )
+
+    def set_fully_grown(self):
+        self.fully_grown = True
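The `fully_grown` setter above turns grass regrowth into a scheduled event rather than a per-step countdown: when a sheep sets `fully_grown = False`, the patch asks the simulator to call `setattr(patch, "fully_grown", True)` after `grass_regrowth_time` time units. A tiny stand-in in plain Python (no Mesa; the `ToyScheduler` is invented here purely to illustrate the pattern):

import heapq
import itertools


class ToyScheduler:
    """Minimal stand-in for the DEVS simulator's schedule_event_relative."""

    def __init__(self):
        self.time = 0
        self._counter = itertools.count()
        self._events = []

    def schedule_event_relative(self, function, delay, function_args=()):
        heapq.heappush(
            self._events, (self.time + delay, next(self._counter), function, function_args)
        )

    def run_until(self, end):
        while self._events and self._events[0][0] <= end:
            self.time, _, fn, args = heapq.heappop(self._events)
            fn(*args)
        self.time = end


class ToyPatch:
    def __init__(self, scheduler, regrowth_time):
        self.scheduler = scheduler
        self.regrowth_time = regrowth_time
        self._fully_grown = True

    @property
    def fully_grown(self):
        return self._fully_grown

    @fully_grown.setter
    def fully_grown(self, value):
        self._fully_grown = value
        if not value:  # eaten: schedule regrowth, like GrassPatch above
            self.scheduler.schedule_event_relative(
                setattr, self.regrowth_time, function_args=[self, "fully_grown", True]
            )


sched = ToyScheduler()
patch = ToyPatch(sched, regrowth_time=20)
patch.fully_grown = False    # a sheep eats the patch at t=0
sched.run_until(19)
print(patch.fully_grown)     # False: regrowth event has not fired yet
sched.run_until(20)
print(patch.fully_grown)     # True: setattr event fired at t=20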
+
+
+class WolfSheep(mesa.Model):
+    """
+    Wolf-Sheep Predation Model
+
+    A model for simulating wolf and sheep (predator-prey) ecosystem modelling.
+    """
+
+    def __init__(
+        self,
+        height,
+        width,
+        initial_sheep,
+        initial_wolves,
+        sheep_reproduce,
+        wolf_reproduce,
+        grass_regrowth_time,
+        wolf_gain_from_food=13,
+        sheep_gain_from_food=5,
+        moore=False,
+        simulator=None,
+        seed=None,
+    ):
+        """
+        Create a new Wolf-Sheep model with the given parameters.
+
+        Args:
+            height, width: Size of the grid
+            initial_sheep: Number of sheep to start with
+            initial_wolves: Number of wolves to start with
+            sheep_reproduce: Probability of each sheep reproducing each step
+            wolf_reproduce: Probability of each wolf reproducing each step
+            wolf_gain_from_food: Energy a wolf gains from eating a sheep
+            grass_regrowth_time: How long it takes for a grass patch to regrow
+                once it is eaten
+            sheep_gain_from_food: Energy sheep gain from eating grass
+            moore: Whether agents move in Moore (8-cell) or Von Neumann
+                (4-cell) neighborhoods
+        """
+        super().__init__(seed=seed)
+        # Set parameters
+        self.height = height
+        self.width = width
+        self.initial_sheep = initial_sheep
+        self.initial_wolves = initial_wolves
+        self.simulator = simulator
+
+        # self.sheep_reproduce = sheep_reproduce
+        # self.wolf_reproduce = wolf_reproduce
+        # self.grass_regrowth_time = grass_regrowth_time
+        # self.wolf_gain_from_food = wolf_gain_from_food
+        # self.sheep_gain_from_food = sheep_gain_from_food
+        # self.moore = moore
+
+        self.grid = mesa.space.MultiGrid(self.width, self.height, torus=False)
+
+        for _ in range(self.initial_sheep):
+            pos = (
+                self.random.randrange(self.width),
+                self.random.randrange(self.height),
+            )
+            energy = self.random.randrange(2 * sheep_gain_from_food)
+            sheep = Sheep(
+                self.next_id(),
+                self,
+                moore,
+                energy,
+                sheep_reproduce,
+                sheep_gain_from_food,
+            )
+            self.grid.place_agent(sheep, pos)
+
+        # Create wolves
+        for _ in range(self.initial_wolves):
+            pos = (
+                self.random.randrange(self.width),
+                self.random.randrange(self.height),
+            )
+            energy = self.random.randrange(2 * wolf_gain_from_food)
+            wolf = Wolf(
+                self.next_id(),
+                self,
+                moore,
+                energy,
+                wolf_reproduce,
+                wolf_gain_from_food,
+            )
+            self.grid.place_agent(wolf, pos)
+
+        # Create grass patches
+        possibly_fully_grown = [True, False]
+        for _agent, pos in self.grid.coord_iter():
+            fully_grown = self.random.choice(possibly_fully_grown)
+            if fully_grown:
+                countdown = grass_regrowth_time
+            else:
+                countdown = self.random.randrange(grass_regrowth_time)
+            patch = GrassPatch(
+                self.next_id(), self, fully_grown, countdown, grass_regrowth_time
+            )
+            self.grid.place_agent(patch, pos)
+
+    def step(self):
+        self.get_agents_of_type(Sheep).shuffle(inplace=True).do("step")
+        self.get_agents_of_type(Wolf).shuffle(inplace=True).do("step")
+
+
+if __name__ == "__main__":
+    import time
+
+    simulator = ABMSimulator()
+
+    model = WolfSheep(25, 25, 60, 40, 0.2, 0.1, 20, simulator=simulator, seed=15)
+
+    simulator.setup(model)
+
+    start_time = time.perf_counter()
+    simulator.run(100)
+    print(simulator.time)
+    print("Time:", time.perf_counter() - start_time)
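For completeness, a sketch of how the Wolf-Sheep example might be run and its populations read back, mirroring the `__main__` block above and assuming the module is importable as `wolf_sheep` (the counting lines are illustrative, not part of the PR):

from mesa.experimental.devs.simulator import ABMSimulator

from wolf_sheep import Sheep, Wolf, WolfSheep

simulator = ABMSimulator()
model = WolfSheep(25, 25, 60, 40, 0.2, 0.1, 20, simulator=simulator, seed=15)
simulator.setup(model)
simulator.run(100)

# AgentSets returned by get_agents_of_type support len(), so the surviving
# populations can be read off directly after the run.
print("sheep:", len(model.get_agents_of_type(Sheep)))
print("wolves:", len(model.get_agents_of_type(Wolf)))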