@@ -33,7 +33,7 @@ class Champion(Player):
     classifier = {
         'memory_depth': float('inf'),
         'stochastic': True,
-        'makes_use_of': set(["length"]),
+        'makes_use_of': set(),
         'long_run_time': False,
         'inspects_source': False,
         'manipulates_source': False,
@@ -42,14 +42,13 @@ class Champion(Player):
 
     def strategy(self, opponent: Player) -> Action:
         current_round = len(self.history)
-        expected_length = self.match_attributes['length']
-        # Cooperate for the first 1/20-th of the game
+        # Cooperate for the first 10 turns
         if current_round == 0:
             return C
-        if current_round < expected_length / 20:
+        if current_round < 10:
             return C
         # Mirror partner for the next phase
-        if current_round < expected_length * 5 / 40:
+        if current_round < 25:
             return opponent.history[-1]
         # Now cooperate unless all of the necessary conditions are true
         defection_prop = opponent.defections / len(opponent.history)
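For orientation, the opening schedule above can be read as a pure function of the turn index. The sketch below is illustrative only and is not library code: the name champion_opening is invented here, and the conditional-defection phase that follows turn 25 is reduced to a placeholder.

    # Minimal sketch of Champion's opening schedule with the fixed thresholds above.
    C, D = "C", "D"  # stand-ins for the library's Action.C / Action.D

    def champion_opening(turn, opp_history):
        if turn < 10:                # turns 0-9: always cooperate
            return C
        if turn < 25:                # turns 10-24: mirror the opponent's last move
            return opp_history[-1]
        return None                  # from turn 25 on, the conditional tests above decide

    champion_opening(12, [C, C, D] * 4)  # -> "D", mirroring phase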
@@ -204,22 +203,22 @@ def strategy(self, opponent: Player) -> Action:
         return opponent.history[-1]
 
 class Tranquilizer(Player):
-
+
     """
     Submitted to Axelrod's second tournament by Craig Feathers
 
-    Description given in Axelrod's "More Effective Choice in the
-    Prisoner's Dilemma" paper: The rule normally cooperates but
-    is ready to defect if the other player defects too often.
+    Description given in Axelrod's "More Effective Choice in the
+    Prisoner's Dilemma" paper: The rule normally cooperates but
+    is ready to defect if the other player defects too often.
     Thus the rule tends to cooperate for the first dozen or two moves
-    if the other player is cooperating, but then it throws in a
-    defection. If the other player continues to cooperate, then defections
-    become more frequent. But as long as Tranquilizer is maintaining an
-    average payoff of at least 2.25 points per move, it will never defect
-    twice in succession and it will not defect more than
+    if the other player is cooperating, but then it throws in a
+    defection. If the other player continues to cooperate, then defections
+    become more frequent. But as long as Tranquilizer is maintaining an
+    average payoff of at least 2.25 points per move, it will never defect
+    twice in succession and it will not defect more than
     one-quarter of the time.
 
-    This implementation is based on the reverse engineering of the
+    This implementation is based on the reverse engineering of the
     Fortran strategy K67R from Axelrod's second tournament.
     Reverse engineered by: Owen Campbell, Will Guo and Mansour Hakem.
 
@@ -228,87 +227,87 @@ class Tranquilizer(Player):
     At the start of the strategy it updates its states:
 
     - It counts the number of consecutive defections by the opponent.
-    - If it was in state 2 it moves to state 0 and calculates the
+    - If it was in state 2 it moves to state 0 and calculates the
       following quantities two_turns_after_good_defection_ratio and
       two_turns_after_good_defection_ratio_count.
-
+
     Formula for:
-
+
     two_turns_after_good_defection_ratio:
 
     self.two_turns_after_good_defection_ratio = (
-        ((self.two_turns_after_good_defection_ratio
-        * self.two_turns_after_good_defection_ratio_count)
-        + (3 - (3 * self.dict[opponent.history[-1]]))
-        + (2 * self.dict[self.history[-1]])
-        - ((self.dict[opponent.history[-1]]
-        * self.dict[self.history[-1]])))
+        ((self.two_turns_after_good_defection_ratio
+        * self.two_turns_after_good_defection_ratio_count)
+        + (3 - (3 * self.dict[opponent.history[-1]]))
+        + (2 * self.dict[self.history[-1]])
+        - ((self.dict[opponent.history[-1]]
+        * self.dict[self.history[-1]])))
         / (self.two_turns_after_good_defection_ratio_count + 1)
     )
 
     two_turns_after_good_defection_ratio_count =
     two_turns_after_good_defection_ratio_count + 1
 
-    - If it was in state 1 it moves to state 2 and calculates the
-      following quantities one_turn_after_good_defection_ratio and
+    - If it was in state 1 it moves to state 2 and calculates the
+      following quantities one_turn_after_good_defection_ratio and
       one_turn_after_good_defection_ratio_count.
 
     Formula for:
-
+
     one_turn_after_good_defection_ratio:
 
     self.one_turn_after_good_defection_ratio = (
-        ((self.one_turn_after_good_defection_ratio
+        ((self.one_turn_after_good_defection_ratio
         * self.one_turn_after_good_defection_ratio_count)
-        + (3 - (3 * self.dict[opponent.history[-1]]))
-        + (2 * self.dict[self.history[-1]])
-        - (self.dict[opponent.history[-1]]
-        * self.dict[self.history[-1]]))
+        + (3 - (3 * self.dict[opponent.history[-1]]))
+        + (2 * self.dict[self.history[-1]])
+        - (self.dict[opponent.history[-1]]
+        * self.dict[self.history[-1]]))
         / (self.one_turn_after_good_defection_ratio_count + 1)
     )
 
     one_turn_after_good_defection_ratio_count:
 
     one_turn_after_good_defection_ratio_count =
     one_turn_after_good_defection_ratio_count + 1
-
+
     If after this it is in state 1 or 2 then it cooperates.
 
-    If it is in state 0 it will potentially perform 1 of the 2
+    If it is in state 0 it will potentially perform 1 of the 2
     following stochastic tests:
 
-    1. If average score per turn is greater than 2.25 then it calculates a
+    1. If average score per turn is greater than 2.25 then it calculates a
     value of probability:
-
+
     probability = (
         (.95 - (((self.one_turn_after_good_defection_ratio)
-        + (self.two_turns_after_good_defection_ratio) - 5) / 15))
+        + (self.two_turns_after_good_defection_ratio) - 5) / 15))
         + (1 / (((len(self.history))+1) ** 2))
         - (self.dict[opponent.history[-1]] / 4)
-    )
+    )
 
-    and will cooperate if a random sampled number is less than that value of
-    probability. If it does not cooperate then the strategy moves to state 1
+    and will cooperate if a random sampled number is less than that value of
+    probability. If it does not cooperate then the strategy moves to state 1
     and defects.
-
-    2. If average score per turn is greater than 1.75 but less than 2.25
+
+    2. If average score per turn is greater than 1.75 but less than 2.25
     then it calculates a value of probability:
 
     probability = (
         (.25 + ((opponent.cooperations + 1) / ((len(self.history)) + 1)))
-        - (self.opponent_consecutive_defections * .25)
-        + ((current_score[0]
-        - current_score[1]) / 100)
+        - (self.opponent_consecutive_defections * .25)
+        + ((current_score[0]
+        - current_score[1]) / 100)
         + (4 / ((len(self.history)) + 1))
     )
 
-    and will cooperate if a random sampled number is less than that value of
+    and will cooperate if a random sampled number is less than that value of
     probability. If not, it defects.
 
     If none of the above holds the player simply plays tit for tat.
 
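A side note that may make the ratio formulas above easier to read: with self.dict mapping C to 0 and D to 1, the expression 3 - 3*o + 2*s - o*s (where o and s are the mapped last moves of the opponent and the player) evaluates to 3, 5, 0 and 1 for the move pairs (C, C), (D, C), (C, D) and (D, D), i.e. the player's own payoff under the conventional values R=3, T=5, S=0, P=1. Each ratio is therefore essentially a running mean of the payoff received one or two turns after a "good defection". The check below is purely illustrative; payoff_last_round is not a library function.

    # Illustrative check only (assumes the standard payoff values R=3, T=5, S=0, P=1).
    move_to_int = {"C": 0, "D": 1}  # mirrors self.dict = {C: 0, D: 1}

    def payoff_last_round(own_move, opp_move):
        s, o = move_to_int[own_move], move_to_int[opp_move]
        return 3 - (3 * o) + (2 * s) - (o * s)

    assert payoff_last_round("C", "C") == 3  # mutual cooperation (R)
    assert payoff_last_round("D", "C") == 5  # temptation (T)
    assert payoff_last_round("C", "D") == 0  # sucker's payoff (S)
    assert payoff_last_round("D", "D") == 1  # mutual defection (P)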
-    Tranquilizer came in 27th place in Axelrod's second tournament.
-
+    Tranquilizer came in 27th place in Axelrod's second tournament.
+
 
     Names:
 
@@ -335,75 +334,75 @@ def __init__(self):
         self.one_turn_after_good_defection_ratio_count = 1 # equal to AK variable
         self.two_turns_after_good_defection_ratio_count = 1 # equal to NK variable
         # All above variables correspond to those in original Fortran code
-        self.dict = {C: 0, D: 1}
+        self.dict = {C: 0, D: 1}
 
 
-    def update_state(self, opponent):
-
+    def update_state(self, opponent):
+
         """
         Calculates the ratio values for the one_turn_after_good_defection_ratio,
-        two_turns_after_good_defection_ratio and the probability values,
+        two_turns_after_good_defection_ratio and the probability values,
         and sets the value of num_turns_after_good_defection.
         """
-        if opponent.history[-1] == D:
+        if opponent.history[-1] == D:
             self.opponent_consecutive_defections += 1
         else:
             self.opponent_consecutive_defections = 0
 
         if self.num_turns_after_good_defection == 2:
             self.num_turns_after_good_defection = 0
             self.two_turns_after_good_defection_ratio = (
-                ((self.two_turns_after_good_defection_ratio
-                * self.two_turns_after_good_defection_ratio_count)
-                + (3 - (3 * self.dict[opponent.history[-1]]))
-                + (2 * self.dict[self.history[-1]])
-                - ((self.dict[opponent.history[-1]]
-                * self.dict[self.history[-1]])))
+                ((self.two_turns_after_good_defection_ratio
+                * self.two_turns_after_good_defection_ratio_count)
+                + (3 - (3 * self.dict[opponent.history[-1]]))
+                + (2 * self.dict[self.history[-1]])
+                - ((self.dict[opponent.history[-1]]
+                * self.dict[self.history[-1]])))
                 / (self.two_turns_after_good_defection_ratio_count + 1)
             )
             self.two_turns_after_good_defection_ratio_count += 1
         elif self.num_turns_after_good_defection == 1:
             self.num_turns_after_good_defection = 2
             self.one_turn_after_good_defection_ratio = (
-                ((self.one_turn_after_good_defection_ratio
+                ((self.one_turn_after_good_defection_ratio
                 * self.one_turn_after_good_defection_ratio_count)
-                + (3 - (3 * self.dict[opponent.history[-1]]))
-                + (2 * self.dict[self.history[-1]])
-                - (self.dict[opponent.history[-1]]
-                * self.dict[self.history[-1]]))
+                + (3 - (3 * self.dict[opponent.history[-1]]))
+                + (2 * self.dict[self.history[-1]])
+                - (self.dict[opponent.history[-1]]
+                * self.dict[self.history[-1]]))
                 / (self.one_turn_after_good_defection_ratio_count + 1)
             )
             self.one_turn_after_good_defection_ratio_count += 1
-
+
     def strategy(self, opponent: Player) -> Action:
 
         if not self.history:
             return C
 
-
+
         self.update_state(opponent)
         if self.num_turns_after_good_defection in [1, 2]:
-            return C
+            return C
 
         current_score = compute_final_score(zip(self.history, opponent.history))
 
         if (current_score[0] / ((len(self.history)) + 1)) >= 2.25:
             probability = (
                 (.95 - (((self.one_turn_after_good_defection_ratio)
-                + (self.two_turns_after_good_defection_ratio) - 5) / 15))
+                + (self.two_turns_after_good_defection_ratio) - 5) / 15))
                 + (1 / (((len(self.history))+1) ** 2))
                 - (self.dict[opponent.history[-1]] / 4)
             )
-            if random.random() <= probability:
+            if random.random() <= probability:
                 return C
             self.num_turns_after_good_defection = 1
             return D
         if (current_score[0] / ((len(self.history)) + 1)) >= 1.75:
             probability = (
                 (.25 + ((opponent.cooperations + 1) / ((len(self.history)) + 1)))
-                - (self.opponent_consecutive_defections * .25)
-                + ((current_score[0]
-                - current_score[1]) / 100)
+                - (self.opponent_consecutive_defections * .25)
+                + ((current_score[0]
+                - current_score[1]) / 100)
                 + (4 / ((len(self.history)) + 1))
             )
             if random.random() <= probability: