Skip to content

Commit 0e3712a

Browse files
authored
Merge pull request #773 from Axelrod-Python/ann-mojones
Merge evolved artificial neural network strategy
2 parents 168fc85 + 0c103df commit 0e3712a

File tree

4 files changed

+277
-2
lines changed

4 files changed

+277
-2
lines changed

axelrod/strategies/_strategies.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22

33
from .alternator import Alternator
44
from .adaptive import Adaptive
5+
from .ann import EvolvedANN
56
from .apavlov import APavlov2006, APavlov2011
67
from .appeaser import Appeaser
78
from .averagecopier import AverageCopier, NiceAverageCopier
@@ -105,6 +106,8 @@
105106
EasyGo,
106107
Eatherley,
107108
EventualCycleHunter,
109+
EvolvedANN,
110+
EvolvedLookerUp,
108111
Feld,
109112
FirmButFair,
110113
FoolMeForever,
@@ -150,7 +153,6 @@
150153
LimitedRetaliate,
151154
LimitedRetaliate2,
152155
LimitedRetaliate3,
153-
EvolvedLookerUp,
154156
MathConstantHunter,
155157
NaiveProber,
156158
MindBender,

axelrod/strategies/ann.py

Lines changed: 230 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,230 @@
1+
# Source: https://gist.github.com/mojones/550b32c46a8169bb3cd89d917b73111a#file-ann-strategy-test-L60
2+
# Original Author: Martin Jones, @mojones
3+
4+
from axelrod import Actions, Player, init_args
5+
6+
C, D = Actions.C, Actions.D
7+
8+
9+
def split_weights(weights, input_values, hidden_layer_size):
10+
"""Splits the input vector into the the NN bias weights and layer
11+
parameters."""
12+
number_of_input_to_hidden_weights = input_values * hidden_layer_size
13+
number_of_hidden_to_output_weights = hidden_layer_size
14+
15+
input2hidden = []
16+
for i in range(0, number_of_input_to_hidden_weights, input_values):
17+
input2hidden.append(weights[i:i + input_values])
18+
19+
start = number_of_input_to_hidden_weights
20+
end = number_of_input_to_hidden_weights + number_of_hidden_to_output_weights
21+
22+
hidden2output = weights[start: end]
23+
bias = weights[end:]
24+
25+
return (input2hidden, hidden2output, bias)
26+
27+
28+
class ANN(Player):
    """A single hidden layer, feed-forward neural network based strategy.

    Each turn, 17 binary/count features are derived from the match
    history and fed through a network with ReLU hidden units and a
    linear output node; the strategy cooperates iff the output is
    positive.
    """

    name = 'ANN'
    # NOTE(review): `strategy` reads `self.match_attributes['length']`
    # below, yet `makes_use_of` is empty here; the EvolvedANN subclass
    # patches it to {'length'} — confirm whether the base classifier
    # should also declare it.
    classifier = {
        'memory_depth': float('inf'),
        'stochastic': False,
        'inspects_source': False,
        'makes_use_of': set(),
        'manipulates_source': False,
        'manipulates_state': False,
        'long_run_time': False
    }

    def activate(self, inputs):
        """Compute the scalar output of the neural network for `inputs`.

        Hidden nodes use a ReLU activation; the output is a linear
        combination of the hidden values (no output activation).
        """
        # Calculate values of hidden nodes
        hidden_values = []
        for i in range(self.hidden_layer_size):
            hidden_node_value = 0
            bias_weight = self.bias_weights[i]
            hidden_node_value += bias_weight
            for j in range(self.input_values):
                weight = self.input_to_hidden_layer_weights[i][j]
                hidden_node_value += inputs[j] * weight

            # ReLU activation function
            hidden_node_value = max(hidden_node_value, 0)

            hidden_values.append(hidden_node_value)

        # Calculate output value
        output_value = 0
        for i in range(self.hidden_layer_size):
            output_value += hidden_values[i] * \
                self.hidden_to_output_layer_weights[i]

        return output_value

    @init_args
    def __init__(
        self,
        input_to_hidden_layer_weights=[],
        hidden_to_output_layer_weights=[],
        bias_weights=[]
    ):
        """
        Parameters
        ----------
        input_to_hidden_layer_weights : list of lists
            One row of input weights per hidden node.
        hidden_to_output_layer_weights : list
            One output weight per hidden node.
        bias_weights : list
            One bias weight per hidden node.

        The mutable list defaults are never mutated, so sharing them
        across calls is harmless; note that the empty defaults would
        raise IndexError below, i.e. real weights must be supplied.
        """
        Player.__init__(self)
        self.input_to_hidden_layer_weights = input_to_hidden_layer_weights
        self.hidden_to_output_layer_weights = hidden_to_output_layer_weights
        self.bias_weights = bias_weights

        # Network topology is inferred from the weight containers.
        self.input_values = len(input_to_hidden_layer_weights[0])
        self.hidden_layer_size = len(hidden_to_output_layer_weights)

    def strategy(self, opponent):
        """Return C or D based on the network's evaluation of 17 history
        features (each a 0/1 indicator or a running count)."""
        # Compute features for Neural Network
        # These are True/False 0/1
        if len(opponent.history) == 0:
            # First turn: no history on either side yet.
            opponent_first_c = 0
            opponent_first_d = 0
            opponent_second_c = 0
            opponent_second_d = 0
            my_previous_c = 0
            my_previous_d = 0
            my_previous2_c = 0
            my_previous2_d = 0
            opponent_previous_c = 0
            opponent_previous_d = 0
            opponent_previous2_c = 0
            opponent_previous2_d = 0

        elif len(opponent.history) == 1:
            opponent_first_c = 1 if opponent.history[0] == C else 0
            opponent_first_d = 1 if opponent.history[0] == D else 0
            opponent_second_c = 0
            opponent_second_d = 0
            my_previous_c = 1 if self.history[-1] == C else 0
            # Bug fix: this previously read `0 if ... else 0`, which is
            # always 0, so the "I defected last turn" feature was dead.
            # Pre-recorded expected play sequences elsewhere (e.g. the
            # unit tests) may need regenerating after this fix.
            my_previous_d = 1 if self.history[-1] == D else 0
            my_previous2_c = 0
            my_previous2_d = 0
            opponent_previous_c = 1 if opponent.history[-1] == C else 0
            opponent_previous_d = 1 if opponent.history[-1] == D else 0
            opponent_previous2_c = 0
            opponent_previous2_d = 0

        else:
            opponent_first_c = 1 if opponent.history[0] == C else 0
            opponent_first_d = 1 if opponent.history[0] == D else 0
            opponent_second_c = 1 if opponent.history[1] == C else 0
            opponent_second_d = 1 if opponent.history[1] == D else 0
            my_previous_c = 1 if self.history[-1] == C else 0
            # Bug fix: was `0 if ... else 0` (always 0) — see note above.
            my_previous_d = 1 if self.history[-1] == D else 0
            my_previous2_c = 1 if self.history[-2] == C else 0
            my_previous2_d = 1 if self.history[-2] == D else 0
            opponent_previous_c = 1 if opponent.history[-1] == C else 0
            opponent_previous_d = 1 if opponent.history[-1] == D else 0
            opponent_previous2_c = 1 if opponent.history[-2] == C else 0
            opponent_previous2_d = 1 if opponent.history[-2] == D else 0

        # Remaining Features
        turns_remaining = self.match_attributes['length'] - len(self.history)
        total_opponent_c = opponent.history.count(C)
        total_opponent_d = opponent.history.count(D)
        total_self_c = self.history.count(C)
        total_self_d = self.history.count(D)

        output = self.activate([
            opponent_first_c,
            opponent_first_d,
            opponent_second_c,
            opponent_second_d,
            my_previous_c,
            my_previous_d,
            my_previous2_c,
            my_previous2_d,
            opponent_previous_c,
            opponent_previous_d,
            opponent_previous2_c,
            opponent_previous2_d,
            total_opponent_c,
            total_opponent_d,
            total_self_c,
            total_self_d,
            turns_remaining
        ])
        if output > 0:
            return C
        else:
            return D
157+
158+
159+
class EvolvedANN(ANN):
    """
    A strategy based on a pre-trained neural network.

    Names:

    - EvolvedANN: Original name by Martin Jones.
    """

    name = "EvolvedANN"

    @init_args
    def __init__(self):
        # NOTE(review): at this point Player.__init__ has not yet run, so
        # `self.classifier` resolves to the class-level dict; this line
        # therefore mutates the classifier shared with the ANN base class
        # — confirm that is intended.
        self.classifier['makes_use_of'] = set(['length'])
        # Network topology: 17 input features, 10 hidden (ReLU) nodes.
        input_values = 17
        hidden_layer_size = 10

        # Flat weight vector produced by offline evolution (see the gist
        # referenced at the top of this module); split below into the
        # input->hidden matrix, hidden->output vector and hidden biases.
        weights = [0.19789658035994948, -5575.476236516673, 0.1028948855131803, 0.7421752484224489,
            -16.286246197005298, 11708.007255945553, 0.01400184611448853, -33.39126355009626,
            -12.755203414662356, -32.92388754142929, 197.3517717772447, 108262.87038790248,
            -0.1084768512582505, 85.20738888799768, 723.9537664890132, -2.59453614458083,
            0.5599936275978272, 7.89217571665664, -48014.821440080384, -1.364025168184463,
            -1.062138244222801, 11153713.883580556, -59.58314524751318, 51278.916519524784,
            3196.528224457722, -4635.771421694692, -129.93354968926164, -0.7927383528469051,
            98.47779304649353, -81.19056440190543, 29.53082483602472, -48.16562780387682,
            49.40755170297665, 288.3295763937912, -68.38780651250116, -167.64039570334904,
            -0.1576073061122998, 160.6846658333963, 34.55451693336857, -0.08213997499783675,
            -4.802560347075611, -1.4042000430302104, -0.9832145174590058, 0.008705149387813573,
            14.041842191255089, 0.05395665905821821, -0.13856885306885558, 5.311455433711278,
            -5.835498171845142, 0.00010294700612334848, 26.42528200366623, 33.690839945794785,
            7.931017950666591, -0.00037662122944226125, 59.295075951374606, -0.15888507169191035,
            3.670332254391659, 789.6230735057893, -0.7367125124436135, -198.44119280589902,
            537.9939493545736, -287.54344903637207, 1759.5455359353778, -18.48997020629342,
            -8426184.81603275, -82.36805426730088, 1144.1032034358543, 15635.402592538396,
            3095.643889329041, 2332.107673930774, -0.5601648316602144, 101.98300711150003,
            -7387.135294747112, -4241.004613717573, 3.06175607282536e-05, -35122.894421260884,
            -38591.45572476855, -0.16081285130591272, -29608.73087879185, 122.47563639056185,
            6.381946054740736, -0.8978628581801188, 17658.47647781355, -0.011719257684286711,
            0.10734295104044986, -378.35448968529494, 225.06912279045062, -351.12326495980847,
            -1.927322672845826, 0.0014584395475859544, -8.629826916169318, 22.43281153854352,
            87.10895591188721, -0.22253937914423294, -233.06796470563208, -620.4917481128365,
            -1.8253699204909606,-0.0030318160426064467, -77.25818476745101, -2057.311059352977,
            3.764204074005541, -47.47629147374066, 233.16096124330778, -160721.96744375565,
            -278292.9688140893, -2.093640525920404, -142886.66171202937, 53.64449245132945,
            12.5162147724691, -207.75462390139955, 132167.659160016, 21.197418541051732,
            83979.45623573882, -49.47558832987566, 0.05242625398046057, -842.1484416713075,
            -0.1581049310461208, 2359.2124343564096, 1170.5147830681053, -847999.9145843558,
            -0.8053911061885284, -5363.722820739466, 171.58433274294117, -724.7468082647013,
            2500359.853524033, 1595.3955511798079, -4.254009123616706, -171.12968391407912,
            -32.30624102753424, -558.412338112568, -234.29754199019308, -18768.34057250429,
            8338.792126484348, -0.18593140210730602, -7.758804964874875, 0.39736677884665267,
            547.0567585452197, 1.1969366369973133, 0.4861465741177498, -51.19319208716985,
            12.775051406025534, -0.09185362260212569, 22.08417300332754, -5090.013231748707,
            -0.814394991797045, 1.1534025840023847, 8.390439959276764, -0.02227253403481858,
            0.14162040507921927, -0.011508263843203926, 0.22372493104861083, 0.7754713610627112,
            0.1044033140236981, -4.377055307648915, -41.898221495326574, -18656.755601828827,
            -134.56719406539244, -2405.8148785743474, 16864.049985157206, -0.5124682025216784,
            14521.069005125159, -10.740782200739309, 18756.807715014013, -1723.9353962656946,
            87029.99828299093, 5.7383786020894195e-05, 4762.960401619296, 0.7331769713238158,
            -308.5673034493341, 85.29725765515369, 0.4268843538235295, -0.17788805472511407,
            -1.1727033611646802, 7578.6822604990175, 0.5124673187864222, 0.1595627909684813,
            -145.93742731401096, -2954.234440189563, 0.009672881359732015, 106.4646644917487,
            -0.050606976105730346, 2.3904047264403596, -4.987645640997455, -43.22984692765006,
            -36.177108409134966, -0.3812547430698569, -2959.4921368963633, -1.8635802741029985,
            0.020513128847167047, -0.9179124323385958]

        (i2h, h2o, bias) = split_weights(
            weights,
            input_values,
            hidden_layer_size
        )
        ANN.__init__(self, i2h, h2o, bias)

axelrod/tests/unit/test_ann.py

Lines changed: 43 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,43 @@
1+
"""Tests for the ANN (artificial neural network) strategies."""
2+
3+
import axelrod
4+
5+
from .test_player import TestHeadsUp, TestPlayer
6+
7+
C, D = axelrod.Actions.C, axelrod.Actions.D
8+
9+
10+
class TestEvolvedANN(TestPlayer):
    """Declarative configuration consumed by the shared TestPlayer harness:
    it checks the strategy's classifier and first-move behaviour."""

    name = "EvolvedANN"
    player = axelrod.EvolvedANN
    # Expected classifier after EvolvedANN.__init__ sets makes_use_of.
    expected_classifier = {
        'memory_depth': float('inf'),
        'stochastic': False,
        'makes_use_of': set(["length"]),
        'long_run_time': False,
        'inspects_source': False,
        'manipulates_source': False,
        'manipulates_state': False
    }

    def test_strategy(self):
        # Test initial play sequence
        self.first_play_test(C)
27+
28+
29+
class TestEvolvedANNvsCooperator(TestHeadsUp):
    def test_rounds(self):
        """EvolvedANN's opening five moves against an unconditional cooperator."""
        expected_self = [C, D, D, C, D]
        expected_opponent = [C] * 5
        self.versus_test(axelrod.EvolvedANN(), axelrod.Cooperator(),
                         expected_self, expected_opponent)
33+
34+
35+
class TestEvolvedANNvsDefector(TestHeadsUp):
    def test_rounds(self):
        """EvolvedANN's opening five moves against an unconditional defector."""
        expected_self = [C, D, D, D, D]
        expected_opponent = [D] * 5
        self.versus_test(axelrod.EvolvedANN(), axelrod.Defector(),
                         expected_self, expected_opponent)
39+
40+
class TestEvolvedANNvsTFT(TestHeadsUp):
    def test_rounds(self):
        """EvolvedANN's opening moves against Tit For Tat."""
        expected_self = [C, D, D, C, C]
        # NOTE(review): the `* 5` yields 25 expected opponent moves for a
        # 5-move exchange — looks unintended, but preserved as-is; confirm
        # against TestHeadsUp.versus_test semantics.
        expected_opponent = [C, C, D, D, C] * 5
        self.versus_test(axelrod.EvolvedANN(), axelrod.TitForTat(),
                         expected_self, expected_opponent)

docs/tutorials/advanced/classification_of_strategies.rst

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -81,7 +81,7 @@ length of each match of the tournament::
8181
... }
8282
>>> strategies = axl.filtered_strategies(filterset)
8383
>>> len(strategies)
84-
5
84+
6
8585

8686
Note that in the filterset dictionary, the value for the 'makes_use_of' key
8787
must be a list. Here is how we might identify the number of strategies that use

0 commit comments

Comments
 (0)