Claw  1.7.3
game_ai.tpp
Go to the documentation of this file.
1 /*
2  CLAW - a C++ Library Absolutely Wonderful
3 
4  CLAW is a free library without any particular aim but being useful to
5  anyone.
6 
7  Copyright (C) 2005 Sébastien Angibaud
8  Copyright (C) 2005-2011 Julien Jorge
9 
10  This library is free software; you can redistribute it and/or
11  modify it under the terms of the GNU Lesser General Public
12  License as published by the Free Software Foundation; either
13  version 2.1 of the License, or (at your option) any later version.
14 
15  This library is distributed in the hope that it will be useful,
16  but WITHOUT ANY WARRANTY; without even the implied warranty of
17  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  Lesser General Public License for more details.
19 
20  You should have received a copy of the GNU Lesser General Public
21  License along with this library; if not, write to the Free Software
22  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 
24  contact: julien.jorge@gamned.org
25 */
31 #include <claw/max_vector.hpp>
32 
33 #include <cstdlib>
34 
35 //**************************** gamestate **************************************
36 
37 /*---------------------------------------------------------------------------*/
41 template<typename Action, typename Numeric>
43 {
44  // nothing to do
45 } // game_state::~game_state()
46 
47 /*---------------------------------------------------------------------------*/
51 template <typename Action, typename Numeric>
54 {
55  return s_min_score;
56 } // game_state::min_score()
57 
58 /*---------------------------------------------------------------------------*/
62 template <typename Action, typename Numeric>
65 {
66  return s_max_score;
67 } // game_state::max_score()
68 
69 /*---------------------------------------------------------------------------*/
74 template<typename Action, typename Numeric>
77 ( score score_val ) const
78 {
79  if ( s_max_score < score_val )
80  return s_max_score;
81  else if ( score_val < s_min_score )
82  return s_min_score;
83  else
84  return score_val;
85 } // game_state::fit()
86 
87 
88 //**************************** action_eval ************************************
89 
90 
91 /*---------------------------------------------------------------------------*/
97 template <typename Action, typename Numeric>
99 ( const Action& a, const Numeric& e)
100  : action(a), eval(e)
101 {
102 
103 } // action_eval::action_eval()
104 
105 /*---------------------------------------------------------------------------*/
110 template <typename Action, typename Numeric>
112  ( const action_eval& ae ) const
113 {
114  return eval < ae.eval;
115 } // action_eval::operator<()
116 
117 #if 0
118 /*---------------------------------------------------------------------------*/
123 template <typename Action, typename Numeric>
125  ( const action_eval& ae ) const
126 {
127  return eval == ae.eval;
128 } // action_eval::operator==()
129 #endif
130 
131 
132 
133 //********************************* min_max ***********************************
134 
135 
136 /*---------------------------------------------------------------------------*/
143 template<typename State>
146  ( int depth, const state& current_state, bool computer_turn ) const
147 {
148  score score_val;
149 
150  // we reached a final state or we are not allowed to search more.
151  if ( current_state.final() || (depth == 0) )
152  score_val = current_state.evaluate();
153  else
154  {
155  std::list<action> next_actions;
156  typename std::list<action>::const_iterator it;
157  state* new_state;
158 
159  // get all reachable states
160  current_state.next_actions( next_actions );
161 
162  if ( next_actions.empty() )
163  score_val = current_state.evaluate();
164  else if (computer_turn)
165  {
166  score_val = current_state.min_score();
167 
168  for (it = next_actions.begin(); it!=next_actions.end(); ++it)
169  {
170  new_state=static_cast<state*>(current_state.do_action(*it));
171 
172  // evaluate the action of the human player
173  score s = (*this)( depth-1, *new_state, false );
174 
175  // and keep the best action he can do.
176  if (s > score_val)
177  score_val = s;
178 
179  delete new_state;
180  }
181  }
182  else // human player's turn
183  {
184  score_val = current_state.max_score();
185 
186  for (it = next_actions.begin(); it!=next_actions.end(); ++it)
187  {
188  new_state=static_cast<state*>(current_state.do_action(*it));
189 
190  // evaluate the action of the computer player
191  score s = (*this)( depth-1, *new_state, true );
192 
193  // and keep the worst action he can do
194  if (s < score_val)
195  score_val = s;
196 
197  delete new_state;
198  }
199  }
200  }
201 
202  return score_val;
203 } // min_max::operator()
204 
205 
206 
207 
208 
209 //******************************** alpha_beta *********************************
210 
211 
212 /*---------------------------------------------------------------------------*/
219 template <typename State>
220 typename State::score claw::ai::game::alpha_beta<State>::operator()
221  ( int depth, const state& current_state, bool computer_turn ) const
222 {
223  return this->compute
224  ( depth, current_state, computer_turn, current_state.min_score(),
225  current_state.max_score() );
226 } // alpha_beta::operator()
227 
228 /*---------------------------------------------------------------------------*/
237 template<typename State>
240 ( int depth, const state& current_state, bool computer_turn, score alpha,
241  score beta ) const
242 {
243  score score_val;
244 
245  // we reached a final state or we are not allowed to search more.
246  if ( current_state.final() || (depth == 0) )
247  score_val = current_state.evaluate();
248  else
249  {
250  std::list<action> next_actions;
251  typename std::list<action>::const_iterator it;
252  State* new_state;
253 
254  // get all reachable states
255  current_state.next_actions( next_actions );
256 
257  if ( next_actions.empty() )
258  score_val = current_state.evaluate();
259  else if (computer_turn)
260  {
261  score_val = current_state.min_score();
262 
263  it = next_actions.begin();
264 
265  while ( it!=next_actions.end() && (score_val < beta) )
266  {
267  new_state=static_cast<state*>(current_state.do_action(*it));
268 
269  // evaluate the action of the human player
270  score s = compute
271  ( depth-1, *new_state, false, std::max(alpha, score_val), beta );
272 
273  // and keep the best action he can do.
274  if (s > score_val)
275  score_val = s;
276 
277  delete new_state;
278 
279  ++it;
280  }
281  }
282  else // human player's turn
283  {
284  score_val = current_state.max_score();
285 
286  it = next_actions.begin();
287 
288  while ( it!=next_actions.end() && (score_val > alpha) )
289  {
290  new_state=static_cast<state*>(current_state.do_action(*it));
291 
292  // evaluate the action of the computer player
293  score s = compute
294  ( depth-1, *new_state, true, alpha, std::min(beta, score_val) );
295 
296  // and keep the worst action he can do
297  if (s < score_val)
298  score_val = s;
299  ++it;
300 
301  delete new_state;
302  }
303  }
304  }
305 
306  return score_val;
307 } // alpha_beta::compute()
308 
309 
310 
311 
312 
313 //***************************** select_action *********************************
314 
315 
316 
317 
318 /*---------------------------------------------------------------------------*/
326 template<typename Method>
328  ( int depth, const state& current_state, action& new_action,
329  bool computer_turn ) const
330 {
331  std::list<action> l;
332  typename std::list<action>::iterator it;
333  score best_eval;
334  Method method;
335 
336  // get all reachable states
337  current_state.next_actions( l );
338  best_eval = current_state.min_score();
339 
340  for (it=l.begin(); it!=l.end(); ++it)
341  {
342  state* new_state;
343  score eval;
344 
345  // try and evaluate each action
346  new_state = static_cast<state*>(current_state.do_action(*it));
347  eval = method(depth-1, *new_state, !computer_turn);
348 
349  delete new_state;
350 
351  // we keep one of the best actions
352  if (eval > best_eval)
353  {
354  best_eval = eval;
355  new_action = *it;
356  }
357  }
358 } // select_action::operator()
359 
360 
361 //*************************** select_random_action ****************************
362 
370 template<typename Method>
372  ( int depth, const state& current_state, action& new_action,
373  bool computer_turn ) const
374 {
375  std::list<action> l;
376  typename std::list<action>::iterator it;
377  action_eval<action, score> eval( new_action, current_state.min_score() );
378  Method method;
380 
381  // get all reachable states
382  current_state.next_actions( l );
383 
384  for (it=l.begin(); it!=l.end(); ++it)
385  {
386  state* new_state;
387 
388  // try and evaluate each action
389  new_state = static_cast<state*>(current_state.do_action(*it));
390 
391  eval.action = *it;
392  eval.eval = method(depth-1, *new_state, !computer_turn);
393 
394  delete new_state;
395 
396  // keep the best actions.
397  events.add( eval );
398  }
399 
400  std::size_t i = (double)rand()/(RAND_MAX + 1) * events.get_v().size();
401  new_action = events.get_v()[i].action;
402 } // select_random_action::operator()
403