(* inspire.ml *)
open Type;;

(*
let new_inspiration_search state =
  ()
;;
*)

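(* Build a candidate plan for [p_ship] moving in [dir]: Still means mining in place;
   happiness is scored by [eval]. *)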
let make_move_plan state p_ship eval dir =
  let position = Game.directional_offset_position state p_ship.entity.position dir in
  let mining = dir = Still in
  let happiness = eval state p_ship position mining in
  {
    plan_direction = dir;
    plan_position = position;
    mining = mining;
    happiness = happiness;
    plan_ship = p_ship;
  }
;;

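(* [position] counts as inspired for [p_ship] when more than one opposing ship's
   planned (or current) position lies within inspiration_radius of it. *)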
let plan_is_inspired state p_ship position =
  let count = List.fold_left (fun acc o_ship ->
    if o_ship.entity.owner = p_ship.entity.owner then acc
    else (
      let o_pos = match o_ship.plan with
        | None -> o_ship.entity.position
        | Some plan -> plan.plan_position
      in
      let distance = Game.calculate_distance state position o_pos in
        if distance <= state.const.inspiration_radius then acc + 1 else acc
    )
  ) 0 p_ship.could_inspire in
  count > 1
;;

let plan_count_inspires state p_ship position =
  List.fold_left (fun acc o_ship ->
    if plan_is_inspired state o_ship position then acc + 1 else acc
  ) 0 p_ship.could_inspire
;;

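(* True when an opposing ship from [could_inspire] is at [position] or adjacent to it,
   i.e. it could be standing there next turn. *)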
let nearship_could_occupy state p_ship position =
  List.exists (fun o_ship -> 
    (p_ship.entity.owner != o_ship.entity.owner )
    && ((o_ship.entity.position = position) || Game.adjacent state o_ship.entity.position position)
  ) p_ship.could_inspire
;;

let nearship_at state p_ship position =
  List.exists (fun o_ship -> 
    (p_ship.entity.owner != o_ship.entity.owner)
    && (o_ship.entity.position = position)
  ) p_ship.could_inspire
;;

let plan_could_crash state p_ship position =
  nearship_could_occupy state p_ship position
;;

let plan_likely_crash state p_ship position =
  nearship_at state p_ship position
;;

let plan_crash_avoidance state p_ship = 
  if p_ship.assignment = Sabotage then 0. else 1.
;; (*FIXME*)

let get_plan_position p_ship =
  match p_ship.plan with 
    | None -> p_ship.entity.position
    | Some plan -> plan.plan_position
;;

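(* Penalty for a move an opponent could contest: 0. when crash avoidance is disabled,
   otherwise between -2. and -1., with the magnitude shrinking as the inspired danger
   score rises. *)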
let plan_could_crash_penalty state p_ship could_crash likely_crash crash_avoidance =
  if crash_avoidance = 0. then 0. else
  let danger_score = Iguana.inspired_danger_score state p_ship (get_plan_position p_ship) in
    if could_crash || likely_crash then 
      min (-1.) (-2. +. (2. *. danger_score)) 
    else 0. (*FIXME*)
;;

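(* Ships from [p_ship.could_inspire] whose planned position is within
   inspiration_radius of [p_ship]'s planned position. *)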
let plan_inspiring_ship state p_ship =
  let ppos = get_plan_position p_ship in
  List.filter (fun o_ship ->
    let opos = get_plan_position o_ship in
    Game.calculate_distance state ppos opos <= state.const.inspiration_radius
  ) p_ship.could_inspire
;;

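(* Penalty for handing inspiration to nearby ships: for each nearby ship that already
   has exactly one other ship in inspiration range, if [position] would also be in range,
   subtract an estimate of the extra halite that ship could extract (normalised by
   max_energy). [inspires] is currently unused. *)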
let plan_inspires_penalty state p_ship position inspires =
  let inspires_collection = List.fold_left (fun acc o_ship ->
    let inspiring_o = plan_inspiring_ship state o_ship in
    let num_inspiring = List.length inspiring_o in
    let p_ship_is_inspiring = (List.exists (fun i -> i.entity.owner = p_ship.entity.owner && i.entity.id = p_ship.entity.id) inspiring_o) in
    let num_inspiring' = if p_ship_is_inspiring then num_inspiring - 1 else num_inspiring in
    let o_pos = get_plan_position o_ship in
    let distance = Game.calculate_distance state position o_pos in
    let position_would_inspire = num_inspiring' = 1 && (distance <= state.const.inspiration_radius) in
    acc +. if position_would_inspire then (
      let (row, col) = o_pos in
      let halite = state.map.(row).(col) in
      (float_of_int (halite * 2 / state.const.extract_ratio)) /. (float_of_int state.const.max_energy)
    ) else 0.
  ) 0. p_ship.could_inspire in
  0. -. inspires_collection (*float_of_int inspires (*FIXME*)*)
;;

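(* Bonus for being inspired at [position]: proportional to the cell's halite
   (normalised by max_energy), strongly favouring staying in place over moving. *)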
let plan_inspired_bonus state p_ship position inspired =
  if inspired then (
    let row, col = position in
    let mul = 
      if p_ship.entity.position = p_ship.prev_position then (
        if position = p_ship.entity.position then 1. else 0.25
      )
      else (
        if position = p_ship.entity.position then 1.2 else 0.05
      )
    in
    let bonus = state.map.(row).(col) in
    float_of_int bonus *. 2.  *. mul /. (float_of_int state.const.max_energy)
  ) else 0. (*FIXME*)
;;

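(* Halite gained this turn: a quarter of the cell when mining, otherwise the difference
   between a quarter of the destination cell and a quarter of the current cell. *)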
let mining_collect_bonus state p_ship position mining =
  let row, col = position in
  if mining then (state.map.(row).(col) / 4) else (
    let cr, cc = p_ship.entity.position in
      (state.map.(row).(col) - state.map.(cr).(cc)) / 4
  )
;;

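(* Combined happiness of a move: destination evaluation plus the inspires penalty,
   plus the inspired bonus when no crash penalty applies. *)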
let general_plan_insp_eval state p_ship eval position mining =
  let inspires = plan_count_inspires state p_ship position in
  let inspired = plan_is_inspired state p_ship position in
  let could_crash = plan_could_crash state p_ship position in
  let likely_crash = plan_likely_crash state p_ship position in
  let crash_avoidance = plan_crash_avoidance state p_ship in
  (*
  let likely_penalty = match likely_crash with
    | None -> 0.
    | Some likely -> plan_crash_penalty state p_ship likely
  in
  *) (*FIXME*)
  let crash_penalty = plan_could_crash_penalty state p_ship could_crash likely_crash crash_avoidance in
  (*if crash_penalty < 0. then Debug.debug "Plan could crash ";*)
  let inspires_penalty = plan_inspires_penalty state p_ship position inspires in
  let inspired_bonus = plan_inspired_bonus state p_ship position inspired in
  let dest_bonus = match eval with
    | None -> 0.
    | Some eval' ->
      eval' position
  in
  let result = dest_bonus +. (inspires_penalty +. if (crash_penalty >= 0.) then inspired_bonus else 0.) in
    (*let prow, pcol = position in*)
    (*Debug.debug (Printf.sprintf "dest_bonus = %f : crash_penalty = %f : inspires_penalty = %f : inspired_bonus = %f  " dest_bonus crash_penalty inspires_penalty inspired_bonus);*)
    (*Debug.debug (Printf.sprintf "inspired result for move to %d, %d was %f\n" prow pcol result);*)
    result
;;

(*
let sub_collect_plan_insp_eval state p_ship eval position mining =
  let inspires = plan_count_inspires state p_ship position in
  let inspired = plan_is_inspired state p_ship position in
  let could_crash = plan_could_crash state p_ship position in
  let likely_crash = plan_likely_crash state p_ship position in
  let crash_avoidance = plan_crash_avoidance state p_ship in
  (*
  let likely_penalty = match likely_crash with
    | None -> 0.
    | Some likely -> plan_crash_penalty state p_ship likely
  in
  *) (*FIXME*)
  let crash_penalty = plan_could_crash_penalty state p_ship could_crash likely_crash crash_avoidance in
  if crash_penalty < 0. then Debug.debug "Plan could crash ";
  let inspires_penalty = plan_inspires_penalty state p_ship position inspires in
  let inspired_bonus = plan_inspired_bonus state p_ship position inspired in
  let collect_bonus' = mining_collect_bonus state p_ship position mining in
  let collect_bonus = float_of_int (collect_bonus') /. (float_of_int state.const.max_energy) in
  let dest_bonus = match eval with
    | None -> 0.
    | Some eval' ->
      eval' position
  in
  let result = dest_bonus +. crash_penalty +. (inspires_penalty +. inspired_bonus +. (collect_bonus *. 1.)) in
    let prow, pcol = position in
    Debug.debug (Printf.sprintf "dest_bonus = %f : crash_penalty = %f : inspires_penalty = %f : inspired_bonus = %f : collect_bonus = %f : collect_bonus' = %d " dest_bonus crash_penalty inspires_penalty inspired_bonus collect_bonus collect_bonus');
    Debug.debug (Printf.sprintf "inspired result for move to %d, %d was %f\n" prow pcol result);
    result
;;
*)

let collect_plan_insp_eval state p_ship position mining =
  let dest_eval = Mole.collector_get_eval state p_ship in
  (*sub_collect_plan_insp_eval state p_ship dest_eval position mining*)
  general_plan_insp_eval state p_ship dest_eval position mining
;;

let sabotage_plan_insp_eval state p_ship position mining =
  let dest_eval = Mole.get_sabotage_eval state p_ship in
  (*sub_collect_plan_insp_eval state p_ship dest_eval position mining*)
  general_plan_insp_eval state p_ship dest_eval position mining
;;

let return_plan_insp_eval state p_ship position mining =
  let eval = Mole.returner_get_eval state p_ship in
  match eval with
  | None -> failwith "do not call for non-returner"
  | Some eval' -> eval' position
;;

let choose_insp_eval state p_ship =
  match p_ship.assignment with
  | Collect -> collect_plan_insp_eval
  | Return -> (*return_insp_eval*) return_plan_insp_eval (*FIXME*)
  | _ -> sabotage_plan_insp_eval (*collect_plan_insp_eval*) (*FIXME*)
;;

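(* Minimum cell halite worth mining: the fourth entry of percentile_thresholds,
   relaxed for cells close to a dropoff (dropoff_search presumably holds the distance
   to the nearest dropoff). *)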
let get_collect_threshold state (row, col) =
  let ct' = List.nth state.persist.percentile_thresholds 3 in
  let base_dist = state.persist.dropoff_search.(row).(col) in
  max 1 (ct' - (ct' / (base_dist + 2)))
;;

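(* Mine in place when the cell meets the collect threshold, the ship has room for at
   least half the expected reward, is under 90% capacity and has no contested target. *)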
let collect_plan_should_mine state p_ship =
  let collect_threshold = get_collect_threshold state p_ship.entity.position in
  let row, col = p_ship.entity.position in
  let reward = state.map.(row).(col) / 4 in
  (p_ship.entity.halite <= state.const.max_energy - reward / 2) 
  && (state.map.(row).(col) >= collect_threshold)
  && (p_ship.entity.halite <= (state.const.max_energy * 9 / 10))
  && (p_ship.contested_target = None)
;;

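(* Lift the happiness of the Still (mining) plan to just above the best moving plan,
   making mining the preferred move. *)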
let raise_floor_mining_plan_happiness p_ship =
  List.iter (fun plan ->
    match plan.plan_direction with
    | Still ->
      let max_other_happiness = List.fold_left (fun acc plan' ->
        match plan'.plan_direction with
        | Still -> acc
        | _ ->
          max acc plan'.happiness
      ) 0. p_ship.possible_moves
      in
      plan.happiness <- max plan.happiness (max_other_happiness +. 0.0001)
    | _ -> ()
  ) p_ship.possible_moves;
;;

let choose_insp_should_mine p_ship =
  match p_ship.assignment with
  | Collect -> collect_plan_should_mine
  | Return -> (*return_insp_eval*) collect_plan_should_mine (*FIXME*)
  | _ -> (*sabotage_insp_eval*) collect_plan_should_mine (*FIXME*)
;;

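(* Re-evaluate the happiness of every possible move for [p_ship], optionally raise the
   mining floor, and keep the happiest plan. *)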
let update_inspiration_plans state p_ship =

  let eval = choose_insp_eval state p_ship in
  let should_mine = choose_insp_should_mine p_ship in

  List.iter (fun plan ->
    let happiness = eval state p_ship plan.plan_position (plan.plan_position = p_ship.entity.position) in
      plan.happiness <- happiness;
      (*Debug.debug (Printf.sprintf "Happiness for ship %d owned by %d plan moving %s was %f\n" p_ship.entity.id p_ship.entity.owner (Debug.string_of_dir plan.plan_direction) plan.happiness);*)
  ) p_ship.possible_moves;

  if should_mine state p_ship then raise_floor_mining_plan_happiness p_ship;

  match p_ship.plan with
  | None -> failwith "p_ship must have plan here\n"
  | Some current_plan ->
    let best = List.fold_left (fun prev_best plan ->
      if prev_best.happiness > plan.happiness then prev_best else plan
    ) current_plan p_ship.possible_moves in

    p_ship.plan <- Some best;
;;

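(* Give every tracked ship a Still plan plus, when it is able to move,
   one plan per direction. *)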
let init_inspiration_plans state =
  (*Debug.debug (Printf.sprintf "time elapsed before init_inspiration_plans = %f\n" (Game.time_seconds_elapsed_this_turn state));*)
  List.iter (fun p_ship ->
    let eval = choose_insp_eval state p_ship in
    let still_plan = make_move_plan state p_ship eval Still in
    let move_plans = if (Iguana.cannot_move state p_ship.entity) then [] else List.map (fun dir ->
      make_move_plan state p_ship eval dir
    ) Game.move_dirs in
    p_ship.plan <- Some still_plan;
    p_ship.possible_moves <- still_plan :: move_plans;
  ) state.persist.p_ships;
  (*Debug.debug (Printf.sprintf "time elapsed after init_inspiration_plans = %f\n" (Game.time_seconds_elapsed_this_turn state));*)
;;

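(* Inspiration-relevant ships (excluding Construct assignments), ordered so that
   opponents' ships tend to come before mine, with higher-halite ships first within
   the same owner. *)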
let sorted_i_ship_list state =
  let ships = List.filter (fun p_ship -> p_ship.inspiration_relevant && not (p_ship.assignment = Construct)) state.persist.p_ships in
  let sorted = List.stable_sort (fun p_ship o_ship ->
    if p_ship.entity.owner = state.my_id 
    && (o_ship.entity.owner != state.my_id) then 
      1
    else if p_ship.entity.owner = o_ship.entity.owner then
      o_ship.entity.halite - p_ship.entity.halite (* high halite goes first *)
    else p_ship.entity.owner - o_ship.entity.owner
  ) ships in
    sorted
;;

(* Minimum and maximum happiness over a list of plans. *)
let plan_list_happiness_limits plans =
  List.fold_left (fun (prev_min, prev_max) plan ->
    (min prev_min plan.happiness, max prev_max plan.happiness)
  ) (max_float, neg_infinity) plans
;;

let ships_plans_happiness_limits p_ships =
  List.fold_left (fun (prev_min, prev_max) p_ship ->
    let min_happiness, max_happiness = plan_list_happiness_limits p_ship.possible_moves in
    let new_min = min min_happiness prev_min in
    let new_max = max max_happiness prev_max in
      new_min, new_max
  ) (max_float, neg_infinity) p_ships
;;

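(* For each of my ships, pass every possible move and its happiness to
   Iguana.add_weighted_desires. *)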
let convert_plans_to_desires state p_ships =
  (*let min_happiness, max_happiness = ships_plans_happiness_limits p_ships in*)
  List.iter (fun p_ship ->
    if p_ship.entity.owner = state.my_id then (
      let weighted = List.map (fun plan ->
        let weight = (*Iguana.normalise_float min_happiness max_happiness plan.happiness in*) plan.happiness in
        (plan.plan_direction, weight)
      ) p_ship.possible_moves in
      Iguana.add_weighted_desires state p_ship weighted 1.
    )
  ) p_ships
;;

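(* Entry point: initialise plans for all ships, then repeatedly re-evaluate my ships'
   plans within the per-turn time budget (1.4 s), raise the mining floor where
   appropriate, and convert the final plans to weighted desires. *)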
let choose_inspiration_relevant_moves state =
  let continue = ref true in
  (*let insp_search = new_inspiration_search state in*)
  (* set a plan for all ships - stay where you are *)
  init_inspiration_plans state;
  let count = ref 0 in
  let i_ships = sorted_i_ship_list state in
  while (List.length i_ships > 0 && !continue && (Game.time_seconds_elapsed_this_turn state < 1.4)) do
    let index = !count mod (List.length i_ships) in
    let p_ship = List.nth i_ships index in
    if (p_ship.entity.owner = state.my_id) then (
      update_inspiration_plans state p_ship;
    );
    count := !count + 1;
    (*
    if !count >= List.length i_ships * 10 then continue := false (*FIXME*)
    *)

    (*ignore insp_search*)
    (* 
      Choose a ship (randomly? sequentially? 
        All of mine first then all of each opponent's)
      Evaluate possible moves, choose the happiest and update plan
      Update happiness of all ships in p_ship's could_inspire list

      After three passes, choose pairs of nearby aligned ships and update their plans together
    *)
  done;
  List.iter (fun p_ship ->
    let should_mine = choose_insp_should_mine p_ship in
    if should_mine state p_ship then raise_floor_mining_plan_happiness p_ship;
  ) i_ships;
  convert_plans_to_desires state i_ships;
  (* Add planned moves to intent map and note that they have decided their moves*)
;;