gameboy superoptimizer
from math import log
from random import random
from typing import List, Optional, Tuple

from gbso.program.test_case import Output, TestCase, eq_on_testcase
from gbso.program.mutate import mutate_program
from gbso.program.program import Program

EPSILON = 0.00001

DEFAULT_NUM_ITERS = 1_000_000

DEFAULT_PROB_OPCODE = 0.25
DEFAULT_PROB_OPERAND = 0.25
DEFAULT_PROB_SWAP = 0.25
DEFAULT_PROB_INSN = 0.25
DEFAULT_PROB_INSN_UNUSED = 0.1


def cost(orig_prgm, test_cases, outputs, prgm) -> Tuple[int, bool]:
    # Performance delta: negative when the candidate is faster than the original.
    c = prgm.perf() - orig_prgm.perf()
    # print(f"init cost: {c}")

    # Penalty for every test case where the candidate's outputs differ from the
    # original program's. A candidate is equivalent only if every penalty is zero.
    eq_cost = 0
    for test_case in test_cases:
        eq_cost += eq_on_testcase(orig_prgm, prgm, test_case, outputs)
        # print(f"cost after testcase: {c + eq_cost}")

    return c + eq_cost, eq_cost == 0
def optimize(
    target_prgm: Program,
    max_size: int,
    test_cases: List[TestCase],
    outputs: List[Output],
    beta: float = 0.5,  # How far away in cost you are allowed to search
    init_prgm: Optional[Program] = None,
    num_iters: int = DEFAULT_NUM_ITERS,
    prob_opcode: float = DEFAULT_PROB_OPCODE,
    prob_operand: float = DEFAULT_PROB_OPERAND,
    prob_swap: float = DEFAULT_PROB_SWAP,
    prob_insn: float = DEFAULT_PROB_INSN,
    prob_insn_unused: float = DEFAULT_PROB_INSN_UNUSED,
) -> Program:
    # Start the random walk from the initial program (or the target itself),
    # padded with unused instructions up to max_size.
    padded_prgm = (init_prgm or target_prgm).pad(max_size)

    last_prgm = padded_prgm
    last_cost, _last_eq = cost(target_prgm, test_cases, outputs, last_prgm)

    # The padded target is the best known program so far, with cost 0 by definition.
    best_prgm = target_prgm.pad(max_size)
    best_cost = 0

    num_candidates = 0
    for _ in range(num_iters):
        candidate_prgm = mutate_program(
            last_prgm, prob_opcode, prob_operand, prob_swap, prob_insn, prob_insn_unused
        )
        candidate_cost, candidate_eq = cost(
            target_prgm, test_cases, outputs, candidate_prgm
        )

        # Keep the cheapest candidate that is still equivalent to the target.
        if candidate_cost < best_cost and candidate_eq:
            best_prgm = candidate_prgm
            best_cost = candidate_cost
            num_candidates += 1

        # Metropolis-style acceptance: cheaper candidates are always taken as the
        # new starting point; more expensive ones are taken occasionally, since
        # -log(random()) / beta is an exponentially distributed slack. This lets
        # the search climb out of local minima.
        if candidate_cost < last_cost - log(random()) / beta:
            last_prgm = candidate_prgm
            last_cost = candidate_cost

    print(f"Optimization complete. Total candidates: {num_candidates}")

    return best_prgm
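
The heart of optimize is the acceptance test at the bottom of the loop: a mutation that lowers the cost is always accepted, and one that raises it by delta is accepted with probability exp(-beta * delta), because -log(random()) / beta is exponentially distributed. The sketch below demonstrates just that rule on a made-up one-dimensional cost surface; toy_cost and toy_optimize are illustrative names and not part of gbso, they only mirror the shape of the search loop above.

    from math import cos, log
    from random import gauss, random, seed


    def toy_cost(x: float) -> float:
        # A bumpy 1-D cost surface: global minimum near x ~ 3.1, plus local minima.
        return (x - 3.0) ** 2 + 2.0 * cos(5.0 * x)


    def toy_optimize(num_iters: int = 100_000, beta: float = 0.5) -> float:
        last_x, last_cost = 0.0, toy_cost(0.0)
        best_x, best_cost = last_x, last_cost

        for _ in range(num_iters):
            candidate_x = last_x + gauss(0.0, 0.5)  # "mutate" the current point
            candidate_cost = toy_cost(candidate_x)

            # Track the best point seen so far.
            if candidate_cost < best_cost:
                best_x, best_cost = candidate_x, candidate_cost

            # Same acceptance rule as optimize(): downhill moves always pass,
            # uphill moves pass with probability exp(-beta * (candidate - last)).
            if candidate_cost < last_cost - log(random()) / beta:
                last_x, last_cost = candidate_x, candidate_cost

        return best_x


    if __name__ == "__main__":
        seed(0)
        print(toy_optimize())  # should land near the global minimum around x ~ 3.1

In the real search, mutate_program plays the role of the Gaussian step and cost() combines the performance delta with the test-case penalties, but the acceptance logic is the same.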