|
@ -19,15 +19,17 @@ DEFAULT_PROB_INSN_UNUSED = 0.1 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def cost(orig_prgm, test_cases, outputs, prgm) -> Tuple[float, bool]:
    """Score candidate program `prgm` against the original `orig_prgm`.

    The score combines a scaled performance delta with a correctness
    penalty accumulated over `test_cases`.

    Args:
        orig_prgm: the reference program being optimized.
        test_cases: iterable of test cases to check equivalence on.
        outputs: expected outputs passed through to `eq_on_testcase`.
        prgm: the candidate (mutated) program.

    Returns:
        (cost, correct): `cost` is the scalar to minimize; `correct` is
        True when the accumulated equivalence penalty is zero, i.e. the
        candidate agreed with the original on every test case.
    """
    # Since each instruction executes in 4*k cycles (for some k), this can have
    # the undesirable effect of performance improvements being weighted much
    # higher than correctness. This hurts convergence pretty badly, so we scale
    # by 1/4 to compensate.
    perf = (prgm.perf() - orig_prgm.perf()) / 4.0

    # Accumulated correctness penalty. NOTE(review): assumes eq_on_testcase
    # returns 0 on a match and a positive penalty on a mismatch — confirm
    # against its definition elsewhere in this file.
    eq = 0
    for test_case in test_cases:
        eq += eq_on_testcase(orig_prgm, prgm, test_case, outputs)

    return perf + eq, eq == 0
|
def optimize( |
|
|
def optimize( |
|
|