Hello world for Advent of Code 2021
Second year - even less time. Let’s see…
Run the solution with python solution.py
print("Hello AoC 2021")
Sweep list
Data is read from the file 'input<two-digit-day>.txt' line by line. List compare for *. Sliding window compare for **.
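Just to illustrate the two comparisons, a minimal standalone sketch (not the solution below) using zip over shifted lists; depths is a small example list:
depths = [199, 200, 208, 210, 200, 207, 240, 269, 260, 263]   # small example list
star1 = sum(b > a for a, b in zip(depths, depths[1:]))
# comparing 3-wide window sums is the same as comparing elements three apart: (b+c+d) > (a+b+c) <=> d > a
star2 = sum(b > a for a, b in zip(depths, depths[3:]))
print(star1, star2)   # 7 5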
# see description.adoc
from pprint import pprint
def read_daily_input(filename):
''' Read lines from file with given input name
cast to daily required type and return list '''
with open(filename) as input_file:
local_list = input_file.readlines()
return_list = [int(item.strip()) for item in local_list]
return return_list
def preprocess_input(daily_list):
''' not needed today
'''
pass
def main():
daily_list = read_daily_input('input01.txt')
sum1 = 0
previous_item = -1
for item in daily_list:
if (previous_item >= 0):
if (previous_item < item):
sum1 += 1
previous_item = item
star1 = sum1
print(f"Result (*): {star1}")
sum2 = 0
previous_window = -1
for i in range(len(daily_list)-2):
current_window = sum(daily_list[i:i+3])
if (previous_window >= 0):
if (previous_window < current_window):
sum2 += 1
previous_window = current_window
star2 = sum2
print(f"Result (**): {star2}")
if __name__ == "__main__":
main()
Calculate coordinates
Data is read from the file 'input<two-digit-day>.txt' line by line. Simple tracking of depth and position for *. Add aim and the corresponding change for the depth calculation for **.
"Wer lesen kann, ist klar im Vorteil…" π (-→ Reading helps π)
# see description.adoc
from pprint import pprint
def read_daily_input(filename):
''' Read lines from file with given input name
cast to daily required type and return list '''
with open(filename) as input_file:
local_list = input_file.readlines()
return_list = [item.strip() for item in local_list]
return return_list
def preprocess_input(daily_list):
''' not needed today
'''
pass
def main():
daily_list = read_daily_input('input02.txt')
depth = 0
position = 0
for item in daily_list:
command, parameter = item.split()
parameter_input = int(parameter)
if (command == 'down'):
depth += parameter_input
elif (command == 'up'):
depth -= parameter_input
elif (command == 'forward'):
position += parameter_input
if (depth < 0):
print('jump!')
star1 = depth * position
print(f"Result (*): {star1}")
depth = 0
position = 0
aim = 0
for item in daily_list:
command, parameter = item.split()
parameter_input = int(parameter)
if (command == 'down'):
aim += parameter_input
elif (command == 'up'):
aim -= parameter_input
elif (command == 'forward'):
position += parameter_input
depth += aim*parameter_input
if (depth < 0):
print('jump, jump!')
star2 = depth * position
print(f"Result (**): {star2}")
if __name__ == "__main__":
main()
Check health of sub. *: Get max_count('0'/'1') and min_count in the transposed input list and multiply the two (decimal) numbers. **: Filter the input list via (recalculated) max/min counts down to a single element and multiply.
Data is read from the file 'input<two-digit-day>.txt' line by line. Transpose list. Get the most common bit status per (transposed) position. *: Invert binary and multiply. **: Filter list and iterate.
Took me too long to work out the algorithm…
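As a side note, the transpose-and-count can be written compactly with zip and collections.Counter; a minimal sketch (gamma_epsilon is a hypothetical helper, assuming daily_list holds the bit strings), not the transpose_input()/get_most_common_value() pair used below:
from collections import Counter

def gamma_epsilon(daily_list):
    ''' Most/least common bit per column -> (gamma, epsilon) as binary strings '''
    gamma = ''
    for column in zip(*daily_list):              # zip(*...) transposes rows into columns
        counts = Counter(column)
        gamma += '1' if counts['1'] >= counts['0'] else '0'   # '1' wins ties, as below
    epsilon = ''.join('0' if c == '1' else '1' for c in gamma)
    return gamma, epsilon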
# see description.adoc
from pprint import pprint
def read_daily_input(filename):
''' Read lines from file with given input name
cast to daily required type and return list '''
with open(filename) as input_file:
local_list = input_file.readlines()
return_list = [item.strip() for item in local_list]
return return_list
def transpose_input(daily_list):
''' Transpose list for analysis
'''
item_length = len(daily_list[0])
pos_list = [''] * item_length
for item in daily_list:
for i in range(item_length):
pos_list[i] = pos_list[i] + item[i]
return pos_list
def get_most_common_value(transposed_list):
''' Return binary number strings with most common value (0 or 1)
per transposed list index
'''
most_common = ""
number_length = len(transposed_list)
pos_count = [0] * number_length
max_count = len(transposed_list[0])
for i in range(number_length):
pos_count[i] = transposed_list[i].count('1')
most_common = most_common + ('1' if (pos_count[i] >= (max_count - pos_count[i])) else '0')
return most_common
def main():
daily_list = read_daily_input('input03.txt')
power_list = transpose_input(daily_list)
gamma = get_most_common_value(power_list)
epsilon = ''.join(['1' if i == '0' else '0'
for i in gamma])
print(gamma, epsilon)
star1 = int(gamma, base=2) * int(epsilon, base=2)
print(f"Result (*): {star1}")
co2_list = daily_list.copy()
i = 0
while ((len(co2_list) > 1) & (i < len(co2_list[0]))):
co2_transposed_list = transpose_input(co2_list)
gamma_co2 = get_most_common_value(co2_transposed_list)
co2_list = list(filter(lambda co2_value: co2_value[i] == gamma_co2[i], co2_list))
i += 1
oxy_list = daily_list.copy()
i = 0
while ((len(oxy_list) > 1) & (i < len(oxy_list[0]))):
oxy_transposed_list = transpose_input(oxy_list)
epsilon_oxy = ''.join(['1' if i == '0' else '0'
for i in get_most_common_value(oxy_transposed_list)])
oxy_list = list(filter(lambda oxy_value: oxy_value[i] == epsilon_oxy[i], oxy_list))
i += 1
print(co2_list, oxy_list)
star2 = int(co2_list[0], base=2) * int(oxy_list[0], base=2)
print(f"Result (**): {star2}")
if __name__ == "__main__":
main()
Play bingo with the giant squid attached to the sub. *: Win (checksum of the first board to reach bingo). **: Lose (checksum of the last board to reach bingo). Checksum: sum of all unmarked numbers of the winning bingo field * the last number drawn to complete a row/column.
Data is read from the file 'input<two-digit-day>.txt' line by line and preprocessed into bingo_numbers and bingo_fields. Separate functions to mark a number in a field and to check for a complete row/col plus calculation of the checksum. Marking is done by subtracting 100 from the field value (max 99) and later checking for <0. Iterate through numbers and fields. Note the first winning board. Keep an index of winning boards, calculate the checksum of every newly winning board and note the last one.
Got back somewhat into multi-dim lists and some list comprehension - although not everything worked as expected. Yet :)
# see description.adoc
from os import linesep
from pprint import pprint
def read_daily_input(filename):
''' Read lines from file with given input name
cast to daily required type and return list
'''
with open(filename) as input_file:
local_list = input_file.readlines()
local_list = [item.strip() for item in local_list]
bingo_numbers = [int(num) for num in local_list[0].split(',')]
bingo_fields = list()
bingo_field = list()
for item in local_list[2:]:
field_line = [int(num) for num in item.split()]
if (len(field_line) == 0):
bingo_fields.append(bingo_field)
bingo_field = list()
else:
bingo_field.append(field_line)
return bingo_numbers, bingo_fields
def transpose_list(input_list):
''' Transpose list for analysis
'''
item_length = len(input_list[0])
transposed_list = [[] for i in range(item_length)]
for item in input_list:
for i in range(item_length):
transposed_list[i].append(item[i])
return transposed_list
def check_bingo_field(bingo_field):
''' Check single bingo field for full row or full column of negative values
Returns sum of positive values (unmarked), else 0
'''
bingo = False
bingo_result = 0
bingo_columns = transpose_list(bingo_field)
for bingo_line in bingo_field+bingo_columns:
if (all(elem < 0 for elem in bingo_line)):
bingo = True
if bingo:
for bingo_line in bingo_field:
for elem in bingo_line:
bingo_result += elem if elem > 0 else 0
return bingo_result
def mark_bingo_field(bingo_field, new_number):
''' Marks numbers in bingo_field by subtracting 100 (max bingo value = 99)
Returns marked bingo_field
'''
for i_row in range(len(bingo_field)):
for i_col in range(len(bingo_field[i_row])):
if (bingo_field[i_row][i_col] == new_number):
bingo_field[i_row][i_col] -= 100
return bingo_field
def main():
bingo_numbers, bingo_fields = read_daily_input('input04.txt')
pprint(bingo_numbers)
pprint(bingo_fields)
star1 = 0
star2 = 0
have_won = [False] * len(bingo_fields)
for num in bingo_numbers:
for i, bingo_field in enumerate(bingo_fields):
bingo_field = mark_bingo_field(bingo_field, num)
checksum = check_bingo_field(bingo_field)
if (checksum > 0):
if (star1 == 0):
star1 = num * check_bingo_field(bingo_field)
print(num, checksum, star1)
if not have_won[i]:
star2 = num * check_bingo_field(bingo_field)
print(i, num, checksum, star2)
have_won[i] = True
print(f"Result (*): {star1}")
print(f"Result (**): {star2}")
if __name__ == "__main__":
main()
Avoid hydrothermal vent line clouds. *: Find hot-spots on the grid with >1 lines crossing, considering only vertical & horizontal lines. **: Find hot-spots including diagonal lines (45° only). Return the number of hot-spots on a 1000x1000 grid.
Data is read from the file 'input<two-digit-day>.txt' line by line and preprocessed → a list of lists with two coordinates (tuples of x, y) each. *: Check if a line is horizontal or vertical, then add +1 to the grid; increase the hot-spot count when a grid point reaches 2. **: Add a second loop to additionally process the diagonal lines and increase the count accordingly.
Quite straightforward, just a few small traps wrt. processing order of hor/vert lines vs. diagonal lines
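A compact way to walk any of the three line types (horizontal, vertical, 45° diagonal) is to step with the sign of the deltas; a minimal sketch (line_points is a hypothetical helper, not the loops used below):
def line_points(x1, y1, x2, y2):
    ''' Yield every grid point on a horizontal, vertical or 45-degree line '''
    dx = (x2 > x1) - (x2 < x1)                 # sign of the x step: -1, 0 or 1
    dy = (y2 > y1) - (y2 < y1)                 # sign of the y step
    steps = max(abs(x2 - x1), abs(y2 - y1))    # number of steps along the line
    for i in range(steps + 1):
        yield x1 + i * dx, y1 + i * dy
Feeding all yielded points of all lines into a collections.Counter and counting coordinates that occur at least twice then gives the hot-spot numbers.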
# see description.adoc
from os import linesep
from pprint import pprint
def read_daily_input(filename):
''' Read lines from file with given input name
cast to daily required type and return list
'''
with open(filename) as input_file:
local_list = input_file.readlines()
local_list = [item.strip() for item in local_list]
return_list = list()
for item in local_list:
start_str, stop_str = item.split("->")
start = tuple(int(elem) for elem in start_str.split(','))
stop = tuple(int(elem) for elem in stop_str.split(','))
return_list.append([start, stop])
return return_list
def transpose_list(input_list):
''' Transpose list for analysis
'''
item_length = len(input_list[0])
transposed_list = [[] for i in range(item_length)]
for item in input_list:
for i in range(item_length):
transposed_list[i].append(item[i])
return transposed_list
def generate_field(line_list):
''' Generate 2D field from coordinate list
star1: only horizontal & vertical lines are considered
'''
my_field = [[0 for i in range(1000)] for j in range(1000)]
mine_count1 = 0
mine_count2 = 0
for line in line_list:
x1, y1 = line[0]
x2, y2 = line[1]
if ((x1 == x2) | (y1 == y2)):
for x in range(min(x1, x2), max(x1, x2)+1):
for y in range(min(y1, y2), max(y1, y2)+1):
my_field[x][y] += 1
if (my_field[x][y] == 2):
mine_count1 += 1
mine_count2 = mine_count1
for line in line_list:
x1, y1 = line[0]
x2, y2 = line[1]
if ((x1 == x2) | (y1 == y2)):
continue
x = x1
dx = -1 if x1>x2 else 1
y = y1
dy = -1 if y1>y2 else 1
while(x!=(x2+dx)):
my_field[x][y] += 1
if (my_field[x][y] == 2):
mine_count2 += 1
x += dx
y += dy
return my_field, mine_count1, mine_count2
def main():
daily_list = read_daily_input('input05.txt')
line_field, star1, star2 = generate_field(daily_list)
print(f"Result (*): {star1}")
print(f"Result (**): {star2}")
if __name__ == "__main__":
main()
Calculate exp. growth of lanternfish. *: After 80 days. **: After 256 days.
Data is read from the file 'input<two-digit-day>.txt' line by line (one line today…) and preprocessed. Being wary of the mentioned exp. growth, I chose a list with bins for every counter state, i.e. 10 fish with timer "3" → list[3] = 10. One iteration then just has to manage bin "0" (1x to "9", 1x added to "7") and afterwards shift all bins >0 down one bin. Glad I did for star 2.
Exp. growth does not always have to be scary.
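The same bin idea condensed with collections.deque (9 bins for timers 0..8); a minimal alternative sketch (count_fish is a hypothetical helper), not the 10-bin list used below:
from collections import deque

def count_fish(timers, days):
    ''' timers: list of initial fish timers; returns the population after the given number of days '''
    bins = deque(timers.count(t) for t in range(9))   # bins[t] = number of fish with timer t
    for _ in range(days):
        spawning = bins.popleft()    # fish at timer 0 ...
        bins[6] += spawning          # ... restart at timer 6 ...
        bins.append(spawning)        # ... and each spawns a new fish at timer 8
    return sum(bins)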
# see description.adoc
from pprint import pprint
def read_daily_input(filename):
''' Read lines from file with given input name
cast to daily required type and return list
'''
with open(filename) as input_file:
local_list = [item.strip() for item in input_file.readlines()]
numbers_list = [int(elem) for elem in local_list[0].split(',')]
return_list = [numbers_list.count(i) for i in range(10)]
return return_list
def iterate_one_day(fishcount_list):
''' Decrease counters by 1, spawn new 8's by 0
Return new list
'''
fishcount_list[9] = fishcount_list[0]
fishcount_list[7] += fishcount_list[0]
for i in range(1, len(fishcount_list)):
fishcount_list[i-1] = fishcount_list[i]
fishcount_list[9] = 0
return fishcount_list
def main():
daily_list = read_daily_input('input06.txt')
pprint(daily_list)
for i in range(80):
daily_list = iterate_one_day(daily_list)
print(i+1, daily_list)
star1 = sum(daily_list)
print(f"Result (*): {star1}")
for i in range(256-80):
daily_list = iterate_one_day(daily_list)
print(i+1, daily_list)
star2 = sum(daily_list)
print(f"Result (**): {star2}")
if __name__ == "__main__":
main()
Align the crab subs horizontally at a single position with minimum fuel to escape the whale. Star1 (*): Linear fuel consumption with distance. Star2 (**): Fuel consumption per step increases linearly with distance (1st step costs 1, 2nd costs 2, 3rd costs 3, …).
Data is read from the file 'input<two-digit-day>.txt' line by line (one long line today…) and preprocessed. Two functions: the first star calculates the sum of the differences, the second star calculates the sum of the sums over range(difference + 1), to account for the increasing fuel need.
Basic algebra: had to recheck the formula for the sum of the first n integers :)
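The formula in question: moving n positions costs 1 + 2 + … + n = n*(n+1)/2 fuel, so the inner sum(range(…)) in the star2 function below could also be written in closed form, e.g.:
def inc_fuel(distance):
    ''' Star2 fuel for one crab: 1 + 2 + ... + distance, via the closed form '''
    return distance * (distance + 1) // 2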
# see description.adoc
from pprint import pprint
def read_daily_input(filename):
''' Read lines from file with given input name
cast to daily required type and return list
'''
with open(filename) as input_file:
local_list = [item.strip() for item in input_file.readlines()]
return_list = [int(elem) for elem in local_list[0].split(',')]
return return_list
def align_on_position_lin_fuel(crab_positions, align_position):
''' Compute fuel needed to align crab's positions on align_position,
star1: linear fuel need with distance
Return amount of fuel (1 per position in-/decrement)
'''
max_pos = max(crab_positions)
if (align_position > max_pos):
return 0
fuel_needed = [abs(crab_positions[i]-align_position) for i in range(len(crab_positions))]
return sum(fuel_needed)
def align_on_position_inc_fuel(crab_positions, align_position):
''' Compute fuel needed to align crab's positions on align_position,
star2: growing fuel need per distance with higher distance (1:1 + 2:2 + 3:3 + ...)
Return amount of fuel (1 per position in-/decrement)
'''
max_pos = max(crab_positions)
if (align_position > max_pos):
return 0
fuel_needed = [sum(range(abs(crab_positions[i]-align_position)+1)) for i in range(len(crab_positions))]
return sum(fuel_needed)
def main():
daily_list = read_daily_input('input07.txt')
max_pos = max(daily_list)
min_fuel = sum(daily_list)
for i in range(max_pos):
pos_fuel = align_on_position_lin_fuel(daily_list, i)
if (pos_fuel == 0):
continue
if (pos_fuel < min_fuel):
min_fuel = pos_fuel
print(i, min_fuel)
star1 = min_fuel
print(f"Result (*): {star1}")
min_fuel = sum(daily_list)*sum(daily_list)
for i in range(max_pos):
pos_fuel = align_on_position_inc_fuel(daily_list, i)
if (pos_fuel == 0):
continue
if (pos_fuel < min_fuel):
min_fuel = pos_fuel
print(i, min_fuel)
star2 = min_fuel
print(f"Result (*): {star1}")
print(f"Result (**): {star2}")
if __name__ == "__main__":
main()
Interpret a garbled seven-segment display with two data structures per line (input | output). Star1 (*): Identify the easy digits by counting only output digits with a uniquely identifiable length (1, 4, 7, 8). Star2 (**): Identify all digits, construct a single output number from the digits and sum all output numbers.
Data is read from the file 'input<two-digit-day>.txt' line by line and preprocessed into a list of (input | output) lines. Star1: Just check the output digits for their length and count the uniquely identifiable digits. Star2: Identify the ambiguous digits by checking whether they contain the segments of 'one' resp. the segments of 'four' without 'one' ("crochet"), construct the output number digit by digit as a string and return int(result).
Had to sort out the identification of the digits on paper for a lean solution. Long live the pencil!
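The paper deduction boils down to set containment: with one = segments of the decoded '1' and crochet = segments of '4' minus '1', a 5-segment digit is 3 if it contains one, 5 if it contains crochet, else 2; a 6-segment digit is 0 if it lacks part of crochet, 9 if it contains one, else 6. A minimal set-based sketch of that rule (classify is a hypothetical helper; the solution below uses all(... in ...) checks instead):
def classify(digit, one, crochet):
    ''' digit, one, crochet: sets of segment letters; returns the decoded digit as a string '''
    by_length = {2: '1', 3: '7', 4: '4', 7: '8'}       # uniquely identifiable lengths
    if len(digit) in by_length:
        return by_length[len(digit)]
    if len(digit) == 5:                                # 2, 3 or 5
        return '3' if one <= digit else '5' if crochet <= digit else '2'
    return '0' if not crochet <= digit else '9' if one <= digit else '6'   # 0, 6 or 9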
# see description.adoc
import io
from pprint import pprint
def read_daily_input(filename):
''' Read lines from file with given input name
cast to daily required type and return list
'''
with open(filename) as input_file:
local_list = [item.strip() for item in input_file.readlines()]
digit_list = [elem.split() for elem in local_list]
return digit_list
def identify_output_number(io_line):
''' Parse digits to identify the numbers
Use 'one' and ('four' minus 'one') as distinguishing feature for 5 and 6 active segments
Return output number as int
'''
len_line = [len(digit) for digit in io_line]
digit_presence = [len_line.count(i) for i in range(7)]
# print(digit_presence)
one = list(next(obj for obj in io_line if len(obj) == 2))
four = list(next(obj for obj in io_line if len(obj) == 4))
crochet = [seg for seg in four if seg not in one]
# print(one, four, crochet)
output = False
result = ''
for digit in io_line:
brightness = len(digit)
if (brightness == 1):
output = True
continue
if output:
if (brightness == 2):
result += '1'
if (brightness == 3):
result += '7'
if (brightness == 4):
result += '4'
if (brightness == 7):
result += '8'
if (brightness == 5): # 2, 3 or 5
if (all(seg in digit for seg in one)):
result += '3'
elif (all(seg in digit for seg in crochet)):
result += '5'
else:
result += '2'
if (brightness == 6): # 0, 6 or 9
if (not all(seg in digit for seg in crochet)):
result += '0'
elif (all(seg in digit for seg in one)):
result += '9'
else:
result += '6'
return int(result)
def main():
daily_list = read_daily_input('input08.txt')
easy_digits = 0
for line in daily_list:
output = False
for digit in line:
brightness = len(digit)
if (brightness == 1):
output = True
if output:
if ((brightness == 2) | (brightness == 3) | (brightness == 4) | (brightness == 7)):
easy_digits += 1
star1 = easy_digits
print(f"Result (*): {star1}")
sum_output = 0
for line in daily_list:
sum_output += identify_output_number(line)
star2 = sum_output
print(f"Result (**): {star2}")
if __name__ == "__main__":
main()
Determine 2D minima in cave floor (height: 0 .. 9)
Star1 (*): Consider adjacent (l, r, t, b, no diagonals) fields
Star2 (**): Fill minima up to height 8 and determine size (extension in positions) of basin. Multiply 3 largest basins.
Data is read from the file 'input<two-digit-day>.txt' line by line and preprocessed into a 2D int list.
Star1: Straightforward, check every position for lower adjacent fields. Just make sure to get the comparisons right.
Star2: Take a recursive approach comparable to flood fill (thanks @rdmueller for the pointer in Slack). Don’t forget to mark visited positions…
No easy hacking with a head full of booster after-effects…
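For reference, the same flood fill can be done iteratively with an explicit stack, which avoids Python's recursion limit on very large basins; a minimal sketch (basin_size is a hypothetical helper that, like fill_basin_recursive() below, marks visited cells with -1):
def basin_size(height_map, start_x, start_y):
    ''' Size of the basin around a low point; 9s bound the basin, visited cells become -1 '''
    stack = [(start_x, start_y)]
    size = 0
    while stack:
        x, y = stack.pop()
        if not (0 <= x < len(height_map) and 0 <= y < len(height_map[0])):
            continue                            # off the map
        if height_map[x][y] in (9, -1):         # wall or already visited
            continue
        height_map[x][y] = -1
        size += 1
        stack.extend([(x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)])
    return size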
# see description.adoc
from pprint import pprint
def read_daily_input(filename):
''' Read lines from file with given input name
cast to daily required type and return list
'''
with open(filename) as input_file:
local_list = [item.strip() for item in input_file.readlines()]
return_list = [[int(digit) for digit in list(line)] for line in local_list]
return return_list
def check_adjacent_for_minimum(height_map, x, y):
''' Checks adjacent positions for lower values, consider boundary
x: row, y: col
Returns 0 if no minimum, else height value + 1
'''
if (x>=len(height_map)):
return 0
if (y>=len(height_map[0])):
return 0
min_value = True
nominal_value = height_map[x][y]
if (x!=0):
if (height_map[x-1][y] <= nominal_value):
min_value = False
if (x!=len(height_map)-1):
if (height_map[x+1][y] <= nominal_value):
min_value = False
if (y!=0):
if (height_map[x][y-1] <= nominal_value):
min_value = False
if (y!=len(height_map[0])-1):
if (height_map[x][y+1] <= nominal_value):
min_value = False
return (nominal_value+1) if min_value else 0
def fill_basin_recursive(height_map, min_x, min_y):
''' Get basin size for a minimum coordinate
Returns basin size (number of places)
'''
dx = len(height_map)
dy = len(height_map[0])
basin_size = 0
def fill(x,y):
if (height_map[x][y] == 9): # 9 = limit of basin
return
if (height_map[x][y] == -1): # -1 = already visited
return
nonlocal basin_size
basin_size += 1
height_map[x][y] = -1
if (x!=0):
fill(x-1, y)
if (x!=len(height_map)-1):
fill(x+1, y)
if (y!=0):
fill(x, y-1)
if (y!=len(height_map[0])-1):
fill(x, y+1)
fill(min_x,min_y)
return basin_size
def main():
daily_list = read_daily_input('input09.txt')
sum_mins = 0
basins = []
for x in range(len(daily_list)):
for y in range(len(daily_list[0])):
valley_value = check_adjacent_for_minimum(daily_list, x, y)
if (valley_value > 0):
sum_mins += valley_value
basins.append(fill_basin_recursive(daily_list, x, y))
star1 = sum_mins
print(f"Result (*): {star1}")
print(basins)
prod_basins = 1
for i in range(3):
max_basin = max(basins)
prod_basins *= max_basin
basins.remove(max_basin)
star2 = prod_basins
print(f"Result (**): {star2}")
if __name__ == "__main__":
main()
Navigation Subsystem on the fritz: Find corrupt data
Star1 (*): Find closing brackets (of four kinds ([{<>}])) with no opening counterpart (corrupt lines) and calculate score
Star2 (**): Discard corrupt lines and calculate scores for incomplete lines and take middle score of result list
Data is read from the file 'input<two-digit-day>.txt' line by line and preprocessed into a list of strings.
Star1: Clean each line from completed pairs until line length stays constant. Then check for any remaining closing bracket and use that to calculate score
Star2: Use cleaned lines from Star1 and iterate from back to forth through chars to calculate score
Simple counting of brackets was not enough for part1, I had to switch to actually considering opening and closing brackets. I’m glad that removing full pairs worked rather well - also for part2.
But of course I fell once more for the "middle position" in a list, with the index starting at 0…
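A stack is the more common tool for this kind of bracket matching; a minimal sketch of that alternative (check_line and PAIRS are hypothetical names, not the pair-removal used below):
PAIRS = {'(': ')', '[': ']', '{': '}', '<': '>'}

def check_line(line):
    ''' Returns ('corrupt', offending_char) or ('incomplete', remaining_openers) '''
    stack = []
    for c in line:
        if c in PAIRS:                            # opening bracket
            stack.append(c)
        elif not stack or PAIRS[stack.pop()] != c:
            return 'corrupt', c                   # closer does not match the last opener
    return 'incomplete', stack
The corrupt score then comes from the offending character, and the completion score from walking the remaining openers back to front, just like count_brackets() below.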
# see description.adoc
import io
from pprint import pprint
def read_daily_input(filename):
''' Read lines from file with given input name
cast to daily required type and return list
'''
with open(filename) as input_file:
local_list = [item.strip() for item in input_file.readlines()]
return local_list
def parse_brackets(command_line):
''' parse bracket types and throw return upon incorrect closing type
Return values:
): 3 points.
]: 57 points.
}: 1197 points.
>: 25137 points.
else 0
'''
old_len = len(command_line)+1
while old_len > len(command_line):
old_len = len(command_line)
command_line = command_line.replace("()", "")
command_line = command_line.replace("[]", "")
command_line = command_line.replace("{}", "")
command_line = command_line.replace("<>", "")
bracket_pos = [command_line.find(c) for c in [')',']','}','>']]
bracket_pos = [999 if item == -1 else item for item in bracket_pos]
min_pos = min(bracket_pos)
if (all(elem == min_pos for elem in bracket_pos)):
return 0, command_line
if (bracket_pos.index(min_pos) == 0):
return 3, command_line
elif (bracket_pos.index(min_pos) == 1):
return 57, command_line
elif (bracket_pos.index(min_pos) == 2):
return 1197, command_line
elif (bracket_pos.index(min_pos) == 3):
return 25137, command_line
return 0, command_line
def count_brackets(command_line):
''' count bracket types from back to forth and calculate score
): 1 points.
]: 2 points.
}: 3 points.
>: 4 points.
'''
score = 0
for i in range(len(command_line)-1, -1, -1):
c = command_line[i]
if (c == '('):
score = score*5 + 1
elif (c == '['):
score = score*5 + 2
elif (c == '{'):
score = score*5 + 3
elif (c == '<'):
score = score*5 + 4
return score
def main():
daily_list = read_daily_input('input10.txt')
syntax_sum = 0
incomplete_lines = []
for line in daily_list:
syntax_value, parsed_line = parse_brackets(line)
if (syntax_value == 0):
incomplete_lines.append(parsed_line)
else:
syntax_sum += syntax_value
star1 = syntax_sum
print(f"Result (*): {star1}")
scores = []
for line in incomplete_lines:
scores.append(count_brackets(line))
scores.sort()
star2 = scores[len(scores)//2] # middle score of the sorted (odd-length) list
print(f"Result (**): {star2}")
if __name__ == "__main__":
main()
Christmas lights had to be switched off for the bioluminescently flashing dumbo octopuses. Navigate by the flashing of the squids.
Star1 (*): Squids gain 1 energy per step, flash at energy level >9 and also charge the 8 adjacent squids, which might then also flash within the same step. Calculate the number of flashes in 100 steps.
Star2 (**): Iterate the squid game further until all squids flash together for the first time in a single step.
Data is read from the file 'input<two-digit-day>.txt' line by line and preprocessed into a 2D list of ints.
Star1: Iterate step by step. Small array, no need for recursion. Spread each flash to the adjacent squids and mark flashed squids as 0. Important: keep them at level 0 for the rest of the step.
Star2: Continue iterating until the flash count of a single step equals the number of squids for the first time.
Straightforward. Some hiccups like counting the step as 195 vs. 196, but all manageable with the test input :)
# see description.adoc
import io
from pprint import pprint
def read_daily_input(filename):
''' Read lines from file with given input name
cast to daily required type and return list
'''
with open(filename) as input_file:
local_list = [item.strip() for item in input_file.readlines()]
return_list = [[int(digit) for digit in line] for line in local_list]
return return_list
def do_single_step(energy_levels):
''' Iterate energy levels through single step
Return new energy_levels, number of flashes (energy levels >9), bool if flash count equals all squids
'''
flashes = 0
depth = len(energy_levels)
width = len(energy_levels[0])
energy_levels = [[energy+1 for energy in line] for line in energy_levels]
old_flashes = -1
while (flashes > old_flashes):
old_flashes = flashes
for x in range(depth):
for y in range(width):
if (energy_levels[x][y]>9):
flashes +=1
energy_levels[x][y] = 0
adjacents = [(x-1,y+1),(x,y+1),(x+1,y+1),(x-1,y),(x+1,y),(x-1,y-1),(x,y-1),(x+1,y-1)]
for adj in adjacents:
if ((0 <= adj[0] < depth) and (0 <= adj[1] < width)):
if (energy_levels[adj[0]][adj[1]] != 0):
energy_levels[adj[0]][adj[1]] += 1
return energy_levels, flashes, flashes == depth*width
def main():
daily_list = read_daily_input('input11.txt')
sum_flashes = 0
star2 = 0
for i in range(100):
daily_list, flash_count, is_sync = do_single_step(daily_list)
sum_flashes += flash_count
if (is_sync and (star2 == 0)):
star2 = i+1
star1 = sum_flashes
print(f"Result (*): {star1}")
if (star2 == 0):
i = 100
is_sync = False
while not is_sync:
daily_list, flash_count, is_sync = do_single_step(daily_list)
i += 1
star2 = i
print(f"Result (**): {star2}")
if __name__ == "__main__":
main()
Map the cave system with small (lowercase) and large (uppercase) caves.
Star1 (*): How many distinct paths through the caves are there without visiting any small cave twice?
Star2 (**): How many distinct paths are there when a SINGLE small cave may be visited twice?
Data is read from the file 'input<two-digit-day>.txt' line by line and preprocessed into a set of caves and a dict of links.
Star1: Recursively walk from cave to cave through all of a cave’s links, starting at 'start' and ending at 'end'. Discard small caves that are already in the path. Remove duplicate paths and those not ending in 'end' (an artifact of spawning a new path for every link, as my other approaches did not run reliably through all options…).
Star2: Repeat Star1, only this time allow a small cave to spawn a new path twice, if no other small cave is already in the path twice. Takes a while with my anti-optimized approach…
Got reminded that recursion without practice and a template can be a long series of pitfalls (especially in a cave system). Plus once more: read the task thoroughly, as my VS Code crashed while trying to run through all paths with any number of small caves allowed twice…
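For comparison, the path count can also be computed without materializing every path, by recursing with the set of visited small caves; a minimal sketch (count_paths is a hypothetical helper, assuming links is the same cave -> neighbours dict built below):
def count_paths(links, cave='start', visited=frozenset(), twice_used=False):
    ''' Number of paths from cave to 'end'; twice_used: the single allowed double visit is spent '''
    if cave == 'end':
        return 1
    if cave.islower() and cave in visited:
        if twice_used or cave == 'start':
            return 0
        twice_used = True                        # spend the single allowed double visit
    visited = visited | {cave}
    return sum(count_paths(links, nxt, visited, twice_used) for nxt in links[cave])
Star1 would then be count_paths(links, twice_used=True), star2 simply count_paths(links).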
# see README.adoc
import io
from pprint import pprint
def read_daily_input(filename):
''' Read lines from file with given input name
cast to daily required type and return list
'''
with open(filename) as input_file:
local_list = [item.strip() for item in input_file.readlines()]
connection_list = [item.split('-') for item in local_list]
caves = set([item for sublist in connection_list for item in sublist])
connectors = dict()
for cave in caves:
for connection in connection_list:
if cave == 'end':
continue
if cave in connection:
if not (cave in connectors.keys()):
connectors[cave] = []
for cv in connection:
if ((cv != cave) and (cv != 'start')):
connectors[cave].append(cv)
return caves, connectors
def find_paths(caves, links, star = 1):
''' find number of paths from 'start' to 'end' by recursively iterating
star1: visit small caves (lowercase) only once
star2: visit single small cave twice
'''
paths = list()
def go_step(path, new_cave):
path.append(new_cave)
if new_cave == 'end':
return
for cave in links[new_cave]:
if (all(c.islower() for c in cave) and (cave in path)):
if (star == 1):
continue
lowies = [cv for cv in path if all(c.islower() for c in cv)]
if (any(path.count(cv)==2 for cv in lowies)):
continue
paths.append(path.copy())
go_step(paths[-1], cave)
paths.append([])
go_step(paths[-1], 'start')
paths = [p for p in paths if p[-1] == 'end']
paths = [p for i,p in enumerate(paths) if p not in paths[i+1:]]
# pprint(paths)
return paths
def main():
caves, links = read_daily_input('input12.txt')
star1 = len(find_paths(caves, links))
print(f"Result (*): {star1}")
print("Path finding for Star2 will take some time...")
star2 = len(find_paths(caves, links, 2))
print(f"Result (**): {star2}")
if __name__ == "__main__":
main()
Get code by setting dots on transparent paper and folding it.
Star1 (*): How many dots are visible on the sheet after the first fold?
Star2 (**): Which 8 letters are visible after all folds?
Data is read from the file 'input<two-digit-day>.txt' line by line and preprocessed into a sheet with '1' for dot and '0' for transparent, as well as a list of folds.
Star1: Apply a single fold and count the visible dots.
Star2: Apply all folds and display the eight letters from the dots. Be careful to move the right lines: count positions outward from the folding line on both sides, otherwise the output will be illegible…
Took some time to find the error (garbled characters): I did not iterate outward from the fold line but from the outside in, leading to a pixel shift on some folds…
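Folding can also be done directly on a set of dot coordinates by mirroring everything beyond the fold line; a minimal sketch (fold_dots is a hypothetical helper assuming dots is a set of (x, y) tuples), not the 2D-list version used below:
def fold_dots(dots, axis, coord):
    ''' Mirror all dots beyond the fold line back onto the kept half '''
    folded = set()
    for x, y in dots:
        if axis == 'x' and x > coord:
            x = 2 * coord - x
        elif axis == 'y' and y > coord:
            y = 2 * coord - y
        folded.add((x, y))
    return folded
Star1 would then simply be the size of the set after the first fold.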
# see README.adoc
import math
from pprint import pprint
def read_daily_input(filename):
''' Read lines from file with given input name
cast to daily required type and return list
'''
with open(filename) as input_file:
local_list = [item.strip() for item in input_file.readlines()]
coordinates = [item.split(',') for item in local_list[:local_list.index('')]]
fold_instructions = [item.split()[2] for item in local_list[local_list.index('')+1:]]
return coordinates, fold_instructions
def preprocess_input(coordinates_list, fold_instructions):
''' Preprocess daily input
Put coordinates into sheet, parse fold_instructions
Return sheet with dots, fold lines
! BEWARE: 2d list 'sheet' is flipped & rotated vs. instructions
'''
coordinates = [[int(x), int(y)] for (x, y) in coordinates_list]
max_x = max([x for (x, __) in coordinates])
max_y = max([y for (__, y) in coordinates])
sheet = [[0 for y in range(max_y+1)] for x in range(max_x+1)]
for point in coordinates:
sheet[point[0]][point[1]] = 1
folds=[[axis, int(coord)] for (axis, coord) in (item.split('=') for item in fold_instructions)]
return sheet, folds
def transpose_list(any_list):
''' Transpose list for analysis
'''
return [[row[i] for row in any_list] for i in range(len(any_list[0]))]
def fold_sheet(sheet, fold_axis, fold_coord):
''' "Fold" sheet along axis on line coord.
superpose inverted second half with first half and discard second half
Return new sheet
! BEWARE: 2d list 'sheet' is flipped & rotated vs. instructions
'''
max_row = len(sheet)
max_col = len(sheet[0])
if fold_axis == 'y': # fold along col
for row in range(max_row):
for i, col in enumerate(range(fold_coord+1, max_col)):
sheet[row][fold_coord-1-i] = 1 if (sheet[row][fold_coord-1-i]+sheet[row][col] > 0) else 0
folded_sheet = [item[:fold_coord] for item in sheet]
return folded_sheet
else: # fold along row
for i, row in enumerate(range(fold_coord+1, max_row)):
for col in range(max_col):
sheet[fold_coord-1-i][col] = 1 if (sheet[fold_coord-1-i][col]+sheet[row][col] > 0) else 0
folded_sheet = [item for item in sheet[:fold_coord]]
return folded_sheet
def main():
coordinates, fold_instructions = read_daily_input('input13.txt')
sheet, folds = preprocess_input(coordinates, fold_instructions)
first_folded_sheet = fold_sheet(sheet, folds[0][0], folds[0][1])
star1 = sum([sum(row) for row in first_folded_sheet])
print(f"Result (*): {star1}")
fully_folded_sheet = sheet.copy()
for fold in folds:
fully_folded_sheet = fold_sheet(fully_folded_sheet, fold[0], fold[1])
star2_list = transpose_list(fully_folded_sheet)
for row in star2_list:
print(''.join(['█' if c == 1 else ' ' for c in row]))
star2 = 'see above'
print(f"Result (**): {star2}")
if __name__ == "__main__":
main()
Polymerization to protect the outer sub shell. Provided: the initial polymer string and the reactions (char pairs into which a new char is inserted).
Star1 (*): 10 polymerization steps: highest element number - lowest element number
Star2 (**): 40 polymerization steps: highest element number - lowest element number
Data is read from the file 'input<two-digit-day>.txt' line by line and preprocessed into the polymer as well as a dict of reactions (e.g. 'AB': 'C').
Star1: Classic string action. 10 iterations work well. Easy to process and easy to count…
Star2: 40 iterations do not fit into RAM. New approach: count and manage letter pairs. Worked well, except for my own stupidity.
Had that feeling all afternoon that I just needed to find one little glitch, as the test data worked but the real input did not. But I had not tested against the 40-cycle test data…
Only found the error much too late: no reset after the first 10 cycles used to check for a match with star1 → ran 50 cycles.
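A tiny worked example of the pair bookkeeping: with the rule 'NN' -> 'C', n pairs 'NN' turn into n pairs 'NC' plus n pairs 'CN' in one step; the actual string is never stored, and the final element counts are the counts of pairs starting with each letter, plus one for the last character of the original polymer (which never changes). The same step, sketched with collections.Counter (equivalent in spirit to single_step_count() below, assuming every occurring pair has a rule, as in the puzzle input):
from collections import Counter

def step(paircount, reactions):
    ''' One polymerization step on pair counts only '''
    new_count = Counter()
    for (left, right), n in paircount.items():    # a 2-char key unpacks into its two letters
        middle = reactions[left + right]
        new_count[left + middle] += n
        new_count[middle + right] += n
    return new_count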
# see README.adoc
import math
from pprint import pprint
def read_daily_input(filename):
''' Read lines from file with given input name
cast to daily required type and return list
'''
with open(filename) as input_file:
local_list = [item.strip() for item in input_file.readlines()]
initial_polymer = local_list[0]
reactions = {item.split('->')[0].strip():item.split('->')[1].strip() for item in local_list[2:]}
return initial_polymer, reactions
def single_step(polymer, reactions):
''' Iterate one polymerization step by inserting monomers
Return new polymer
'''
new_polymer = ''
for i, c in enumerate(polymer[:-1]):
new_polymer += polymer[i]
interface = polymer[i:i+2]
if (interface in reactions.keys()):
new_polymer += reactions[interface]
new_polymer += polymer[-1]
return(new_polymer)
def single_step_count(paircount, reactions):
''' Iterate one polymerization step by counting and adapting the pairs
Return new dict of pair count
'''
for key, n in paircount.copy().items():
paircount[key] -= n
middle = reactions[key]
left, right = key
paircount[left + middle] = n if (left + middle not in paircount.keys()) else paircount[left + middle]+n
paircount[middle + right] = n if (middle + right not in paircount.keys()) else paircount[middle+right]+n
return paircount
def count_pairs(initial_polymer, chars, polypairs):
''' Count pairs as if in polymer
Return max count - min count
'''
char_count = dict()
for c in chars:
char_count[c] = 0
for k in polypairs.keys():
if (c == k[0]):
char_count[c] += polypairs[k]
char_count[initial_polymer[-1]] += 1
return max(char_count.values()) - min(char_count.values())
def main():
polymer, reactions = read_daily_input('input14.txt')
new_polymer = polymer
for __ in range(10):
new_polymer = single_step(new_polymer, reactions)
chars = set([c for c in new_polymer])
char_count = dict()
for c in chars:
char_count[c] = new_polymer.count(c)
star1 = max(char_count.values()) - min(char_count.values())
print(f"Result (*): {star1}")
polypairs = dict()
for i in range(len(polymer)-1):
polypairs[polymer[i:i+2]] = 1 if (polymer[i:i+2] not in polypairs.keys()) else polypairs[polymer[i:i+2]] + 1
for __ in range(10):
polypairs = single_step_count(polypairs, reactions)
star1 = count_pairs(polymer, chars, polypairs)
print(f"Result (*) counting: {star1}")
for __ in range(30):
polypairs = single_step_count(polypairs, reactions)
star2 = count_pairs(polymer, chars, polypairs)
print(f"Result (**): {star2}")
if __name__ == "__main__":
main()
Another 2D cave with a risk level per position, due to chiton-covered cave walls.
Star1 (*): What is the lowest total risk of all paths through the cave?
Star2 (**): Expand the cave to a 5x5 tiled map with incremented risk levels. Get the new path with the lowest risk.
Data is read from the file 'input<two-digit-day>.txt' line by line and preprocessed into a 2D list of risk levels. The first approach was to aggregate the first line into the second line, as the example had no complicated path. O naïve me… (It actually ran, just the result was wrong…)
So with several hints from Slack and Reddit I looked up Dijkstra and implemented it. My first version worked for star1, too, but was considerably slower than the online example. By swapping some lines and adjusting some others it got considerably faster, though
Star1: Dijkstra version 1 (by "me")
Star2: Expand risk layer, and use optimized Dijkstra (inspired by others)
First Dijkstra implementation: Map risk layer to dict with (coord):risk and use accumulated path risk as priority for PriorityQueue. Store visited coords in dict (coord):accumulated_risk.
I actually learned that it is much faster to put the adjacent coords directly into the visited dict, rather than the coordinates freshly popped from the queue. This probably avoids several re-visits of coords before all of their queue entries have been popped…
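The same Dijkstra idea also fits into a few lines with the standard-library heapq instead of PriorityQueue; a minimal sketch (lowest_risk is a hypothetical name, equivalent in spirit to get_path() below):
import heapq

def lowest_risk(risk_layer):
    ''' Total risk of the cheapest path from top-left to bottom-right '''
    max_x, max_y = len(risk_layer), len(risk_layer[0])
    best = {(0, 0): 0}                                   # lowest accumulated risk per coord
    heap = [(0, 0, 0)]                                   # (accumulated risk, x, y)
    while heap:
        risk, x, y = heapq.heappop(heap)
        if (x, y) == (max_x - 1, max_y - 1):
            return risk
        if risk > best.get((x, y), risk):
            continue                                     # stale queue entry
        for nx, ny in ((x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)):
            if 0 <= nx < max_x and 0 <= ny < max_y:
                new_risk = risk + risk_layer[nx][ny]
                if new_risk < best.get((nx, ny), new_risk + 1):
                    best[(nx, ny)] = new_risk
                    heapq.heappush(heap, (new_risk, nx, ny))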
# see README.adoc
import math
from pprint import pprint
from queue import PriorityQueue
def read_daily_input(filename):
''' Read lines from file with given input name
cast to daily required type and return list
'''
with open(filename) as input_file:
local_list = [item.strip() for item in input_file.readlines()]
return_list = [[int(c) for c in line] for line in local_list]
return return_list
def transpose_list(any_list):
''' Transpose list for analysis
'''
return [[row[i] for row in any_list] for i in range(len(any_list[0]))]
def eval_nextline(risk_layer, firstline = False):
''' Evaluate risk for all positions in second line
Return new risklayer
Does not work if optimal path has to go up a line...
'''
less_risk_layer = [row[:] for row in risk_layer]
if (len(less_risk_layer) == 1):
return less_risk_layer
if firstline:
for i in range(len(risk_layer[0])):
risk_factors = [sum(risk_layer[0][:j+1])+sum(risk_layer[1][j:i+1])-risk_layer[0][0] for j in range(i+1)]
# print(i, risk_factors)
less_risk_layer[1][i] = min(risk_factors)
else:
min_risks = [sum(risk_layer[0]) for __ in range(len(risk_layer[0]))]
for start in range(len(risk_layer[0])):
risk_factors = [0 for __ in range(len(risk_layer[0]))]
for i in range(len(risk_layer[0])):
if (i<start):
risk_factors[i] = risk_layer[0][start] + sum(risk_layer[1][i:start])
else:
risk_factors[i] = min([sum(risk_layer[0][start:j+1])+sum(risk_layer[1][j:i+1]) for j in range(start, i+1)])
if (risk_factors[i] < min_risks[i]):
min_risks[i] = risk_factors[i]
# print(i, risk_factors)
less_risk_layer[1] = min_risks
return less_risk_layer[1:]
def get_path(risk_layer):
''' First Dijkstra approach: Create graph (dict) of risk layer and push connections to PriorityQueue.
Return lower right value
'''
risk_graph = {(x, y): r for x, line in enumerate(risk_layer) for y, r in enumerate(line)}
max_x = len(risk_layer)
max_y = len(risk_layer[0])
q = PriorityQueue()
q.put((0, (0, 0)))
visited = {(0, 0): 0}
while not q.empty():
_r, (_x, _y) = q.get()
for adj in {(-1, 0), (1, 0), (0, -1), (0, 1)}:
_nx = _x + adj[0]
_ny = _y + adj[1]
if ((_nx < 0) or (_nx >= max_x) or (_ny < 0) or (_ny >= max_y)):
continue
_nr = _r + risk_graph[(_nx, _ny)]
if (_nx, _ny) == (max_x - 1, max_y - 1):
return _nr
if (((_nx, _ny) in visited.keys()) and (visited[(_nx, _ny)] <= _nr)):
continue
visited[(_nx, _ny)] = _nr
q.put((_nr, (_nx, _ny)))
# print(visited)
def expand_risklayer(risk_layer):
''' Expand map 4x to the right and to the bottom for star2
Increase risk level on each expansion by 1
Returns expanded risklayer
'''
large_risklayer = list(risk_layer).copy()
for x_add in range(4):
chunk = [[((r+x_add)%9+1) for r in line] for line in risk_layer]
for j in range(len(large_risklayer.copy())):
large_risklayer[j] = large_risklayer[j] + chunk[j]
basic_risklayer = large_risklayer.copy()
for y_add in range(4):
for j, line in enumerate(basic_risklayer):
new_line = [((r+y_add)%9+1) for r in line]
large_risklayer.append(new_line)
# print("-------------------")
# for line in new_risklayer:
# print('.'.join(str(i) for i in line))
return large_risklayer
def main():
risk_layer = read_daily_input('input15.txt')
# risk_layer = read_daily_input('input_test.txt')
# for line in risk_layer:
# print('.'.join(str(i) for i in line))
star1 = get_path(risk_layer)
print(f"Result (*): {star1}")
huge_risklayer = expand_risklayer(risk_layer)
star2 = get_path(huge_risklayer)
print(f"Result (**): {star2}")
if __name__ == "__main__":
main()
Decode long hexadecimal packet with several sub-packets
Star1: Sum up version info from all packets and sub-packets.
Star2: Carry out encoded operations in packets and calculate final score of main packet.
Data is read from the file 'input<two-digit-day>.txt' line by line and preprocessed. Today it is a single line, and the preprocessing is the main task of star1…
Star1: Write parser to parse packet hierarchy and sum up version info.
Star2: Improve the parser to carry out the operations as well and calculate the final score of the main packet. As the parser from Star1 already recursively produced a list of all packets, the aggregation operations were just a small addition. Still, I inelegantly copied the parser functions and implemented the code execution separately…
Getting better with recursive stuff. There are probably lots of libs out there to do the parsing with two lines of regex, but my hands-on approach worked too, in the end. Part 2 was thus quite easy to add, once no more duplicate packets ended up in the final packet list.
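A tiny worked example of the bit layout, using the 'D2FE28' literal packet that is also kept as a commented-out test line in the code below:
bits = ''.join(format(int(c, 16), '04b') for c in 'D2FE28')
# 110 100 10111 11110 00101 000
# ver = 6, type = 4 (literal), payload groups 0111 1110 0101 -> 011111100101 = 2021
print(int(bits[:3], 2), int(bits[3:6], 2), int(bits[7:11] + bits[12:16] + bits[17:21], 2))   # 6 4 2021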
# see README.adoc
import math
from pprint import pprint
def read_daily_input(filename):
''' Read lines from file with given input name
cast to daily required type and return list
'''
with open(filename) as input_file:
local_list = [item.strip() for item in input_file.readlines()]
# local_list[0] = 'D2FE28'
# local_list[0] = '8A004A801A8002F478'
bin_list = [n for c in local_list[0] for n in format(int(c, 16), '04b')]
return bin_list
def parse_input(main_stream, packets = []):
''' Parse packet as list of bit-strings
Return list of packets
'''
def read_chunk(stream, n):
''' Reads number n of bits of stream
Returns value of bits and returns rest of stream
'''
return int(''.join(stream[:n]), 2), stream[n:]
def read_literal(stream):
''' Reads and splits sub-packets of 5 bit
Returns list of data numbers and rest of stream
'''
last = False
data = []
while not last:
data_packet = stream[:5]
if (data_packet[0] == '0'):
last = True
data.extend(data_packet[1:])
stream = stream[5:]
return int(''.join(data), 2), stream
def read_single_packet(stream):
''' Reads single packet
Return list of dict(s) and rest of stream
'''
new_packet = {'ver': None, 'type': None, 'op_l': None, 'data': None}
# print(stream)
ver, stream = read_chunk(stream, 3)
new_packet['ver'] = ver
type, stream = read_chunk(stream, 3)
new_packet['type'] = type
# print(ver, type)
if (type == 4):
data, stream = read_literal(stream)
new_packet['data'] = data
# print("new literal packet\n", new_packet)
return [new_packet], stream
else:
op_l, stream = read_chunk(stream, 1)
new_packet['op_l'] = op_l
# print(op_l)
if (op_l == 0):
bit_length, stream = read_chunk(stream, 15)
sub_stream = stream[:bit_length]
new_0packets = []
while ((len(sub_stream) > 10) and (sum([int(c) for c in sub_stream]) > 0)):
bit_packets, sub_stream = read_single_packet(sub_stream)
new_0packets.extend(bit_packets)
return ([new_packet] + new_0packets), stream[bit_length:]
else:
packet_count, stream = read_chunk(stream, 11)
new_1packets = []
# print("sub_packetcount:", packet_count)
while len(new_1packets) < packet_count:
counted_packets, stream = read_single_packet(stream)
new_1packets.extend(counted_packets)
# print("new sub packets\n", new_packets)
# print("new 1 packet\n", new_packets)
return ([new_packet] + new_1packets), stream
# print("main stream: \n", main_stream)
while ((len(main_stream) > 10) and (sum([int(c) for c in main_stream]) > 0)):
new_packets, main_stream = read_single_packet(main_stream)
packets.extend(new_packets)
# print("packets \n", packets)
return packets
def interpret_input(main_stream, packets = []):
''' Parse packet as list of bit-strings and interpret op packets
Return score
'''
def read_chunk(stream, n):
''' Reads number n of bits of stream
Returns value of bits and returns rest of stream
'''
return int(''.join(stream[:n]), 2), stream[n:]
def read_literal(stream):
''' Reads and splits sub-packets of 5 bit
Returns list of data numbers and rest of stream
'''
last = False
data = []
while not last:
data_packet = stream[:5]
if (data_packet[0] == '0'):
last = True
data.extend(data_packet[1:])
stream = stream[5:]
return int(''.join(data), 2), stream
def do_the_op(op, packets):
''' Carry out the operation from op code with data packets
Return result value
'''
if op == 0: # sum
return sum([p['data'] for p in packets])
if op == 1: # product
prod = 1
for p in packets:
prod = prod * p['data'] if p['data'] != None else prod
return prod
if op == 2: # min
return min([p['data'] for p in packets])
if op == 3: # max
return max([p['data'] for p in packets])
if op == 5: # p1 > p2 ? 1 : 0
return 1 if packets[0]['data'] > packets[1]['data'] else 0
if op == 6: # p1 < p2 ? 1 : 0
return 1 if packets[0]['data'] < packets[1]['data'] else 0
if op == 7: # p1 = p2 ? 1 : 0
return 1 if packets[0]['data'] == packets[1]['data'] else 0
def read_single_packet(stream):
''' Reads single packet
Return list of dict(s) and rest of stream
'''
new_packet = {'ver': None, 'type': None, 'op_l': None, 'data': None}
# print(stream)
ver, stream = read_chunk(stream, 3)
new_packet['ver'] = ver
type, stream = read_chunk(stream, 3)
new_packet['type'] = type
# print(ver, type)
if (type == 4):
data, stream = read_literal(stream)
new_packet['data'] = data
# print("new literal packet\n", new_packet)
return [new_packet], stream
else:
op_l, stream = read_chunk(stream, 1)
new_packet['op_l'] = op_l
# print(op_l)
if (op_l == 0):
bit_length, stream = read_chunk(stream, 15)
sub_stream = stream[:bit_length]
new_0packets = []
while ((len(sub_stream) > 10) and (sum([int(c) for c in sub_stream]) > 0)):
bit_packet, sub_stream = read_single_packet(sub_stream)
new_0packets.extend(bit_packet)
result = do_the_op(type, new_0packets)
new_packet['data'] = result
return [new_packet], stream[bit_length:]
else:
packet_count, stream = read_chunk(stream, 11)
new_1packets = []
# print("sub_packetcount:", packet_count)
while len(new_1packets) < packet_count:
counted_packet, stream = read_single_packet(stream)
new_1packets.extend(counted_packet)
# print("new sub packets\n", new_packets)
# print("new 1 packet\n", new_packets)
result = do_the_op(type, new_1packets)
new_packet['data'] = result
return [new_packet], stream
# print("main stream: \n", main_stream)
while ((len(main_stream) > 10) and (sum([int(c) for c in main_stream]) > 0)):
new_packet, main_stream = read_single_packet(main_stream)
packets.extend(new_packet)
# print("packets \n", packets)
return packets
def main():
main_packet = read_daily_input('input16.txt')
# print(''.join(main_packet))
packets = parse_input(main_packet.copy())
# pprint(packets)
star1 = sum([p['ver'] for p in packets])
print(f"Result (*): {star1}")
star2 = interpret_input(main_packet)[0]['data']
print(f"Result (**): {star2}")
if __name__ == "__main__":
main()
Shoot probe into target area under the influence of drag and gravity.
Star1 (*): What is the highest culmination point possible?
Star2 (**): How many start velocities are hitting the mark?
Data is read from the file 'input<two-digit-day>.txt' line by line and preprocessed into target_x and target_y ranges.
Star1: Determine the minimum velocity_x needed to reach the target. Iterate through a large enough range to cover all possibilities. Return the max height.
Star2: Use the length of the results list. Extend the range of velocity_y to negative start values.
There’s always a range+1 initial value that might still hit the target…
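Two bits of closed-form math behind the search bounds: a probe shot with x velocity n drifts a total of 1 + 2 + … + n = n*(n+1)/2 before drag stops it, which is where the minimum-v_x loop below comes from. And assuming the target lies entirely below y = 0 (as in typical inputs), a probe shot upward with velocity v_y comes back to height 0 and next lands at y = -(v_y + 1), so the largest usable v_y is |y_min| - 1 and the best possible height is |y_min| * (|y_min| - 1) / 2, a handy sanity check for the brute-force result.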
# see README.adoc
import math
from pprint import pprint
def read_daily_input(filename):
''' Read lines from file with given input name
cast to daily required type and return list
'''
with open(filename) as input_file:
local_list = [item.strip() for item in input_file.readlines()]
target_area_range = [int(num.strip(', ')) for item in local_list[0].split()[2:4] for num in item.split("=")[1].split('..')]
x = target_area_range[:2]
y = target_area_range[2:]
return x, y
def check_target(pos, target_x, target_y):
''' Check if pos is in target_range
Return bool
'''
x, y = pos
return ((target_x[0] <= x <= target_x[1]) and (target_y[0] <= y <= target_y[1]))
def shoot_probe(velocity, target_x, target_y):
''' Shoot probe with initial velocity.
Return whether target region reached as max height or not as 0
Rules:
The probe's x position increases by its x velocity.
The probe's y position increases by its y velocity.
Due to drag, the probe's x velocity changes by 1 toward the value 0; that is, it decreases by 1 if it is greater than 0, increases by 1 if it is less than 0, or does not change if it is already 0.
Due to gravity, the probe's y velocity decreases by 1.
'''
x, y = (0, 0)
v_x, v_y = velocity
max_height = 0
while x <= max(target_x) and y >= min(target_y):
x += v_x
y += v_y
if (y > max_height):
max_height = y
if check_target((x, y), target_x, target_y):
return True, max_height
v_x = max(0, v_x - 1)
v_y -= 1
return False, 0
def main():
target_x, target_y = read_daily_input('input17.txt')
# target_x, target_y = read_daily_input('input_test.txt')
print('Target area: ', target_x, target_y)
n = 0
sum_num = 0
while sum_num < target_x[0]:
n += 1
sum_num = (n * (n + 1)) / 2
results = []
for v_x in range(n, max(target_x)+1):
for v_y in range((-20*n), 20*n):
mark, result = shoot_probe((v_x, v_y), target_x, target_y)
if (mark):
results.append(result)
star1 = max(results)
print(f"Result (*): {star1}")
star2 = len(results)
print(f"Result (**): {star2}")
if __name__ == "__main__":
main()
Do the math addition homework … in snailfish maths with nested pair numbers, exploding pairs if nesting gets too deep and splitting if numbers get higher than 9…
Star1: Evaluate (explode / split) addition of snailfish number series. Calculate "magnitude" of resulting expression.
Star2: Evaluate all homework snailfish number pairs and find highest magnitude
Data is read from the file 'input<two-digit-day>.txt' line by line and preprocessed.
Star1: Write a parser for the expression by eval() of the string. Ugly, extensive nested ifs/isinstances. Long bug hunt for the rare case of double splits.
Star2: Struggled with shallow-copy effects spoiling the main list. Eval the string expression just prior to usage → another ugly hack…
Do not forget to return directly after a recursive function call, else the active part might get called twice…
Got reminded of the f* by-reference handling of those deeply nested lists.
# see README.adoc
import math
import re
import copy
from pprint import pprint
def read_daily_input(filename):
''' Read lines from file with given input name
cast to daily required type and return list
'''
with open(filename) as input_file:
local_list = [item.strip() for item in input_file.readlines()]
return local_list
def eval_string(sno):
    ''' Reduce a snailfish number directly on its string representation:
        explode the leftmost pair nested inside four pairs, else split the leftmost number > 9
        Note: early string-based draft, main() works on nested lists via evaluate_expression()
    '''
    if (re.search('[a-zA-Z]', sno) != None): # limit risks with eval() below
        return sno
    finished = False
    while not finished:
        finished = True
        bracket_level = 0
        for i, c in enumerate(sno):
            if (c == '['):
                bracket_level += 1
                if (bracket_level == 5): # explode
                    close_i = sno.index(']', i)
                    left_val, right_val = eval(sno[i:close_i+1])
                    left_part, right_part = sno[:i], sno[close_i+1:]
                    left_nums = list(re.finditer(r'\d+', left_part))
                    if left_nums: # add left value to the last number on the left
                        last = left_nums[-1]
                        left_part = left_part[:last.start()] + str(int(last.group()) + left_val) + left_part[last.end():]
                    right_num = re.search(r'\d+', right_part)
                    if right_num: # add right value to the first number on the right
                        right_part = right_part[:right_num.start()] + str(int(right_num.group()) + right_val) + right_part[right_num.end():]
                    sno = left_part + '0' + right_part
                    finished = False
                    break
            elif (c == ']'):
                bracket_level -= 1
        if not finished:
            continue # keep exploding before any split
        split_num = re.search(r'\d\d+', sno) # first number with two or more digits
        if split_num:
            value = int(split_num.group())
            sno = sno[:split_num.start()] + f"[{value//2},{value - value//2}]" + sno[split_num.end():]
            finished = False
    return sno
def evaluate_expression(expression, verbose = True):
''' Evaluate single snailfish number for explode or split
Return new expression
'''
def add_to(element, addition, index = -1):
if isinstance(element, int):
element += addition
return element
else:
element[index] = add_to(element[index], addition, index)
return element
def check_splitfree(my_list, splitfree = True):
if not splitfree:
return my_list, splitfree
for i, item in enumerate(my_list):
if isinstance(item, list):
item, splitfree = check_splitfree(item, splitfree)
if not splitfree:
return my_list, splitfree
elif item > 9:
my_list[i] = [math.floor(float(item)/2), math.ceil(float(item)/2)]
return my_list, False
return my_list, splitfree
finishable = False
if verbose:
print(expression)
while not finishable:
explode_buffer = 0
finishable = True
for i1, item1 in enumerate(expression):
if isinstance(item1,list): # [[a, b], c]
for i2, item2 in enumerate(item1):
if isinstance(item2,list): # [[[a, b], c], d]
for i3, item3 in enumerate(item2):
if isinstance(item3,list): # [[[[a, b], c], d], e]
for i4, item4 in enumerate(item3):
if isinstance(item4, list) and finishable: # explode
finishable = False
if (i4 != 0):
item3[i4-1] = add_to(item3[i4-1], item4[0])
elif (i3 != 0):
item2[i3-1] = add_to(item2[i3-1], item4[0])
elif (i2 != 0):
item1[i2-1] = add_to(item1[i2-1], item4[0])
elif (i1 != 0):
expression[i1-1] = add_to(expression[i1-1], item4[0])
# print('explode1:', expression)
explode_buffer = item4[1]
item3[i4] = 0
elif not finishable:
item3[i4] = add_to(item3[i4], explode_buffer, 0)
explode_buffer = 0
break
elif not finishable:
item2[i3] = add_to(item2[i3], explode_buffer, 0)
explode_buffer = 0
break
elif not finishable:
item1[i2] = add_to(item1[i2], explode_buffer, 0)
explode_buffer = 0
break
elif not finishable:
expression[i1] = add_to(expression[i1], explode_buffer, 0)
explode_buffer = 0
break
if verbose and not finishable:
print('explode2: ', expression)
if finishable:
expression, finishable = check_splitfree(expression, finishable)
# if max_value > 9:
# finishable = False
if verbose:
print('split: ', expression)
return expression
def get_magnitude(expression):
''' Calculate magnitude
'''
left_exp, right_exp = expression
lefty = 3 * left_exp if isinstance(left_exp, int) else 3 * get_magnitude(left_exp)
righty = 2 * right_exp if isinstance(right_exp, int) else 2 * get_magnitude(right_exp)
return lefty + righty
def main():
calc_sheet = read_daily_input('input18.txt')
# calc_sheet = ['[[[[2, [1,12]],4], 3], 6]']
# calc_sheet = ['[[[[0, [5, 8]], [[1, 7], [9, 6]]], [[4, [1, 2]], [[1, 4], 2]]],2]']
# pprint(calc_sheet)
calculus = []
for sno_line in calc_sheet:
if (re.search('[a-zA-Z]', sno_line) == None): # basic eval() protection
expression = list(eval(sno_line))
calculus.append(evaluate_expression(expression, False))
result = calculus[0]
for i, sno in enumerate(calculus[1:]):
# print([result] + [sno])
result = evaluate_expression([result] + [sno], False)
# print('---------------------------\nresult: ', result)
star1 = get_magnitude(result)
print(f"Result (*): {star1}")
mags = []
span = len(calc_sheet)
for i in range(span):
for j in range(i+1, span):
if (re.search('[a-zA-Z]', calc_sheet[i]) == None): # basic eval() protection
lefty = list(eval(calc_sheet[i]))
if (re.search('[a-zA-Z]', calc_sheet[j]) == None): # basic eval() protection
righty = list(eval(calc_sheet[j]))
input1 = [lefty] + [righty]
buffer1 = evaluate_expression(input1.copy(), False)
mags.append(get_magnitude(buffer1))
if (re.search('[a-zA-Z]', calc_sheet[i]) == None): # basic eval() protection
lefty = list(eval(calc_sheet[i]))
if (re.search('[a-zA-Z]', calc_sheet[j]) == None): # basic eval() protection
righty = list(eval(calc_sheet[j]))
input2 = [righty] + [lefty]
buffer2 = evaluate_expression(input2.copy(), False)
mags.append(get_magnitude(buffer2))
star2 = max(mags)
print(f"Result (**): {star2}")
if __name__ == "__main__":
main()
Improve a binary image with image processing (a 3x3 sliding window determines the position in the image processing table).
Star1 (*): Number of lit pixels after two iterations of image processing.
Star2 (**): Number of lit pixels after 50 iterations…
Data is read from the file 'input<two-digit-day>.txt' line by line and preprocessed into a set of bright pixel coordinates and the image_processor list. The catch (thanks to reddit!) was that 9 unlit pixels generate a lit one and vice versa, on an infinite canvas… Two things follow:
Every odd iteration leads to an infinite canvas of lit pixels → an even number of iterations is necessary to get a countable result.
By simply iterating the image processing with a growing canvas, the border starts to get ugly from unhandled border conditions and spoils the result.
The approach here is to "pump" the canvas: every odd iteration grows it (by 8 pixels on every side, determined empirically…) to generate a solid border of lit pixels, then every even iteration’s canvas gets reduced (by 4 pixels per side, same procedure…) to avoid the ugly border condition and end in a cleanly lit border.
Star1: Iterate manually twice
Star2: Further(!) iterate 48 times
Result canvas is plotted.
The same bug was found much faster this time: the iteration happened 52 times.
Eric can be mean with his puzzles. Reddit can be useful.
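For reference, the 3x3 window around a pixel is read row by row as a 9-bit number that indexes the 512-entry image_processor table; a tiny worked example of that step (same idea as get_niner() below):
window = ['0', '0', '0',
          '1', '0', '0',
          '0', '1', '0']            # example 3x3 neighbourhood, '#' = '1', '.' = '0'
index = int(''.join(window), 2)     # 000100010 binary = 34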
# see README.adoc
import math
from pprint import pprint
def read_daily_input(filename):
''' Read lines from file with given input name
cast to daily required type and return list
'''
with open(filename) as input_file:
local_list = [item.strip() for item in input_file.readlines()]
image_processor = list()
dots = set()
read_dots = False
for i, line in enumerate(local_list):
if line == '':
read_dots = True
line_shift = i + round((len(local_list) - i)/2)
if not read_dots:
image_processor.extend([1 if c == '#' else 0 for c in line])
else:
col_shift = round(len(line)/2)
dots.update({(i-line_shift, j-col_shift) for j, c in enumerate(line) if c == '#'})
return image_processor, dots
def process_raw(dots, image_processor, border, verbose = True):
''' Process one iteration of image
Returns new dots
'''
def get_niner(dots, x, y, verbose):
''' Get value of niner-block as bits
'''
niner = ['1' if (dx, dy) in dots else '0' for dx in range(x-1, x+2) for dy in range(y-1, y+2)]
if verbose:
print(''.join(niner), int(''.join(niner),2), image_processor[int(''.join(niner),2)])
return int(''.join(niner),2)
min_x = min([x for x, y in dots])
max_x = max([x for x, y in dots])
min_y = min([y for x, y in dots])
max_y = max([y for x, y in dots])
proc_dots = set()
if image_processor[get_niner(dots, 0, 0, verbose)] == 1:
proc_dots.add((0, 0))
for x in range(min_x-border, max_x+border+1):
for y in range(min_y-border, max_y+border+1):
if image_processor[get_niner(dots, x, y, verbose)] == 1:
proc_dots.add((x, y))
### Trying to fix catch by starting in center... no difference
# for dx in range(1, max_x+border):
# for dy in range(1, max_y+border):
# for x in range(-dx, dx):
# if image_processor[get_niner(dots, x, dy, verbose)] == 1:
# proc_dots.add((x, dy))
# if image_processor[get_niner(dots, x, -dy, verbose)] == 1:
# proc_dots.add((x, -dy))
# for y in range(-dy, dy):
# if image_processor[get_niner(dots, dx, y, verbose)] == 1:
# proc_dots.add((dx, y))
# if image_processor[get_niner(dots, -dx, y, verbose)] == 1:
# proc_dots.add((-dx, y))
if verbose:
plot_dots(proc_dots)
return proc_dots
def plot_dots(dots):
''' Print dots matrix
'''
print()
min_x = min([x for x, y in dots])
max_x = max([x for x, y in dots])
min_y = min([y for x, y in dots])
max_y = max([y for x, y in dots])
for x in range(min_x-1, max_x+2):
line = ['#' if (x, y) in dots else '.' for y in range(min_y-1, max_y+2)]
print(''.join(line))
print()
def main():
image_processor, dots = read_daily_input('input20.txt')
# image_processor, dots = read_daily_input('input_test.txt')
plot_dots(dots)
print('\n\n...processing...\n\n')
dots = process_raw(dots, image_processor, 8, False)
dots = process_raw(dots, image_processor, -4, False)
plot_dots(dots)
star1 = len(dots)
print(f"Result (*): {star1}")
for i in range(24):
dots = process_raw(dots, image_processor, 8, False)
dots = process_raw(dots, image_processor, -4, False)
plot_dots(dots)
star2 = len(dots)
print(f"Result (*): {star1}")
print(f"Result (**): {star2}")
if __name__ == "__main__":
main()