feat: days 1 2 3 done with duckdb
This commit is contained in:
69
solutions/01.sql
Normal file
69
solutions/01.sql
Normal file
@@ -0,0 +1,69 @@
|
||||
-- Window functions below run without ORDER BY; keep rows in file order.
set preserve_insertion_order = true;


-- Day 1 input: one instruction string per line (e.g. 'L123' / 'R45').
-- First character selects the sign (L -> -1, otherwise +1); the rest is
-- the numeric amount.
create or replace table day01_data as
select
    operation,
    if(operation[1] = 'L', -1, 1) as multiplier,
    operation[2:]::int as add
from read_csv(
    'inputs/01/input.txt',
    header = false,
    columns = { 'operation': 'VARCHAR' }
);
|
||||
|
||||
-- Same parsing as day01_data, applied to the sample input for debugging.
create or replace table day01_test as
select
    operation,
    if(operation[1] = 'L', -1, 1) as multiplier,
    operation[2:]::int as add
from read_csv(
    'inputs/01/test.txt',
    header = false,
    columns = { 'operation': 'VARCHAR' }
);
|
||||
|
||||
-- Per-instruction running state, starting from 50.
-- An amount >= 100 contains full wraps: `turns` counts them and `clean_add`
-- is the remainder actually moved on this step.
create or replace table day01_data_states as
select
    rowid,
    operation,
    multiplier,
    add as raw_add,
    floor(add / 100)::int as turns,
    add - (turns * 100) as clean_add,
    -- Fix: order the running sum by rowid explicitly so the result is
    -- deterministic instead of relying solely on preserve_insertion_order.
    50 + sum(clean_add * multiplier) over (
        order by rowid
        rows between unbounded preceding and current row
    ) as state
from day01_data
;
|
||||
|
||||
-- Part 1: count instructions whose resulting state lands exactly on 0.
-- abs(state + 100) % 100 normalises the state; NOTE(review): this
-- normalisation misbehaves for state < -100, but divisibility by 100 is
-- preserved by abs(), so the `state = 0` test below is still correct.
with
states as (
    select
        -- Fix: explicit ORDER BY rowid makes lag() deterministic instead of
        -- depending on insertion order alone.
        lag(abs(state + 100) % 100, 1, 50) over (order by rowid) as previous,
        operation,
        turns,
        abs(state + 100) % 100 as state
    from day01_data_states
)
select count(1) as part1
from states
where state = 0;
|
||||
|
||||
--------------
|
||||
|
||||
-- Part 2 working table: per instruction, how many times the value passed
-- through 0 (full wraps in `turns`, plus at most one extra crossing).
create or replace table day01_part2 as
select
    -- Fix: explicit ORDER BY rowid makes lag() deterministic instead of
    -- depending on insertion order alone.
    lag(state, 1, 50) over (order by rowid) as raw_previous,
    state as raw_current,
    operation,
    raw_previous % 100 = 0 as went_from_zero,
    floor(raw_previous / 100)::int as prev_hundred,
    floor(raw_current / 100)::int as current_hundred,
    -- crossing into a different hundreds band counts once, unless the step
    -- started exactly on a multiple of 100 (already counted previously)
    if(not went_from_zero and prev_hundred != current_hundred, 1, NULL) as hundred_change,
    -- landing exactly on a multiple of 100 counts once
    if((state + 100) % 100 = 0, 1, NULL) as exact_0,
    turns,
    turns + coalesce(exact_0, hundred_change, 0) as total_zero_clicks
from day01_data_states;
|
||||
|
||||
-- Part 2 answer: total zero crossings over the whole input.
select sum(total_zero_clicks) as part2
from day01_part2;
|
||||
|
||||
49
solutions/02.sql
Normal file
49
solutions/02.sql
Normal file
@@ -0,0 +1,49 @@
|
||||
-- Day 2 input: a comma-separated list of 'start-end' ranges on one line;
-- explode the list, then split each range into its numeric bounds.
create or replace table day02_data as
with raw_ranges as (
    select unnest(split(column0, ',')) as ranges
    from read_csv('inputs/02/input.txt', header = false, delim = '\n')
)
select
    split(ranges, '-')[1]::long as range_start,
    split(ranges, '-')[2]::long as range_end
from raw_ranges;
|
||||
|
||||
-- Same parsing as day02_data, applied to the sample input for debugging.
create or replace table day02_test as
with raw_ranges as (
    select unnest(split(column0, ',')) as ranges
    from read_csv('inputs/02/test.txt', header = false, delim = '\n')
)
select
    split(ranges, '-')[1]::long as range_start,
    split(ranges, '-')[2]::long as range_end
from raw_ranges;
|
||||
|
||||
-- Every product id covered by any range, kept as text so its digit string
-- can be sliced and compared below.
create or replace table day02_product_list as
select
    unnest(generate_series(range_start, range_end))::string as product_id
from day02_data;
|
||||
|
||||
-- Part 1: sum the ids whose digit string is one half repeated twice
-- (even length, and the two halves are equal).
select sum(product_id::long) as part1
from day02_product_list
where length(product_id) % 2 = 0
  and product_id[:length(product_id) / 2] = product_id[length(product_id) / 2 + 1:];
|
||||
|
||||
-- Part 2: an id qualifies when its digit string is some block repeated two
-- or more times. Try every chunk width that divides the length and check
-- whether all resulting chunks are identical.
with split_lengths as (
    -- candidate chunk widths: every proper divisor of the id's length
    select
        product_id,
        length(product_id) as len,
        (
            select array_agg(l)
            from generate_series(1, length(product_id) - 1) as t(l)
            where length(product_id) % l = 0
        ) as lengths
    from day02_product_list
),
with_chunks as (
    -- slice the id into consecutive chunk_size-wide pieces
    select
        product_id,
        chunk_size,
        (
            select array_agg(substring(product_id, (j - 1) * chunk_size + 1, chunk_size))
            from generate_series(1, ceil(length(product_id) / chunk_size)::int) as u(j)
        ) as chunks
    from split_lengths
    cross join unnest(lengths) as t(chunk_size)
)
-- distinct: an id may match several chunk widths; count it only once
select sum(distinct product_id::long) as part2
from with_chunks
where length(array_distinct(chunks)) = 1
;
|
||||
69
solutions/03.sql
Normal file
69
solutions/03.sql
Normal file
@@ -0,0 +1,69 @@
|
||||
-- Day 3 input: one digit-string "bank" per line.
create or replace table day03_data as
select bank
from read_csv(
    'inputs/03/input.txt',
    header = false,
    columns = { 'bank': 'VARCHAR' }
);
|
||||
-- Same parsing as day03_data, applied to the sample input for debugging.
create or replace table day03_test as
select bank
from read_csv(
    'inputs/03/test.txt',
    header = false,
    columns = { 'bank': 'VARCHAR' }
);
|
||||
|
||||
-- Part 1: form a two-digit number per bank — the highest digit found in the
-- head of the bank (bank[:-2], leaving room for a second pick), then the
-- highest digit found after it; each pick falls back to 1 when no digit
-- in 2..9 is present.
-- NOTE(review): this reads day03_test while part 2 below reads day03_data —
-- looks like a debugging leftover; confirm which input part 1 should use.
with with_best as (
    select
        bank,
        coalesce((
            select max(digit)
            from generate_series(2, 9) as t(digit)
            where contains(bank[:-2], digit::string) is true
        ), 1) as best
    from day03_test
),
with_best_pos as (
    select
        *,
        -- position of the first pick; the second pick searches after it
        instr(bank, best::string) as best_pos,
        bank[best_pos + 1:],
        coalesce((
            select max(digit)
            from generate_series(2, 9) as t(digit)
            where contains(bank[best_pos + 1:], digit::string) is true
        ), 1) as second_best
    from with_best
)
select sum(best * 10 + second_best) as part1
from with_best_pos
;
|
||||
|
||||
-- Part 2: greedily build a 12-digit number per bank, left to right. At each
-- step take the largest digit present in the current window (which always
-- leaves enough trailing characters for the remaining picks), append it,
-- and advance the window past it.
with recursive seq as (
    -- seed: nothing picked yet; window excludes a 12-character tail
    select
        bank,
        12 as step,
        '' as digits,
        0 as pos,
        bank[0:-12] as sub
    from day03_data

    union all

    -- one pick per recursion level; the lateral join's ON clause doubles
    -- as the termination guard (stop once step reaches 0)
    select
        s.bank,
        s.step - 1 as new_step,
        s.digits || max_digit::string as digits,
        s.pos + instr(s.sub, max_digit::string) as new_pos,
        -- next window: just past the picked digit, keeping new_step spare
        s.bank[new_pos + 1:-new_step] as sub
    from seq s
    join lateral (
        select max(digit) as max_digit
        from generate_series(1, 9) as t(digit)
        where contains(s.sub, digit::string)
    ) as m
        on s.step > 0
)
-- only fully-built 12-digit numbers contribute to the answer
select sum(digits::hugeint) as total
from seq
where step = 0
;
|
||||
|
||||
Reference in New Issue
Block a user