This commit is contained in:
Bel LaPointe
2025-11-10 21:32:18 -07:00
parent 7a23fba2ae
commit 56d5f59daf
4 changed files with 711 additions and 1 deletions

View File

@@ -1,3 +1,263 @@
use clap::Parser;
use serde_yaml;
use std::io::Read;
/// Entry point: load the configured todo file(s), reconcile any manual edits
/// to each snapshot back into its event log, then print each file's
/// reconstructed snapshot.
fn main() {
    for file in Flags::new()
        .expect("failed to parse flags")
        .files()
        .expect("failed to enumerate todo files")
        .files
        .iter()
    {
        // Record pushes/pops for any drift between the event log and the
        // snapshot file on disk.
        file.reconcile_snapshot_changes().unwrap();
        println!(
            "{} => {:?}",
            file.file,
            file.events().unwrap().snapshot().unwrap()
        );
    }
}
/// Command-line flags for the todo tool, parsed via clap's derive API.
#[derive(Debug, Parser)]
struct Flags {
    // Path to the todo file or directory. A leading "$" means the value is
    // the name of an environment variable to resolve (see Flags::new).
    #[arg(short = 'f', long = "path", default_value = "$PTTODO_FILE")]
    path: String,
    // Task text to append. NOTE(review): not consumed anywhere visible yet.
    #[arg(short = 'a', long = "add")]
    add: Option<String>,
    // Open the file for editing. NOTE(review): not consumed anywhere visible yet.
    #[arg(short = 'e', long = "edit", default_value = "false")]
    edit: bool,
    // Report what would change without writing anything.
    #[arg(short = 'd', long = "dry-run", default_value = "false")]
    dry_run: bool,
}
impl Flags {
    /// Parse CLI flags and resolve an environment-variable indirection in
    /// `path`: a value like "$PTTODO_FILE" is replaced by that variable's
    /// contents.
    ///
    /// Returns an error string when the referenced env var is unset or the
    /// resulting path cannot be enumerated.
    fn new() -> Result<Flags, String> {
        let mut result = Flags::parse();
        if result.path.starts_with('$') {
            let name = result.path[1..].to_string();
            result.path = std::env::var(&name)
                .map_err(|msg| format!("'{}' unset: {}", result.path, msg))?;
        }
        // Fail fast: surface an unreadable path at startup rather than later.
        let _ = result.files()?;
        Ok(result)
    }

    /// Expand `path` into the list of todo files: the path itself when it is
    /// a regular file, or every non-hidden regular file directly inside it
    /// when it is a directory (hidden ".<name>.*" files hold the event logs
    /// and must not be treated as todo files).
    fn files(&self) -> Result<Files, String> {
        let metadata = std::fs::metadata(&self.path)
            .map_err(|msg| format!("failed to load {}: {}", self.path, msg))?;
        let files = if metadata.is_dir() {
            std::fs::read_dir(&self.path)
                .map_err(|msg| format!("failed to read {}: {}", self.path, msg))?
                .filter_map(|entry| entry.ok())
                // Unreadable metadata excludes the entry instead of panicking.
                .filter(|entry| entry.metadata().map(|m| m.is_file()).unwrap_or(false))
                // Filter out hidden files (event logs, editor droppings, …).
                .filter(|entry| !entry.file_name().to_string_lossy().starts_with('.'))
                .map(|entry| entry.path().display().to_string())
                .collect()
        } else {
            vec![self.path.clone()]
        };
        Ok(Files::new(&files))
    }
}
/// The set of todo files discovered from the configured path.
#[derive(Debug, Clone)]
struct Files {
    // One entry per todo snapshot file on disk.
    files: Vec<File>,
}
impl Files {
    /// Wrap each path in a `File` handle.
    ///
    /// Takes a slice rather than `&Vec<String>`; existing `&Vec` callers keep
    /// working via deref coercion.
    fn new(files: &[String]) -> Files {
        Files {
            files: files.iter().map(|path| File::new(path)).collect(),
        }
    }
}
/// A single todo snapshot file plus its adjacent hidden event logs.
#[derive(Debug, Clone)]
struct File {
    // Path to the snapshot file on disk.
    file: String,
}
impl File {
fn new(file: &String) -> File {
File { file: file.clone() }
}
fn events(&self) -> Result<Events, String> {
Events::new(&self.file)
}
fn reconcile_snapshot_changes(&self) -> Result<(), String> {
let history = self.events()?.snapshot()?;
let cached = self.snapshot()?;
match history == cached {
true => Ok(()),
false => {
for task in history.iter() {
if !cached.contains(task) {
self.pop(task.clone())?;
}
}
for i in 0..cached.len() {
if !history.contains(&cached[i]) {
self.push(TaskAt {
task: cached[i].clone(),
at: i,
})?;
}
}
panic!("not impl")
}
}
}
fn snapshot(&self) -> Result<Vec<Task>, String> {
let mut r = match std::fs::File::open(self.file.clone()) {
Ok(f) => Ok(f),
Err(msg) => Err(format!("could not open {}: {}", &self.file, msg)),
}?;
let mut buff = String::new();
match r.read_to_string(&mut buff) {
Err(msg) => Err(format!("failed reading {}: {}", &self.file, msg)),
_ => Ok({}),
}?;
let mut result = vec![];
match serde_yaml::from_str::<Vec<serde_yaml::Value>>(&buff) {
Ok(v) => {
result.extend(v.iter().map(|x| Task(x.clone())));
Ok({})
}
Err(msg) => Err(format!("failed parsing {}: {}", &self.file, msg)),
}?;
Ok(result)
}
fn push(&self, task_at: TaskAt) -> Result<(), String> {
self.append(Delta {
pushed_at: vec![task_at],
popped: vec![],
})
}
fn pop(&self, task: Task) -> Result<(), String> {
self.append(Delta {
pushed_at: vec![],
popped: vec![task],
})
}
fn append(&self, delta: Delta) -> Result<(), String> {
use std::fs::OpenOptions;
let hostname = gethostname::gethostname();
let mut file = match OpenOptions::new().write(true).append(true).open(&self.file) {
Ok(f) => Ok(f),
Err(msg) => Err(format!(
"failed to open {} for appending: {}",
&self.file, msg
)),
}?;
panic!("not impl: {:?}", file)
}
}
/// One event-log record: tasks pushed (with position) and tasks popped.
#[derive(Debug, Clone)]
struct Delta {
    // Tasks inserted, each with the snapshot index it was inserted at.
    pushed_at: Vec<TaskAt>,
    // Tasks removed.
    popped: Vec<Task>,
}
/// A task together with the snapshot index it was inserted at.
#[derive(Debug, Clone)]
struct TaskAt {
    task: Task,
    // Target index in the snapshot; replay clamps it to the list's end
    // (see Events::snapshot).
    at: usize,
}
/// A single todo item, kept as raw YAML so arbitrary item shapes round-trip.
#[derive(Debug, Clone, PartialEq)]
struct Task(serde_yaml::Value);
/// The ordered sequence of deltas replayed to rebuild a snapshot.
#[derive(Debug, Clone)]
struct Events(Vec<Delta>);
impl Events {
    /// Discover this file's event logs — hidden ".<basename>.*" files in the
    /// same directory — and parse them into deltas.
    ///
    /// NOTE: log parsing is still a stub — it panics on the first log found,
    /// so today this only succeeds when no logs exist yet.
    fn new(file: &str) -> Result<Events, String> {
        let dir = Self::dir(file);
        // Hoisted: the prefix is loop-invariant; don't rebuild it per entry.
        let prefix = Self::log_prefix(file);
        let logs = std::fs::read_dir(&dir)
            .map_err(|msg| format!("failed to read dir {}: {}", dir, msg))?
            .filter_map(|entry| entry.ok())
            // Unreadable metadata excludes the entry instead of panicking.
            .filter(|entry| entry.metadata().map(|m| m.is_file()).unwrap_or(false))
            .map(|entry| entry.path().display().to_string())
            .filter(|path| path.starts_with(&prefix))
            .collect::<Vec<String>>();
        let result = vec![];
        for log in logs.iter() {
            panic!("{:?}", log);
        }
        Ok(Events(result))
    }

    /// Prefix shared by every event log for `file`: "<dir>/.<basename>.".
    fn log_prefix(file: &str) -> String {
        format!("{}/.{}.", Self::dir(file), Self::basename(file))
    }

    /// Directory portion of `file`'s path as a String.
    fn dir(file: &str) -> String {
        std::path::Path::new(file)
            .parent()
            .expect("cannot get dirname")
            .to_str()
            .expect("cannot stringify dirname")
            .to_string()
    }

    /// Final component of `file`'s path as a String.
    fn basename(file: &str) -> String {
        std::path::Path::new(file)
            .file_name()
            .expect("cannot get basename")
            .to_str()
            .expect("cannot stringify basename")
            .to_string()
    }

    /// Replay all deltas, in order, into the current task list.
    ///
    /// NOTE: pop handling is a stub (panics when a popped task is present),
    /// and the trailing panic means any non-empty log currently aborts.
    fn snapshot(&self) -> Result<Vec<Task>, String> {
        let mut result: Vec<Task> = vec![];
        for event in self.0.iter() {
            for popped in event.popped.iter() {
                if result.iter().any(|task| task == popped) {
                    panic!("not impl")
                }
            }
            for push in event.pushed_at.iter() {
                // Clamp the insertion index so replay can't go out of bounds.
                let at = push.at.min(result.len());
                result.insert(at, push.task.clone());
            }
            panic!("not impl: {:?}", result)
        }
        Ok(result)
    }
}

65
pttodoest/src/testdata/root.yaml vendored Executable file
View File

@@ -0,0 +1,65 @@
- read; https://topicpartition.io/blog/postgres-pubsub-queue-benchmarks
- |
pglogical vs ha
# api.git#breel/keys-620-pglogical-always-set-cr/2-user-survives-cr
$ mise run pulsegres-new ^logical/toggl
- drive; VERIFY spoc posts daily summary w/ unresolved
- drive; VERIFY spoc refreshes summary w/ thread comment containing 'refresh'
- 637; reconcile deploy if replicas wrong; https://github.com/renderinc/api/pull/26540/files
- https://linear.app/render-com/issue/KEYS-633/add-3-when-max-connections-overridden-for-3-superuser-connections
- https://linear.app/render-com/issue/KEYS-637/billing-resume-should-1-unsuspend-pg-in-cloudsql-2-unsuspend-pg-in-cr
- https://linear.app/render-com/issue/KEYS-638/pgoperator-generates-new-ha-patroni-cert-every-reconcile-no-matter
- pg; how2partition; https://renderinc.slack.com/archives/C0319NYCSSG/p1756357545556659?thread_ts=1756357467.613369&cid=C0319NYCSSG
- pitr; backup purge cronjob for PL types
- pg11 pgbackup doesnt write to envsetting mucked env key
- incident io; teach spocbotvr to read slacks
- userdb to internal; peer packages can use internal as userdb
- fcr; cannot pitr because pgbackrest doesnt know wal spans thus pgexporter and friends
cant know pitr works
- |
etcd statefulset of 1 (for no random podname, no conflict, k8s ensures pod replace)
patroni always
- maher; https://slab.render.com/posts/hopes-and-dreams-blegf8fx#hdsyt-valkey-bundle
- maher; shadow lizhi pm loops
- maher; get more interviewers
- maher; get concrete career and project plans so i can get promo in 2y; no manager
to advocate
- read; https://trychroma.com/engineering/wal3
- read; https://github.com/renderinc/dashboard/pull/8883
- read; https://litestream.io/getting-started/
- |
kr
to del gcloud old key
ie https://console.cloud.google.com/iam-admin/serviceaccounts/details/104206017956912104938/keys?hl=en&project=render-prod
- todo: blocked
subtasks:
- ""
- |
pitr
https://slab.render.com/posts/pitr-as-a-service-health-abvnqx11
more aggressive alert autotune backup cores
more aggressive alert on MOAR backup cores
create alert autotune archive-push cores
create alert MOAR archive-push cores
- cr; frontend
- cr; cli.git
- cr; public-api-schema.git; https://github.com/renderinc/public-api-schema/pull/407
STILL NEED EVENTS
- cr; website.git
- cr; changelog
- ops; pgproxy rate limits 50ps 100burst; https://github.com/renderinc/dbproxy/pull/91
- 2873; no conn patroni if upgradeInProgressWithoutHA; https://github.com/renderinc/api/pull/26328
- 2733; only EnvSettings; https://github.com/renderinc/api/pull/25322/files
- pg18; after cred rotation works, re enable e2e
- 655; pg18; pub api sch; https://github.com/renderinc/public-api-schema/pull/421
- 655; pg18; go generate pub api sch; https://github.com/renderinc/api/pull/26694
- 663; das; show status in /info; https://github.com/renderinc/dashboard/pull/9616
- 664; pg18; go gen terraform; https://github.com/renderinc/api/pull/26701
- 664; pg18; ga; push terraform.git#breel/keys-664-pg18
- 656; pg18; website; https://github.com/renderinc/website/pull/985/files
- 663; das; note disk cannot decrease even if autoscaled; https://github.com/renderinc/dashboard/pull/9621
- pulsegres; pls let me keep my test emails; https://github.com/renderinc/api/pull/26741
- pgup; restore view owner; https://github.com/renderinc/api/pull/26814
- pgup; resync if missing resync; https://github.com/renderinc/api/pull/26817
- pgup; replicas use $RESYNC; https://github.com/renderinc/api/pull/26878