Compare commits: 001712ed1c ... 5d2441ce0c

12 Commits:
- 5d2441ce0c
- 30d3875e09
- 52fdf79d65
- 06d1144c85
- e3f0d816de
- bcd479ceae
- 1dc9b6149c
- 9242d41a0f
- a899241c28
- 07e2750db5
- e48338ff0e
- c174181435
pttodoest/Cargo.lock (generated, 1 line changed)

@@ -226,6 +226,7 @@ dependencies = [
  "clap",
  "gethostname",
  "json-patch",
+ "jsonptr",
  "serde",
  "serde_json",
  "serde_yaml",
pttodoest/Cargo.toml (1 line changed)

@@ -7,6 +7,7 @@ edition = "2024"
 clap = { version = "4.5.51", features = ["derive"] }
 gethostname = "1.1.0"
 json-patch = "4.1.0"
+jsonptr = "0.7.1"
 serde = { version = "1.0.228", features = ["serde_derive"] }
 serde_json = "1.0.145"
 serde_yaml = "0.9.34"
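Note on the new dependency: json-patch 4.x takes its JSON Pointer type from the jsonptr crate, which is why jsonptr becomes a direct dependency here. A minimal standalone sketch of the append-to-array pattern the main.rs change below relies on (assumes the json-patch 4.1 / jsonptr 0.7 APIs pinned above; not part of the diff itself):

fn main() {
    let mut doc = serde_json::json!(["existing task"]);
    // "/-" is the JSON Pointer meaning "append past the last array element".
    let op = json_patch::PatchOperation::Add(json_patch::AddOperation {
        path: jsonptr::PointerBuf::parse("/-").expect("cannot create path to /-"),
        value: serde_json::json!("new task"),
    });
    // json_patch::patch applies a slice of operations in order.
    json_patch::patch(&mut doc, &[op]).expect("failed to apply add");
    assert_eq!(doc, serde_json::json!(["existing task", "new task"]));
}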
pttodoest/src/main.rs

@@ -1,21 +1,40 @@
 use clap::Parser;
+use serde::{Deserialize, Serialize};
 use serde_yaml;
-use std::io::{Read, Write};
-use serde::{Serialize, Deserialize};
+use std::io::{BufRead, Read, Write};
 
 fn main() {
-    for file in Flags::new()
-        .expect("failed to flags")
-        .files()
-        .expect("failed to files")
-        .files
-        .iter()
-    {
-        file.reconcile_snapshot_changes().unwrap();
+    let flags = Flags::new().expect("failed to flags");
+    let files = flags.files().expect("failed to files");
+    assert!(files.files.len() > 0, "no files");
+
+    if !flags.dry_run {
+        for file in files.files.iter() {
+            file.persist_stage()
+                .expect("failed to persist staged changes to log file");
+            file.stage_persisted().expect("failed to stage log files");
+        }
+
+        if let Some(add) = flags.add {
+            let patch: json_patch::PatchOperation =
+                json_patch::PatchOperation::Add(json_patch::AddOperation {
+                    path: jsonptr::PointerBuf::parse("/-").expect("cannot create path to /-"),
+                    value: serde_json::json!(add),
+                });
+            files.files[0]
+                .append(Delta::now(patch))
+                .expect("failed to add");
+            files.files[0]
+                .stage_persisted()
+                .expect("failed to stage added");
+        }
+    }
+
+    for file in files.files.iter() {
         println!(
-            "{} => {:?}",
+            "{} => {}",
             file.file,
-            file.events().unwrap().snapshot().unwrap()
+            serde_yaml::to_string(&file.events().unwrap().snapshot().unwrap()).unwrap(),
        );
     }
 }
@@ -80,8 +99,10 @@ struct Files {
 
 impl Files {
     fn new(files: &Vec<String>) -> Files {
+        let mut files = files.clone();
+        files.sort();
         Files {
-            files: files.into_iter().map(|x| File::new(x)).collect(),
+            files: files.into_iter().map(|x| File::new(&x)).collect(),
         }
     }
 }
@@ -100,26 +121,37 @@ impl File {
         Events::new(&self.file)
     }
 
-    fn stash_staged_changes(&self, stashed: Vec<Task>) -> Result<(), String> {
-        let snapshot = serde_json::to_string(&stashed).unwrap();
-        let snapshot: serde_json::Value = serde_json::from_str(snapshot.as_str()).unwrap();
+    fn stage_persisted(&self) -> Result<(), String> {
+        let stage = self.events()?.snapshot()?;
+        let plaintext = serde_yaml::to_string(&stage).unwrap();
+        let mut f = std::fs::File::create(&self.file).expect("failed to open file for writing");
+        writeln!(f, "{}", plaintext).expect("failed to write");
+        Ok(())
+    }
 
-        let stage = self.snapshot()?;
+    fn persist_stage(&self) -> Result<(), String> {
+        let persisted = self.events()?.snapshot()?;
+        let snapshot = serde_json::to_string(&persisted).unwrap();
+        let snapshot = snapshot.as_str();
+        let snapshot: serde_json::Value = serde_json::from_str(&snapshot).unwrap();
+
+        let stage = self.stage()?;
         let stage = serde_json::to_string(&stage).unwrap();
         let stage: serde_json::Value = serde_json::from_str(stage.as_str()).unwrap();
 
         let patches = json_patch::diff(&snapshot, &stage);
-        let deltas: Vec<Delta> = patches.iter()
+        let deltas: Vec<Delta> = patches
+            .iter()
             .map(|patch| patch.clone())
             .map(|patch| Delta::now(patch.clone()))
             .collect();
         for delta in deltas.iter() {
-            self.append(serde_json::to_string(delta).unwrap())?;
+            self.append(delta.clone())?;
         }
         Ok(())
     }
 
-    fn snapshot(&self) -> Result<Vec<Task>, String> {
+    fn stage(&self) -> Result<Vec<Task>, String> {
         let mut r = match std::fs::File::open(self.file.clone()) {
             Ok(f) => Ok(f),
             Err(msg) => Err(format!("could not open {}: {}", &self.file, msg)),
@@ -146,17 +178,23 @@ impl File {
     fn append(&self, delta: Delta) -> Result<(), String> {
         use std::fs::OpenOptions;
         let hostname = gethostname::gethostname();
-        let log = format!("{}{}", Events::log_prefix(&self.file), gethostname::gethostname().into_string().unwrap());
-        let mut file = match OpenOptions::new().write(true).append(true).open(&log) {
+        assert!(hostname.len() > 0, "empty hostname");
+        let log = format!(
+            "{}{}",
+            Events::log_prefix(&self.file),
+            hostname.into_string().unwrap()
+        );
+        let mut file = match OpenOptions::new()
+            .write(true)
+            .append(true)
+            .create(true)
+            .open(&log)
+        {
             Ok(f) => Ok(f),
-            Err(msg) => Err(format!(
-                "failed to open {} for appending: {}",
-                &self.file, msg
-            )),
+            Err(msg) => Err(format!("failed to open {} for appending: {}", &log, msg)),
         }?;
         let line = serde_json::to_string(&delta).unwrap();
-        match
-        writeln!(file, "{}", line) {
+        match writeln!(file, "{}", line) {
             Ok(_) => Ok(()),
             Err(msg) => Err(format!("failed to append: {}", msg)),
         }
@@ -164,22 +202,30 @@ impl File {
 }
 
 #[derive(Debug, Clone, Serialize, Deserialize)]
-struct Delta{
+struct Delta {
     ts: u64,
     patch: json_patch::PatchOperation,
 }
 
 impl Delta {
     fn new(patch: json_patch::PatchOperation, ts: u64) -> Delta {
-        Delta{
+        Delta {
             patch: patch,
             ts: ts,
         }
     }
 
     fn now(patch: json_patch::PatchOperation) -> Delta {
-        Self::new(patch, std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap().as_secs().try_into().unwrap())
+        Self::new(
+            patch,
+            std::time::SystemTime::now()
+                .duration_since(std::time::UNIX_EPOCH)
+                .unwrap()
+                .as_secs()
+                .try_into()
+                .unwrap(),
+        )
     }
 }
 
 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
@@ -201,10 +247,24 @@ impl Events {
             Err(msg) => Err(format!("failed to read dir {}: {}", Self::dir(&file), msg)),
         }?;
 
-        let mut result = vec![];
+        let mut result: Vec<Delta> = vec![];
         for log in logs.iter() {
-            panic!("{:?}", log);
+            match std::fs::File::open(&log) {
+                Ok(f) => {
+                    for line in std::io::BufReader::new(f).lines() {
+                        let line = line.unwrap();
+                        let delta = match serde_json::from_str(&line) {
+                            Ok(v) => Ok(v),
+                            Err(msg) => Err(format!("failed to parse line {}: {}", &line, msg)),
+                        }?;
+                        result.push(delta);
+                    }
+                    Ok(())
+                }
+                Err(msg) => Err(format!("failed to read {}: {}", &log, msg)),
+            }?;
         }
+        result.sort_by(|a, b| a.ts.cmp(&b.ts));
         Ok(Events(result))
     }
 
@@ -231,10 +291,19 @@ impl Events {
     }
 
     fn snapshot(&self) -> Result<Vec<Task>, String> {
-        let mut result = vec![];
+        let mut result = serde_json::json!([]);
         for event in self.0.iter() {
-            panic!("not impl: {:?}", event)
+            match json_patch::patch(&mut result, vec![event.patch.clone()].as_slice()) {
+                Ok(_) => Ok(()),
+                Err(msg) => Err(format!(
+                    "failed to patch {} onto {}: {}",
+                    &event.patch, &result, msg
+                )),
+            }?;
+        }
+        match serde_json::from_str(serde_json::to_string(&result).unwrap().as_str()) {
+            Ok(v) => Ok(v),
+            Err(msg) => Err(format!("failed turning patched into events: {}", msg)),
         }
-        Ok(result)
     }
 }
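Taken together, persist_stage() and Events::snapshot() form a write/replay pair: diffing the staged YAML against the last replayed snapshot yields timestamped deltas, and the snapshot is rebuilt by applying every delta in ts order. A compilable sketch of that round trip (standalone illustration, using serde_json::Value in place of the Vec<Task> the diff uses):

fn main() {
    // What the delta logs replay to so far.
    let persisted = serde_json::json!(["existing task"]);
    // What the user edited into the staged YAML file.
    let staged = serde_json::json!(["existing task", "new task"]);

    // persist_stage(): diff persisted -> staged; each op becomes a Delta line.
    let patches = json_patch::diff(&persisted, &staged);

    // snapshot(): replay the recorded operations in order to reach staged.
    let mut replayed = persisted.clone();
    json_patch::patch(&mut replayed, &patches).expect("failed to patch");
    assert_eq!(replayed, staged);
}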
pttodoest/src/testdata/.root.yaml.Bels-MacBook-Pro.local (vendored, new file, 2 lines)

@@ -0,0 +1,2 @@
+{"ts":1762884455,"patch":{"op":"replace","path":"","value":["read; https://topicpartition.io/blog/postgres-pubsub-queue-benchmarks","pglogical vs ha\n\n# api.git#breel/keys-620-pglogical-always-set-cr/2-user-survives-cr\n$ mise run pulsegres-new ^logical/toggl\n","drive; VERIFY spoc posts daily summary w/ unresolved","drive; VERIFY spoc refreshes summary w/ thread comment contianing 'refresh'","637; reconcile deploy if replicas wrong; https://github.com/renderinc/api/pull/26540/files","https://linear.app/render-com/issue/KEYS-633/add-3-when-max-connections-overridden-for-3-superuser-connections","https://linear.app/render-com/issue/KEYS-637/billing-resume-should-1-unsuspend-pg-in-cloudsql-2-unsuspend-pg-in-cr","https://linear.app/render-com/issue/KEYS-638/pgoperator-generates-new-ha-patroni-cert-every-reconcile-no-matter","pg; how2partition; https://renderinc.slack.com/archives/C0319NYCSSG/p1756357545556659?thread_ts=1756357467.613369&cid=C0319NYCSSG","pitr; backup purge cronjob for PL types","pg11 pgbackup doesnt write to envsetting mucked env key","incident io; teach spocbotvr to read slacks","userdb to internal; peer packages can use internal as userdb","fcr; cannot pitr because pgbackrest doesnt know wal spans thus pgexporter and friends cant know pitr works","etcd statefulset of 1 (for no random podname, no conflict, k8s ensures pod replace)\npatroni always\n","maher; https://slab.render.com/posts/hopes-and-dreams-blegf8fx#hdsyt-valkey-bundle","maher; shadow lizhi pm loops","maher; get more interviewers","maher; get concrete career and project plans so i can get promo in 2y; no manager to advocate","read; https://trychroma.com/engineering/wal3","read; https://github.com/renderinc/dashboard/pull/8883","read; https://litestream.io/getting-started/","kr\nto del gcloud old key\nie https://console.cloud.google.com/iam-admin/serviceaccounts/details/104206017956912104938/keys?hl=en&project=render-prod\n",{"subtasks":["","pitr\nhttps://slab.render.com/posts/pitr-as-a-service-health-abvnqx11\nmore aggressive alert autotune backup cores\nmore aggressive alert on MOAR backup cores\ncreate alert autotune archive-push cores\ncreate alert MOAR archive-push cores\n","cr; frontend","cr; cli.git","cr; public-api-schema.git; https://github.com/renderinc/public-api-schema/pull/407 STILL NEED EVENTS","cr; website.git","cr; changelog","ops; pgproxy rate limits 50ps 100burst; https://github.com/renderinc/dbproxy/pull/91","2873; no conn patroni if upgradeInProgressWithoutHA; https://github.com/renderinc/api/pull/26328","2733; only EnvSettings; https://github.com/renderinc/api/pull/25322/files","pg18; after cred rotation works, re enable e2e","655; pg18; pub api sch; https://github.com/renderinc/public-api-schema/pull/421","655; pg18; go generate pub api sch; https://github.com/renderinc/api/pull/26694","663; das; show status in /info; https://github.com/renderinc/dashboard/pull/9616","664; pg18; go gen terraform; https://github.com/renderinc/api/pull/26701","664; pg18; ga; push terraform.git#breel/keys-664-pg18","656; pg18; website; https://github.com/renderinc/website/pull/985/files","663; das; note disk cannot decrease even if autoscaled; https://github.com/renderinc/dashboard/pull/9621","pulsegres; pls let me keep my test emails; https://github.com/renderinc/api/pull/26741","pgup; restore view owner; https://github.com/renderinc/api/pull/26814","pgup; resync if missing resync; https://github.com/renderinc/api/pull/26817","pgup; replicas use $RESYNC; https://github.com/renderinc/api/pull/26878"],"todo":"blocked"}]}}
+{"ts":1762885026,"patch":{"op":"add","path":"/-","value":"hi"}}
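Each line of this per-host log (named log_prefix + hostname, hence the .Bels-MacBook-Pro.local suffix) is one serde_json-encoded Delta, replayed in timestamp order so logs from different hosts merge. A minimal sketch of the read path, mirroring Events::new above (hypothetical standalone program; Task omitted, patch kept as json_patch::PatchOperation exactly as in the diff):

use std::io::BufRead;

#[derive(serde::Deserialize, Debug)]
struct Delta {
    ts: u64,
    patch: json_patch::PatchOperation,
}

fn main() {
    let f = std::fs::File::open("pttodoest/src/testdata/.root.yaml.Bels-MacBook-Pro.local")
        .expect("failed to open log");
    let mut deltas: Vec<Delta> = std::io::BufReader::new(f)
        .lines()
        .map(|line| serde_json::from_str(&line.expect("read failed")).expect("parse failed"))
        .collect();
    // Replay order is by timestamp, not file order, so concurrent hosts merge.
    deltas.sort_by(|a, b| a.ts.cmp(&b.ts));
    println!("parsed {} deltas", deltas.len());
}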
pttodoest/src/testdata/root.yaml (vendored, 17 lines changed)

@@ -15,16 +15,14 @@
 - pg11 pgbackup doesnt write to envsetting mucked env key
 - incident io; teach spocbotvr to read slacks
 - userdb to internal; peer packages can use internal as userdb
-- fcr; cannot pitr because pgbackrest doesnt know wal spans thus pgexporter and friends
-  cant know pitr works
+- fcr; cannot pitr because pgbackrest doesnt know wal spans thus pgexporter and friends cant know pitr works
 - |
   etcd statefulset of 1 (for no random podname, no conflict, k8s ensures pod replace)
   patroni always
 - maher; https://slab.render.com/posts/hopes-and-dreams-blegf8fx#hdsyt-valkey-bundle
 - maher; shadow lizhi pm loops
 - maher; get more interviewers
-- maher; get concrete career and project plans so i can get promo in 2y; no manager
-  to advocate
+- maher; get concrete career and project plans so i can get promo in 2y; no manager to advocate
 - read; https://trychroma.com/engineering/wal3
 - read; https://github.com/renderinc/dashboard/pull/8883
 - read; https://litestream.io/getting-started/
@@ -32,9 +30,8 @@
   kr
   to del gcloud old key
   ie https://console.cloud.google.com/iam-admin/serviceaccounts/details/104206017956912104938/keys?hl=en&project=render-prod
-- todo: blocked
-  subtasks:
-  - ""
+- subtasks:
+  - ''
   - |
     pitr
     https://slab.render.com/posts/pitr-as-a-service-health-abvnqx11
@@ -44,8 +41,7 @@
     create alert MOAR archive-push cores
   - cr; frontend
   - cr; cli.git
-  - cr; public-api-schema.git; https://github.com/renderinc/public-api-schema/pull/407
-    STILL NEED EVENTS
+  - cr; public-api-schema.git; https://github.com/renderinc/public-api-schema/pull/407 STILL NEED EVENTS
   - cr; website.git
   - cr; changelog
   - ops; pgproxy rate limits 50ps 100burst; https://github.com/renderinc/dbproxy/pull/91
@@ -63,3 +59,6 @@
   - pgup; restore view owner; https://github.com/renderinc/api/pull/26814
   - pgup; resync if missing resync; https://github.com/renderinc/api/pull/26817
   - pgup; replicas use $RESYNC; https://github.com/renderinc/api/pull/26878
+  todo: blocked
+- hi
+
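The whitespace-only churn in this file (rejoined long lines, '' instead of "", todo: blocked moving below subtasks) falls out of stage_persisted() rewriting root.yaml through serde_yaml rather than hand-editing. A small sketch of that normalization (assumed behavior of the pinned serde_yaml 0.9; not part of the diff):

fn main() {
    let tasks: Vec<String> = vec![
        "".into(),
        "fcr; cannot pitr because pgbackrest doesnt know wal spans thus pgexporter and friends cant know pitr works".into(),
    ];
    // serde_yaml keeps each scalar on one line and quotes the empty
    // string as '', matching the rewrapped hunks above.
    print!("{}", serde_yaml::to_string(&tasks).unwrap());
}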