24 Commits

Author SHA1 Message Date
Bel LaPointe
060a8dfb3b new tests 2025-12-02 17:47:12 -07:00
Bel LaPointe
3d7ebcf9bc drop one debug 2025-12-02 17:42:36 -07:00
Bel LaPointe
51f10b7944 shush warn 2025-12-02 17:40:25 -07:00
Bel LaPointe
9ed6b48806 shhhh warning 2025-12-02 17:36:29 -07:00
Bel LaPointe
a867809cb8 use to shush warn 2025-12-02 17:35:57 -07:00
Bel LaPointe
053071f4be hrm an enqueue operator is hard because a cron fires many times but looks removed but isnt and hrm 2025-12-02 17:34:04 -07:00
Bel LaPointe
eccaa06d98 can add, add past, add future 2025-12-02 17:30:29 -07:00
Bel LaPointe
ee9377d6da reconcile on add too 2025-12-02 17:21:51 -07:00
Bel LaPointe
7da6aa8ae9 still unsure about triggered vs snapshot time... 2025-12-02 17:17:31 -07:00
Bel LaPointe
a5553d75f4 add w schedule via ts after now 2025-12-02 17:03:06 -07:00
Bel LaPointe
fe8a55b4c1 WIP test add-scheduled, snapshot, scheduled-due 2025-12-01 18:26:41 -07:00
Bel LaPointe
0a7e6873a3 can -s 2025-01-02 2025-12-01 17:39:17 -07:00
Bel LaPointe
1a61701c53 panik todo 2025-12-01 17:35:18 -07:00
Bel LaPointe
f5b47c4e74 reverse for last snapshot 2025-12-01 17:32:50 -07:00
Bel LaPointe
8728867cc3 testdata to new NOT jsonpatch 2025-12-01 17:24:41 -07:00
Bel LaPointe
bb64b87752 all tests pass woo 2025-12-01 17:23:23 -07:00
Bel LaPointe
4ad0b7d2ff no double persist snapshot 2025-12-01 17:05:07 -07:00
Bel LaPointe
700b2a22cc persist delta of last snapshot and stage 2025-12-01 17:03:56 -07:00
Bel LaPointe
97caaebc09 drop nonfuture 2025-12-01 16:51:36 -07:00
Bel LaPointe
72eb29d766 step 1 2025-11-20 14:29:01 -07:00
Bel LaPointe
56d0628ece gr 2025-11-13 16:04:26 -07:00
Bel LaPointe
4884d551e2 oh almost there 2025-11-13 09:45:40 -07:00
Bel LaPointe
b37e61a223 closer 2025-11-12 15:17:21 -07:00
Bel LaPointe
c06091d576 test schedule can pass 2025-11-12 14:44:35 -07:00
3 changed files with 300 additions and 91 deletions

View File

@@ -8,29 +8,33 @@ fn main() {
     let files = flags.files().expect("failed to files");
     if !flags.dry_run {
-        for file in files.files.iter() {
-            file.persist_stage()
-                .expect("failed to persist staged changes to log file");
-            file.stage_persisted().expect("failed to stage log files");
-        }
+        files.reconcile().expect("failed to reconcile");
         if let Some(add) = flags.add {
-            let task = Task(serde_yaml::Value::String(add));
-            files.files[0]
-                .append(Delta::add(task))
-                .expect("failed to add");
-            if !flags.enqueue_add {
-                files.files[0]
-                    .stage_persisted()
-                    .expect("failed to stage added");
-            }
+            let task = match flags.add_schedule.clone() {
+                None => Task(serde_yaml::Value::String(add)),
+                Some(add_schedule) => {
+                    let mut m = serde_yaml::Mapping::new();
+                    m.insert("schedule".into(), add_schedule.into());
+                    m.insert("do".into(), add.into());
+                    Task(serde_yaml::Value::Mapping(m))
+                }
+            };
+            let now = Delta::now_time();
+            files.files[0]
+                .append(match task.next_due(now.clone()) {
+                    None => Delta::add(task),
+                    Some(due) => Delta::add_at(task, if due > now { due } else { now }),
+                })
+                .expect("failed to add");
+            files.reconcile().expect("failed to reconcile");
         }
     }
     for file in files.files.iter() {
         println!(
-            "{} => {}",
-            file.file,
+            "{}",
             serde_yaml::to_string(&file.events().unwrap().snapshot().unwrap()).unwrap(),
         );
     }
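
Read as a rule, the new add path stamps a scheduled task at whichever is later, its next due time or the current time, so a future-dated add stays out of the rendered snapshot until it fires. A minimal sketch of just that rule (the function name is illustrative, not part of this change):

    // Sketch only: the timestamp rule applied by the add path above.
    fn effective_add_ts(next_due: Option<u64>, now: u64) -> u64 {
        match next_due {
            None => now,                   // unscheduled: visible immediately
            Some(due) if due > now => due, // scheduled in the future: defer
            Some(_) => now,                // schedule already due: add now
        }
    }
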
@@ -54,8 +58,8 @@ struct Flags {
     #[arg(short = 'd', long = "dry-run", default_value = "false")]
     dry_run: bool,
-    #[arg(short = 'q', long = "enqueue", default_value = "false")]
-    enqueue_add: bool,
+    #[arg(short = 's', long = "add-schedule")]
+    add_schedule: Option<String>,
 }

 impl Flags {
@@ -116,7 +120,7 @@ mod test_flags {
             add: None,
            edit: false,
            dry_run: true,
-            enqueue_add: false,
+            add_schedule: None,
        };
        let files = flags.files().expect("failed to files from dir");
        assert_eq!(1, files.files.len());
@@ -137,6 +141,14 @@ impl Files {
             files: files.into_iter().map(|x| File::new(&x)).collect(),
         }
     }
+
+    pub fn reconcile(&self) -> Result<(), String> {
+        for file in self.files.iter() {
+            file.persist_stage()?;
+            file.stage_persisted()?;
+        }
+        Ok(())
+    }
 }

 #[derive(Debug, Clone)]
@@ -153,8 +165,8 @@ impl File {
         Events::new(&self.file)
     }

-    pub fn persist_unpersisted_stage(&self) -> Result<(), String> {
-        let events = self.events()?;
+    pub fn persist_stage(&self) -> Result<(), String> {
+        let old_snapshot = self.events()?.last_snapshot();
         let stage_mod_time = std::fs::metadata(&self.file)
             .unwrap()
             .modified()
@@ -162,36 +174,20 @@ impl File {
             .duration_since(std::time::UNIX_EPOCH)
             .unwrap()
             .as_secs();
-        let old_persisted: Vec<Delta> = events
-            .0
-            .iter()
-            .filter(|x| x.ts < stage_mod_time)
-            .map(|x| x.clone())
-            .collect();
-        let old_events = Events(old_persisted);
-        let old_snapshot = old_events.snapshot()?;
-        self.persist_delta_at(old_snapshot, self.stage()?, stage_mod_time)?;
-        self.stage_persisted()
+        self.persist_delta_at(old_snapshot, self.stage()?, stage_mod_time)
     }

     pub fn stage_persisted(&self) -> Result<(), String> {
-        let stage = self.events()?.snapshot()?;
-        let plaintext = serde_yaml::to_string(&stage).unwrap();
+        let persisted_as_snapshot = self.events()?.snapshot()?;
+        if persisted_as_snapshot != self.events()?.last_snapshot() {
+            self.append(Delta::snapshot(persisted_as_snapshot.clone()))?;
+        }
+        let plaintext = serde_yaml::to_string(&persisted_as_snapshot).unwrap();
         let mut f = std::fs::File::create(&self.file).expect("failed to open file for writing");
         writeln!(f, "{}", plaintext).expect("failed to write");
         Ok(())
     }

-    pub fn persist_stage(&self) -> Result<(), String> {
-        self.persist_unpersisted_stage()?;
-        let persisted = self.events()?.snapshot()?;
-        let stage = self.stage()?;
-        self.persist_delta(persisted, stage)
-    }
-
     pub fn persist_delta(&self, before: Vec<Task>, after: Vec<Task>) -> Result<(), String> {
         self.persist_delta_at(before, after, Delta::now_time())
     }
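
For orientation, this is roughly what one per-host log looks like after a persist_stage()/stage_persisted() cycle, in the same JSON-lines shape the Delta struct and the tests below use; the timestamps and task names here are made up:

    {"ts":1, "op":"Add", "task": "old", "tasks": null}
    {"ts":2, "op":"Remove", "task": "old", "tasks": null}
    {"ts":2, "op":"Add", "task": "new", "tasks": null}
    {"ts":3, "op":"Snapshot", "task": null, "tasks": ["new"]}
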
@@ -205,6 +201,11 @@ impl File {
         for before in before.iter() {
             if !after.contains(before) {
                 self.append(Delta::remove_at(before.clone(), now))?;
+                let now = Delta::now_time();
+                let due = before.must_next_due(now.clone());
+                if due >= now {
+                    self.append(Delta::add_at(before.clone(), due))?;
+                }
             }
         }
         for after in after.iter() {
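
The block above is the reschedule-on-resolve behaviour: a task that disappears from the stage but still has an upcoming (or current) firing is immediately re-added at that firing, so cron- and duration-style schedules recur while an already-past one-off date stays resolved. The rule in isolation (a sketch; next_due stands in for must_next_due from this diff):

    // Sketch only.
    fn reschedule_at(next_due: u64, now: u64) -> Option<u64> {
        if next_due >= now { Some(next_due) } else { None }
    }

    fn main() {
        assert_eq!(Some(160), reschedule_at(160, 100)); // e.g. a "* * * * *" cron fires again
        assert_eq!(None, reschedule_at(50, 100));       // a one-off date already in the past
        assert_eq!(None, reschedule_at(1, 100));        // unscheduled (must_next_due falls back to 1)
    }
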
@@ -310,7 +311,7 @@ mod test_file {
             tests::file_contains(&d, "plain", "world");

             f.stage_persisted().unwrap();
-            assert_eq!(2, f.events().unwrap().0.len());
+            assert_eq!(3, f.events().unwrap().0.len());
             assert_eq!(2, f.stage().unwrap().len());
             tests::file_contains(&d, "plain", "- hello\n- world");
         });
@@ -326,6 +327,7 @@ mod test_file {
r#" r#"
{"ts":1, "op":"Add", "task": "initial"} {"ts":1, "op":"Add", "task": "initial"}
{"ts":3, "op":"Add", "task": {"k":"v"}} {"ts":3, "op":"Add", "task": {"k":"v"}}
{"ts":3, "op":"Snapshot", "task": null, "tasks": ["initial", 1, {"k":"v"}]}
"#, "#,
); );
tests::write_file( tests::write_file(
@@ -338,19 +340,13 @@ mod test_file {
             let f = File::new(&d.path().join("plain").to_str().unwrap().to_string());
-            assert_eq!(3, f.events().unwrap().0.len());
+            assert_eq!(4, f.events().unwrap().0.len());
             assert_eq!(0, f.stage().unwrap().len());
             tests::file_contains(&d, "plain", "[]");

             f.persist_stage().unwrap();
-            assert_eq!(6, f.events().unwrap().0.len());
+            assert_eq!(7, f.events().unwrap().0.len());
             assert_eq!(0, f.stage().unwrap().len());
-            eprintln!("persist_stage | events | {:?}", f.events().unwrap().0);
-            eprintln!(
-                "persist_stage | events.snapshot | {:?}",
-                f.events().unwrap().snapshot()
-            );
-            eprintln!("persist_stage | stage | {:?}", f.stage().unwrap());
             tests::file_contains(&d, "plain", "[]");

             f.stage_persisted().unwrap();
@@ -360,12 +356,7 @@ mod test_file {
"{:?}", "{:?}",
f.events().unwrap().snapshot().unwrap(), f.events().unwrap().snapshot().unwrap(),
); );
assert_eq!( assert_eq!(8, f.events().unwrap().0.len(), "{:?}", f.events().unwrap());
6,
f.events().unwrap().0.len(),
"{:?}",
f.events().unwrap().0
);
assert_eq!(0, f.stage().unwrap().len(), "{:?}", f.stage().unwrap()); assert_eq!(0, f.stage().unwrap().len(), "{:?}", f.stage().unwrap());
tests::file_contains(&d, "plain", "[]"); tests::file_contains(&d, "plain", "[]");
}); });
@@ -388,12 +379,13 @@ mod test_file {
                 ".plain.host_b",
                 r#"
 {"ts":2, "op":"Add", "task": 1}
+{"ts":2, "op":"Snapshot", "task": null, "tasks": ["initial", 1]}
 "#,
             );

             let f = File::new(&d.path().join("plain").to_str().unwrap().to_string());
-            assert_eq!(3, f.events().unwrap().0.len());
+            assert_eq!(4, f.events().unwrap().0.len());
             assert_eq!(2, f.stage().unwrap().len());
             tests::file_contains(&d, "plain", "- initial\n- 1");
@@ -403,14 +395,14 @@ mod test_file {
             tests::file_contains(&d, "plain", "- initial\n- 1");

             f.stage_persisted().unwrap();
-            assert_eq!(4, f.events().unwrap().0.len());
-            assert_eq!(2, f.stage().unwrap().len());
-            tests::file_contains(&d, "plain", "- initial\n- 1");
+            assert_eq!(5, f.events().unwrap().0.len());
+            assert_eq!(3, f.stage().unwrap().len());
+            tests::file_contains(&d, "plain", "- initial\n- 1\n- k: v");
         });
     }

     #[test]
-    fn test_persist_unpersisted_stage() {
+    fn test_persist_stage() {
         tests::with_dir(|d| {
             tests::write_file(&d, "plain", "- old\n- new");
             tests::write_file(
@@ -420,37 +412,149 @@ mod test_file {
                     r#"
 {{"ts":1, "op":"Add", "task": "removed"}}
 {{"ts":2, "op":"Add", "task": "old"}}
-{{"ts":{}, "op":"Add", "task": "enqueued for persistence"}}
+{{"ts":2, "op":"Snapshot", "task": null, "tasks": ["removed", "old"]}}
+{{"ts":{}, "op":"Add", "task": "persisted but not snapshotted"}}
+{{"ts":{}, "op":"Add", "task": "doesnt exist yet"}}
 "#,
-                    2147483647,
+                    Delta::now_time() - 50,
+                    Delta::now_time() + 50,
                 )
                 .as_str(),
             );

             let f = File::new(&d.path().join("plain").to_str().unwrap().to_string());
-            assert_eq!(3, f.events().unwrap().0.len());
+            assert_eq!(5, f.events().unwrap().0.len());
             assert_eq!(2, f.stage().unwrap().len());
             tests::file_contains(&d, "plain", "old");
             tests::file_contains(&d, "plain", "new");

             f.persist_stage().unwrap();
-            assert_eq!(5, f.events().unwrap().0.len());
-            assert_eq!(3, f.stage().unwrap().len());
-            tests::file_contains(&d, "plain", "enqueued");
+            assert_eq!(
+                7,
+                f.events().unwrap().0.len(),
+                "events: {:?}",
+                f.events().unwrap()
+            );
+            assert_eq!(2, f.stage().unwrap().len());
             tests::file_contains(&d, "plain", "new");

             f.stage_persisted().unwrap();
-            assert_eq!(
-                5,
-                f.events().unwrap().0.len(),
-                "{:?}",
-                f.events().unwrap().0
-            );
+            assert_eq!(8, f.events().unwrap().0.len(), "{:?}", f.events().unwrap());
             assert_eq!(3, f.stage().unwrap().len(), "{:?}", f.stage().unwrap());
-            tests::file_contains(&d, "plain", "enqueued");
             tests::file_contains(&d, "plain", "new");
             tests::file_contains(&d, "plain", "old");
+            tests::file_contains(&d, "plain", "persisted but not snapshotted");
+        });
+    }
+
+    #[test]
+    fn test_schedule_date_future() {
+        tests::with_dir(|d| {
+            tests::write_file(&d, "plain", "[]");
+            let f = File::new(&d.path().join("plain").to_str().unwrap().to_string());
+
+            let mut m = serde_yaml::Mapping::new();
+            m.insert("schedule".into(), "2036-01-02".into());
+            let task = Task(serde_yaml::Value::Mapping(m));
+            f.append(Delta::add(task)).unwrap();
+            assert_eq!(1, f.events().unwrap().0.len(), "{:?}", f.events().unwrap());
+            assert_eq!(0, f.stage().unwrap().len(), "{:?}", f.stage());
+
+            f.persist_stage().unwrap();
+            assert_eq!(1, f.events().unwrap().0.len(), "{:?}", f.events().unwrap());
+            assert_eq!(0, f.stage().unwrap().len(), "{:?}", f.stage());
+
+            f.stage_persisted().unwrap();
+            assert_eq!(1, f.events().unwrap().0.len(), "{:?}", f.events().unwrap());
+            assert_eq!(0, f.stage().unwrap().len(), "{:?}", f.stage());
+        });
+    }
+
+    #[test]
+    fn test_schedule_cron_resolve_reschedules() {
+        panic!("not impl");
+    }
+
+    #[test]
+    fn test_schedule_duration_resolve_reschedules() {
+        panic!("not impl");
+    }
+
+    #[test]
+    fn test_schedule_date_resolve_does_not_reschedule() {
+        panic!("not impl");
+    }
+
+    #[test]
+    fn test_schedule_date_future_with_snapshot_between_scheduled_and_fired() {
+        tests::with_dir(|d| {
+            tests::write_file(&d, "plain", "- stage");
+            tests::write_file(
+                &d,
+                ".plain.host",
+                format!(
+                    r#"
+{{"ts":3, "op":"Add", "task": "scheduled add for after snapshot"}}
+{{"ts":2, "op":"Snapshot", "task": null, "tasks": ["removed"]}}
+"#,
+                )
+                .as_str(),
+            );
+
+            let f = File::new(&d.path().join("plain").to_str().unwrap().to_string());
+            assert_eq!(2, f.events().unwrap().0.len(), "{:?}", f.events().unwrap());
+            assert_eq!(1, f.stage().unwrap().len(), "{:?}", f.stage());
+
+            f.persist_stage().unwrap();
+            assert_eq!(4, f.events().unwrap().0.len(), "{:?}", f.events().unwrap());
+            assert_eq!(1, f.stage().unwrap().len(), "{:?}", f.stage());
+
+            f.stage_persisted().unwrap();
+            assert_eq!(5, f.events().unwrap().0.len(), "{:?}", f.events().unwrap());
+            assert_eq!(2, f.stage().unwrap().len(), "{:?}", f.stage());
+        });
+    }
+
+    #[test]
+    fn test_schedule_date_past() {
+        tests::with_dir(|d| {
+            tests::write_file(&d, "plain", "[]");
+            let f = File::new(&d.path().join("plain").to_str().unwrap().to_string());
+
+            let mut m = serde_yaml::Mapping::new();
+            m.insert("schedule".into(), "2006-01-02".into());
+            let task = Task(serde_yaml::Value::Mapping(m));
+            f.append(Delta::add(task)).unwrap();
+            assert_eq!(1, f.events().unwrap().0.len(), "{:?}", f.events().unwrap());
+            assert_eq!(0, f.stage().unwrap().len(), "{:?}", f.stage());
+
+            f.persist_stage().unwrap();
+            assert_eq!(
+                1,
+                f.events().unwrap().0.len(),
+                "events after 1 add scheduled: {:?}",
+                f.events().unwrap()
+            );
+            assert_eq!(
+                1,
+                f.events().unwrap().snapshot().unwrap().len(),
+                "events.snapshot after 1 add scheduled: {:?}",
+                f.events().unwrap().snapshot().unwrap(),
+            );
+            tests::file_contains(&d, "plain", "[]");
+            assert_eq!(
+                0,
+                f.stage().unwrap().len(),
+                "stage after 1 add scheduled: {:?}",
+                f.stage()
+            );
+
+            f.stage_persisted().unwrap();
+            assert_eq!(2, f.events().unwrap().0.len(), "{:?}", f.events().unwrap());
+            assert_eq!(1, f.stage().unwrap().len(), "{:?}", f.stage());
         });
     }
 }
@@ -460,12 +564,14 @@ struct Delta {
     ts: u64,
     op: Op,
     task: Task,
+    tasks: Option<Vec<Task>>,
 }

 #[derive(Debug, Clone, Serialize, Deserialize)]
 enum Op {
     Add,
     Remove,
+    Snapshot,
 }

 impl Delta {
@@ -474,6 +580,16 @@ impl Delta {
             ts: ts,
             op: op,
             task: task,
+            tasks: None,
+        }
+    }
+
+    pub fn snapshot(tasks: Vec<Task>) -> Delta {
+        Delta {
+            ts: Self::now_time(),
+            op: Op::Snapshot,
+            task: Task(serde_yaml::Value::Null),
+            tasks: Some(tasks),
         }
     }
@@ -503,17 +619,21 @@ impl Delta {
 struct Task(serde_yaml::Value);

 impl Task {
-    pub fn due(&self, after: u64) -> bool {
+    pub fn _due(&self, after: u64) -> bool {
         match self.next_due(after) {
             Some(ts) => Delta::now_time() > ts,
             None => true,
         }
     }

+    pub fn must_next_due(&self, after: u64) -> u64 {
+        self.next_due(after).unwrap_or(1)
+    }
+
     pub fn next_due(&self, after: u64) -> Option<u64> {
         match self.schedule() {
             Some(schedule) => self.parse_schedule_next(schedule, after),
-            None => Some(1),
+            None => None,
         }
     }
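
Note the contract change here: next_due() now returns None for an unscheduled task (it used to return Some(1)), and must_next_due() collapses that back to 1, i.e. "due since forever", for callers that need a concrete timestamp. A sketch of the new contract, with values taken from the tests below (next_due(100) per schedule):

    //   no schedule    -> next_due: None,              must_next_due: 1
    //   "2006-01-02"   -> next_due: Some(1136160000),  must_next_due: 1136160000
    //   "1h"           -> next_due: Some(3700),        must_next_due: 3700
    //   "* * * * *"    -> next_due: Some(120),         must_next_due: 120
    fn must_next_due(next_due: Option<u64>) -> u64 {
        next_due.unwrap_or(1)
    }
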
@@ -591,8 +711,8 @@ mod test_task {
     fn test_unscheduled() {
         let task = Task(serde_yaml::Value::String("hello world".to_string()));
         assert_eq!(None, task.schedule());
-        assert_eq!(Some(1 as u64), task.next_due(100));
-        assert!(task.due(100));
+        assert_eq!(1 as u64, task.must_next_due(100));
+        assert!(task._due(100));
     }

     #[test]
@@ -602,7 +722,7 @@ mod test_task {
         let task = Task(serde_yaml::Value::Mapping(m));
         assert_eq!(Some("2006-01-02".to_string()), task.schedule());
         assert_eq!(Some(1136160000 as u64), task.next_due(100));
-        assert!(task.due(100));
+        assert!(task._due(100));
     }

     #[test]
@@ -612,7 +732,7 @@ mod test_task {
         let task = Task(serde_yaml::Value::Mapping(m));
         assert_eq!(Some("2036-01-02".to_string()), task.schedule());
         assert_eq!(Some(2082844800 as u64), task.next_due(100));
-        assert!(!task.due(100));
+        assert!(!task._due(100));
     }

     #[test]
@@ -622,7 +742,7 @@ mod test_task {
         let task = Task(serde_yaml::Value::Mapping(m));
         assert_eq!(Some("2036-01-02T16".to_string()), task.schedule());
         assert_eq!(Some(2082902400 as u64), task.next_due(100));
-        assert!(!task.due(100));
+        assert!(!task._due(100));
     }

     #[test]
@@ -632,7 +752,7 @@ mod test_task {
         let task = Task(serde_yaml::Value::Mapping(m));
         assert_eq!(Some("1h".to_string()), task.schedule());
         assert_eq!(Some(3700), task.next_due(100));
-        assert!(task.due(100));
+        assert!(task._due(100));
     }

     #[test]
@@ -642,13 +762,23 @@ mod test_task {
         let task = Task(serde_yaml::Value::Mapping(m));
         assert_eq!(Some("* * * * *".to_string()), task.schedule());
         assert_eq!(Some(120 as u64), task.next_due(100));
-        assert!(task.due(100));
+        assert!(task._due(100));
     }
 }

-#[derive(Debug, Clone)]
+#[derive(Clone)]
 struct Events(Vec<Delta>);

+impl std::fmt::Debug for Events {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let mut arr = vec![];
+        for i in self.0.iter() {
+            arr.push(format!("{:?}", i.clone()));
+        }
+        write!(f, "[\n {}\n]", arr.join("\n "))
+    }
+}
+
 impl Events {
     pub fn new(file: &String) -> Result<Events, String> {
         let logs = match std::fs::read_dir(Self::dir(&file)) {
@@ -710,11 +840,32 @@ impl Events {
             .to_string()
     }

+    fn last_snapshot(&self) -> Vec<Task> {
+        let reversed_events = {
+            let mut e = self.0.clone();
+            e.reverse();
+            e
+        };
+        for event in reversed_events.iter() {
+            match &event.op {
+                Op::Snapshot => return event.tasks.clone().unwrap(),
+                _ => {}
+            };
+        }
+        vec![]
+    }
+
     fn snapshot(&self) -> Result<Vec<Task>, String> {
         let mut result = vec![];
-        for event in self.0.iter() {
-            match event.op {
-                Op::Add => result.push(event.task.clone()),
+        for event in self.0.iter().filter(|t| t.ts <= Delta::now_time()) {
+            match &event.op {
+                Op::Add => match event.task.next_due(event.ts) {
+                    Some(next_due) => match next_due <= Delta::now_time() {
+                        true => result.push(event.task.clone()),
+                        false => {}
+                    },
+                    None => result.push(event.task.clone()),
+                },
                 Op::Remove => {
                     let mut i = (result.len() - 1) as i32;
                     while i >= 0 {
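
Side note on last_snapshot(): the clone-and-reverse above could equally be written over a reverse iterator; a sketch assuming Events keeps this shape, not part of the change:

    fn last_snapshot(events: &Events) -> Vec<Task> {
        events
            .0
            .iter()
            .rev()
            .find_map(|e| match &e.op {
                Op::Snapshot => e.tasks.clone(),
                _ => None,
            })
            .unwrap_or_default()
    }
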
@@ -728,6 +879,7 @@ impl Events {
                         }
                     }
                 }
+                Op::Snapshot => result = event.tasks.clone().unwrap(),
             };
         }
         Ok(result)
@@ -738,6 +890,31 @@ impl Events {
 mod test_events {
     use super::*;

+    #[test]
+    fn test_events_op_snapshot() {
+        tests::with_dir(|d| {
+            tests::write_file(&d, "plain", "- who cares");
+            tests::write_file(
+                &d,
+                ".plain.some_host",
+                r#"
+{"ts":1, "op":"Snapshot", "task":"", "tasks":["snapshotted"]}
+"#,
+            );
+            let events =
+                Events::new(&d.path().join("plain").to_str().unwrap().to_string()).unwrap();
+            assert_eq!(1, events.0.len(), "events: {:?}", events);
+            let snapshot = events.snapshot().unwrap();
+            assert_eq!(1, snapshot.len());
+            assert_eq!(
+                serde_yaml::Value::String("snapshotted".to_string()),
+                snapshot[0].0
+            );
+        });
+    }
+
     #[test]
     fn test_events_oplog_to_snapshot_one() {
         tests::with_dir(|d| {
@@ -826,6 +1003,7 @@ mod tests {
         f.sync_all().unwrap();
     }

+    #[allow(dead_code)]
     pub fn file_contains(d: &tempdir::TempDir, fname: &str, content: &str) {
         let p = d.path().join(&fname);
         let file_content = file_content(&p.to_str().unwrap().to_string());

View File

@@ -1,4 +1,32 @@
{"ts":1762884455,"patch":{"op":"replace","path":"","value":["read; https://topicpartition.io/blog/postgres-pubsub-queue-benchmarks","pglogical vs ha\n\n# api.git#breel/keys-620-pglogical-always-set-cr/2-user-survives-cr\n$ mise run pulsegres-new ^logical/toggl\n","drive; VERIFY spoc posts daily summary w/ unresolved","drive; VERIFY spoc refreshes summary w/ thread comment contianing 'refresh'","637; reconcile deploy if replicas wrong; https://github.com/renderinc/api/pull/26540/files","https://linear.app/render-com/issue/KEYS-633/add-3-when-max-connections-overridden-for-3-superuser-connections","https://linear.app/render-com/issue/KEYS-637/billing-resume-should-1-unsuspend-pg-in-cloudsql-2-unsuspend-pg-in-cr","https://linear.app/render-com/issue/KEYS-638/pgoperator-generates-new-ha-patroni-cert-every-reconcile-no-matter","pg; how2partition; https://renderinc.slack.com/archives/C0319NYCSSG/p1756357545556659?thread_ts=1756357467.613369&cid=C0319NYCSSG","pitr; backup purge cronjob for PL types","pg11 pgbackup doesnt write to envsetting mucked env key","incident io; teach spocbotvr to read slacks","userdb to internal; peer packages can use internal as userdb","fcr; cannot pitr because pgbackrest doesnt know wal spans thus pgexporter and friends cant know pitr works","etcd statefulset of 1 (for no random podname, no conflict, k8s ensures pod replace)\npatroni always\n","maher; https://slab.render.com/posts/hopes-and-dreams-blegf8fx#hdsyt-valkey-bundle","maher; shadow lizhi pm loops","maher; get more interviewers","maher; get concrete career and project plans so i can get promo in 2y; no manager to advocate","read; https://trychroma.com/engineering/wal3","read; https://github.com/renderinc/dashboard/pull/8883","read; https://litestream.io/getting-started/","kr\nto del gcloud old key\nie https://console.cloud.google.com/iam-admin/serviceaccounts/details/104206017956912104938/keys?hl=en&project=render-prod\n",{"subtasks":["","pitr\nhttps://slab.render.com/posts/pitr-as-a-service-health-abvnqx11\nmore aggressive alert autotune backup cores\nmore aggressive alert on MOAR backup cores\ncreate alert autotune archive-push cores\ncreate alert MOAR archive-push cores\n","cr; frontend","cr; cli.git","cr; public-api-schema.git; https://github.com/renderinc/public-api-schema/pull/407 STILL NEED EVENTS","cr; website.git","cr; changelog","ops; pgproxy rate limits 50ps 100burst; https://github.com/renderinc/dbproxy/pull/91","2873; no conn patroni if upgradeInProgressWithoutHA; https://github.com/renderinc/api/pull/26328","2733; only EnvSettings; https://github.com/renderinc/api/pull/25322/files","pg18; after cred rotation works, re enable e2e","655; pg18; pub api sch; https://github.com/renderinc/public-api-schema/pull/421","655; pg18; go generate pub api sch; https://github.com/renderinc/api/pull/26694","663; das; show status in /info; https://github.com/renderinc/dashboard/pull/9616","664; pg18; go gen terraform; https://github.com/renderinc/api/pull/26701","664; pg18; ga; push terraform.git#breel/keys-664-pg18","656; pg18; website; https://github.com/renderinc/website/pull/985/files","663; das; note disk cannot decrease even if autoscaled; https://github.com/renderinc/dashboard/pull/9621","pulsegres; pls let me keep my test emails; https://github.com/renderinc/api/pull/26741","pgup; restore view owner; https://github.com/renderinc/api/pull/26814","pgup; resync if missing resync; https://github.com/renderinc/api/pull/26817","pgup; replicas use $RESYNC; 
https://github.com/renderinc/api/pull/26878"],"todo":"blocked"}]}} {"ts":1762915973,"op":"Add","task":"read; https://topicpartition.io/blog/postgres-pubsub-queue-benchmarks","tasks":null}
{"ts":1762885026,"patch":{"op":"add","path":"/-","value":"hi"}} {"ts":1762915973,"op":"Add","task":"pglogical vs ha\n\n# api.git#breel/keys-620-pglogical-always-set-cr/2-user-survives-cr\n$ mise run pulsegres-new ^logical/toggl\n","tasks":null}
{"ts":1762915959,"patch":{"op":"add","path":"/-","value":"enqueued add"}} {"ts":1762915973,"op":"Add","task":"drive; VERIFY spoc posts daily summary w/ unresolved","tasks":null}
{"ts":1762915973,"patch":{"op":"remove","path":"/25"}} {"ts":1762915973,"op":"Add","task":"drive; VERIFY spoc refreshes summary w/ thread comment contianing 'refresh'","tasks":null}
{"ts":1762915973,"op":"Add","task":"637; reconcile deploy if replicas wrong; https://github.com/renderinc/api/pull/26540/files","tasks":null}
{"ts":1762915973,"op":"Add","task":"https://linear.app/render-com/issue/KEYS-633/add-3-when-max-connections-overridden-for-3-superuser-connections","tasks":null}
{"ts":1762915973,"op":"Add","task":"https://linear.app/render-com/issue/KEYS-637/billing-resume-should-1-unsuspend-pg-in-cloudsql-2-unsuspend-pg-in-cr","tasks":null}
{"ts":1762915973,"op":"Add","task":"https://linear.app/render-com/issue/KEYS-638/pgoperator-generates-new-ha-patroni-cert-every-reconcile-no-matter","tasks":null}
{"ts":1762915973,"op":"Add","task":"pg; how2partition; https://renderinc.slack.com/archives/C0319NYCSSG/p1756357545556659?thread_ts=1756357467.613369&cid=C0319NYCSSG","tasks":null}
{"ts":1762915973,"op":"Add","task":"pitr; backup purge cronjob for PL types","tasks":null}
{"ts":1762915973,"op":"Add","task":"pg11 pgbackup doesnt write to envsetting mucked env key","tasks":null}
{"ts":1762915973,"op":"Add","task":"incident io; teach spocbotvr to read slacks","tasks":null}
{"ts":1762915973,"op":"Add","task":"userdb to internal; peer packages can use internal as userdb","tasks":null}
{"ts":1762915973,"op":"Add","task":"fcr; cannot pitr because pgbackrest doesnt know wal spans thus pgexporter and friends cant know pitr works","tasks":null}
{"ts":1762915973,"op":"Add","task":"etcd statefulset of 1 (for no random podname, no conflict, k8s ensures pod replace)\npatroni always\n","tasks":null}
{"ts":1762915973,"op":"Add","task":"maher; https://slab.render.com/posts/hopes-and-dreams-blegf8fx#hdsyt-valkey-bundle","tasks":null}
{"ts":1762915973,"op":"Add","task":"maher; shadow lizhi pm loops","tasks":null}
{"ts":1762915973,"op":"Add","task":"maher; get more interviewers","tasks":null}
{"ts":1762915973,"op":"Add","task":"maher; get concrete career and project plans so i can get promo in 2y; no manager to advocate","tasks":null}
{"ts":1762915973,"op":"Add","task":"read; https://trychroma.com/engineering/wal3","tasks":null}
{"ts":1762915973,"op":"Add","task":"read; https://github.com/renderinc/dashboard/pull/8883","tasks":null}
{"ts":1762915973,"op":"Add","task":"read; https://litestream.io/getting-started/","tasks":null}
{"ts":1762915973,"op":"Add","task":"kr\nto del gcloud old key\nie https://console.cloud.google.com/iam-admin/serviceaccounts/details/104206017956912104938/keys?hl=en&project=render-prod\n","tasks":null}
{"ts":1762915973,"op":"Add","task":{"subtasks":["","pitr\nhttps://slab.render.com/posts/pitr-as-a-service-health-abvnqx11\nmore aggressive alert autotune backup cores\nmore aggressive alert on MOAR backup cores\ncreate alert autotune archive-push cores\ncreate alert MOAR archive-push cores\n","cr; frontend","cr; cli.git","cr; public-api-schema.git; https://github.com/renderinc/public-api-schema/pull/407 STILL NEED EVENTS","cr; website.git","cr; changelog","ops; pgproxy rate limits 50ps 100burst; https://github.com/renderinc/dbproxy/pull/91","2873; no conn patroni if upgradeInProgressWithoutHA; https://github.com/renderinc/api/pull/26328","2733; only EnvSettings; https://github.com/renderinc/api/pull/25322/files","pg18; after cred rotation works, re enable e2e","655; pg18; pub api sch; https://github.com/renderinc/public-api-schema/pull/421","655; pg18; go generate pub api sch; https://github.com/renderinc/api/pull/26694","663; das; show status in /info; https://github.com/renderinc/dashboard/pull/9616","664; pg18; go gen terraform; https://github.com/renderinc/api/pull/26701","664; pg18; ga; push terraform.git#breel/keys-664-pg18","656; pg18; website; https://github.com/renderinc/website/pull/985/files","663; das; note disk cannot decrease even if autoscaled; https://github.com/renderinc/dashboard/pull/9621","pulsegres; pls let me keep my test emails; https://github.com/renderinc/api/pull/26741","pgup; restore view owner; https://github.com/renderinc/api/pull/26814","pgup; resync if missing resync; https://github.com/renderinc/api/pull/26817","pgup; replicas use $RESYNC; https://github.com/renderinc/api/pull/26878"],"todo":"blocked"},"tasks":null}
{"ts":1762915973,"op":"Add","task":"hi","tasks":null}
{"ts":1764635053,"op":"Snapshot","task":null,"tasks":["read; https://topicpartition.io/blog/postgres-pubsub-queue-benchmarks","pglogical vs ha\n\n# api.git#breel/keys-620-pglogical-always-set-cr/2-user-survives-cr\n$ mise run pulsegres-new ^logical/toggl\n","drive; VERIFY spoc posts daily summary w/ unresolved","drive; VERIFY spoc refreshes summary w/ thread comment contianing 'refresh'","637; reconcile deploy if replicas wrong; https://github.com/renderinc/api/pull/26540/files","https://linear.app/render-com/issue/KEYS-633/add-3-when-max-connections-overridden-for-3-superuser-connections","https://linear.app/render-com/issue/KEYS-637/billing-resume-should-1-unsuspend-pg-in-cloudsql-2-unsuspend-pg-in-cr","https://linear.app/render-com/issue/KEYS-638/pgoperator-generates-new-ha-patroni-cert-every-reconcile-no-matter","pg; how2partition; https://renderinc.slack.com/archives/C0319NYCSSG/p1756357545556659?thread_ts=1756357467.613369&cid=C0319NYCSSG","pitr; backup purge cronjob for PL types","pg11 pgbackup doesnt write to envsetting mucked env key","incident io; teach spocbotvr to read slacks","userdb to internal; peer packages can use internal as userdb","fcr; cannot pitr because pgbackrest doesnt know wal spans thus pgexporter and friends cant know pitr works","etcd statefulset of 1 (for no random podname, no conflict, k8s ensures pod replace)\npatroni always\n","maher; https://slab.render.com/posts/hopes-and-dreams-blegf8fx#hdsyt-valkey-bundle","maher; shadow lizhi pm loops","maher; get more interviewers","maher; get concrete career and project plans so i can get promo in 2y; no manager to advocate","read; https://trychroma.com/engineering/wal3","read; https://github.com/renderinc/dashboard/pull/8883","read; https://litestream.io/getting-started/","kr\nto del gcloud old key\nie https://console.cloud.google.com/iam-admin/serviceaccounts/details/104206017956912104938/keys?hl=en&project=render-prod\n",{"subtasks":["","pitr\nhttps://slab.render.com/posts/pitr-as-a-service-health-abvnqx11\nmore aggressive alert autotune backup cores\nmore aggressive alert on MOAR backup cores\ncreate alert autotune archive-push cores\ncreate alert MOAR archive-push cores\n","cr; frontend","cr; cli.git","cr; public-api-schema.git; https://github.com/renderinc/public-api-schema/pull/407 STILL NEED EVENTS","cr; website.git","cr; changelog","ops; pgproxy rate limits 50ps 100burst; https://github.com/renderinc/dbproxy/pull/91","2873; no conn patroni if upgradeInProgressWithoutHA; https://github.com/renderinc/api/pull/26328","2733; only EnvSettings; https://github.com/renderinc/api/pull/25322/files","pg18; after cred rotation works, re enable e2e","655; pg18; pub api sch; https://github.com/renderinc/public-api-schema/pull/421","655; pg18; go generate pub api sch; https://github.com/renderinc/api/pull/26694","663; das; show status in /info; https://github.com/renderinc/dashboard/pull/9616","664; pg18; go gen terraform; https://github.com/renderinc/api/pull/26701","664; pg18; ga; push terraform.git#breel/keys-664-pg18","656; pg18; website; https://github.com/renderinc/website/pull/985/files","663; das; note disk cannot decrease even if autoscaled; https://github.com/renderinc/dashboard/pull/9621","pulsegres; pls let me keep my test emails; https://github.com/renderinc/api/pull/26741","pgup; restore view owner; https://github.com/renderinc/api/pull/26814","pgup; resync if missing resync; https://github.com/renderinc/api/pull/26817","pgup; replicas use $RESYNC; 
https://github.com/renderinc/api/pull/26878"],"todo":"blocked"},"hi"]}
{"ts":1764636274,"op":"Add","task":{"schedule":"2026-01-01","todo":"not yet due"},"tasks":null}
{"ts":1764721753,"op":"Add","task":"just_add","tasks":null}
{"ts":1764721753,"op":"Snapshot","task":null,"tasks":["read; https://topicpartition.io/blog/postgres-pubsub-queue-benchmarks","pglogical vs ha\n\n# api.git#breel/keys-620-pglogical-always-set-cr/2-user-survives-cr\n$ mise run pulsegres-new ^logical/toggl\n","drive; VERIFY spoc posts daily summary w/ unresolved","drive; VERIFY spoc refreshes summary w/ thread comment contianing 'refresh'","637; reconcile deploy if replicas wrong; https://github.com/renderinc/api/pull/26540/files","https://linear.app/render-com/issue/KEYS-633/add-3-when-max-connections-overridden-for-3-superuser-connections","https://linear.app/render-com/issue/KEYS-637/billing-resume-should-1-unsuspend-pg-in-cloudsql-2-unsuspend-pg-in-cr","https://linear.app/render-com/issue/KEYS-638/pgoperator-generates-new-ha-patroni-cert-every-reconcile-no-matter","pg; how2partition; https://renderinc.slack.com/archives/C0319NYCSSG/p1756357545556659?thread_ts=1756357467.613369&cid=C0319NYCSSG","pitr; backup purge cronjob for PL types","pg11 pgbackup doesnt write to envsetting mucked env key","incident io; teach spocbotvr to read slacks","userdb to internal; peer packages can use internal as userdb","fcr; cannot pitr because pgbackrest doesnt know wal spans thus pgexporter and friends cant know pitr works","etcd statefulset of 1 (for no random podname, no conflict, k8s ensures pod replace)\npatroni always\n","maher; https://slab.render.com/posts/hopes-and-dreams-blegf8fx#hdsyt-valkey-bundle","maher; shadow lizhi pm loops","maher; get more interviewers","maher; get concrete career and project plans so i can get promo in 2y; no manager to advocate","read; https://trychroma.com/engineering/wal3","read; https://github.com/renderinc/dashboard/pull/8883","read; https://litestream.io/getting-started/","kr\nto del gcloud old key\nie https://console.cloud.google.com/iam-admin/serviceaccounts/details/104206017956912104938/keys?hl=en&project=render-prod\n",{"subtasks":["","pitr\nhttps://slab.render.com/posts/pitr-as-a-service-health-abvnqx11\nmore aggressive alert autotune backup cores\nmore aggressive alert on MOAR backup cores\ncreate alert autotune archive-push cores\ncreate alert MOAR archive-push cores\n","cr; frontend","cr; cli.git","cr; public-api-schema.git; https://github.com/renderinc/public-api-schema/pull/407 STILL NEED EVENTS","cr; website.git","cr; changelog","ops; pgproxy rate limits 50ps 100burst; https://github.com/renderinc/dbproxy/pull/91","2873; no conn patroni if upgradeInProgressWithoutHA; https://github.com/renderinc/api/pull/26328","2733; only EnvSettings; https://github.com/renderinc/api/pull/25322/files","pg18; after cred rotation works, re enable e2e","655; pg18; pub api sch; https://github.com/renderinc/public-api-schema/pull/421","655; pg18; go generate pub api sch; https://github.com/renderinc/api/pull/26694","663; das; show status in /info; https://github.com/renderinc/dashboard/pull/9616","664; pg18; go gen terraform; https://github.com/renderinc/api/pull/26701","664; pg18; ga; push terraform.git#breel/keys-664-pg18","656; pg18; website; https://github.com/renderinc/website/pull/985/files","663; das; note disk cannot decrease even if autoscaled; https://github.com/renderinc/dashboard/pull/9621","pulsegres; pls let me keep my test emails; https://github.com/renderinc/api/pull/26741","pgup; restore view owner; https://github.com/renderinc/api/pull/26814","pgup; resync if missing resync; https://github.com/renderinc/api/pull/26817","pgup; replicas use $RESYNC; 
https://github.com/renderinc/api/pull/26878"],"todo":"blocked"},"hi","just_add"]}
{"ts":1764721753,"op":"Add","task":{"schedule":"2000-01-01","do":"add_past"},"tasks":null}
{"ts":1764721753,"op":"Snapshot","task":null,"tasks":["read; https://topicpartition.io/blog/postgres-pubsub-queue-benchmarks","pglogical vs ha\n\n# api.git#breel/keys-620-pglogical-always-set-cr/2-user-survives-cr\n$ mise run pulsegres-new ^logical/toggl\n","drive; VERIFY spoc posts daily summary w/ unresolved","drive; VERIFY spoc refreshes summary w/ thread comment contianing 'refresh'","637; reconcile deploy if replicas wrong; https://github.com/renderinc/api/pull/26540/files","https://linear.app/render-com/issue/KEYS-633/add-3-when-max-connections-overridden-for-3-superuser-connections","https://linear.app/render-com/issue/KEYS-637/billing-resume-should-1-unsuspend-pg-in-cloudsql-2-unsuspend-pg-in-cr","https://linear.app/render-com/issue/KEYS-638/pgoperator-generates-new-ha-patroni-cert-every-reconcile-no-matter","pg; how2partition; https://renderinc.slack.com/archives/C0319NYCSSG/p1756357545556659?thread_ts=1756357467.613369&cid=C0319NYCSSG","pitr; backup purge cronjob for PL types","pg11 pgbackup doesnt write to envsetting mucked env key","incident io; teach spocbotvr to read slacks","userdb to internal; peer packages can use internal as userdb","fcr; cannot pitr because pgbackrest doesnt know wal spans thus pgexporter and friends cant know pitr works","etcd statefulset of 1 (for no random podname, no conflict, k8s ensures pod replace)\npatroni always\n","maher; https://slab.render.com/posts/hopes-and-dreams-blegf8fx#hdsyt-valkey-bundle","maher; shadow lizhi pm loops","maher; get more interviewers","maher; get concrete career and project plans so i can get promo in 2y; no manager to advocate","read; https://trychroma.com/engineering/wal3","read; https://github.com/renderinc/dashboard/pull/8883","read; https://litestream.io/getting-started/","kr\nto del gcloud old key\nie https://console.cloud.google.com/iam-admin/serviceaccounts/details/104206017956912104938/keys?hl=en&project=render-prod\n",{"subtasks":["","pitr\nhttps://slab.render.com/posts/pitr-as-a-service-health-abvnqx11\nmore aggressive alert autotune backup cores\nmore aggressive alert on MOAR backup cores\ncreate alert autotune archive-push cores\ncreate alert MOAR archive-push cores\n","cr; frontend","cr; cli.git","cr; public-api-schema.git; https://github.com/renderinc/public-api-schema/pull/407 STILL NEED EVENTS","cr; website.git","cr; changelog","ops; pgproxy rate limits 50ps 100burst; https://github.com/renderinc/dbproxy/pull/91","2873; no conn patroni if upgradeInProgressWithoutHA; https://github.com/renderinc/api/pull/26328","2733; only EnvSettings; https://github.com/renderinc/api/pull/25322/files","pg18; after cred rotation works, re enable e2e","655; pg18; pub api sch; https://github.com/renderinc/public-api-schema/pull/421","655; pg18; go generate pub api sch; https://github.com/renderinc/api/pull/26694","663; das; show status in /info; https://github.com/renderinc/dashboard/pull/9616","664; pg18; go gen terraform; https://github.com/renderinc/api/pull/26701","664; pg18; ga; push terraform.git#breel/keys-664-pg18","656; pg18; website; https://github.com/renderinc/website/pull/985/files","663; das; note disk cannot decrease even if autoscaled; https://github.com/renderinc/dashboard/pull/9621","pulsegres; pls let me keep my test emails; https://github.com/renderinc/api/pull/26741","pgup; restore view owner; https://github.com/renderinc/api/pull/26814","pgup; resync if missing resync; https://github.com/renderinc/api/pull/26817","pgup; replicas use $RESYNC; 
https://github.com/renderinc/api/pull/26878"],"todo":"blocked"},"hi","just_add",{"schedule":"2000-01-01","do":"add_past"}]}
{"ts":2051222400,"op":"Add","task":{"schedule":"2035-01-01","do":"add_future"},"tasks":null}

View File

@@ -61,4 +61,7 @@
   - pgup; replicas use $RESYNC; https://github.com/renderinc/api/pull/26878
   todo: blocked
 - hi
+- just_add
+- schedule: 2000-01-01
+  do: add_past