Compare commits
166 Commits
2911b2f884
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
31fe916000 | ||
|
|
d31042e971 | ||
|
|
2e7d58fd13 | ||
|
|
93672e67a6 | ||
|
|
1b06f727fd | ||
|
|
3ae62390cf | ||
|
|
5cfb89bc64 | ||
|
|
b554be6282 | ||
|
|
5785ea37ae | ||
|
|
f27d416a5a | ||
|
|
81793876f8 | ||
|
|
6d81164161 | ||
|
|
20256bd6b4 | ||
|
|
e5e98e2890 | ||
|
|
4fb26ec775 | ||
|
|
782b9ec3cf | ||
|
|
9d7f69bd8a | ||
|
|
12de99da57 | ||
|
|
81fe8070ca | ||
|
|
79de56e236 | ||
|
|
f485b5ea88 | ||
|
|
894536d209 | ||
|
|
f8861a73b5 | ||
|
|
14de286415 | ||
|
|
8557ddc522 | ||
|
|
1e43c2a14e | ||
|
|
04c574ffec | ||
|
|
b2f64037e2 | ||
|
|
fbd151f9ef | ||
|
|
5f21098fdc | ||
|
|
7c2d663401 | ||
|
|
95b0394199 | ||
|
|
d9d91193dd | ||
|
|
2033cefc2a | ||
|
|
600a2e0111 | ||
|
|
4f3b8ec866 | ||
|
|
c4a7eaf04a | ||
|
|
5557c0920a | ||
|
|
c7f5cdb040 | ||
|
|
a8e8fdc451 | ||
|
|
d88a8bb23a | ||
|
|
39c0056190 | ||
|
|
d87af2fadc | ||
|
|
9bc47bfde6 | ||
|
|
098986eb07 | ||
|
|
5bc068451f | ||
|
|
5fa21d0cd9 | ||
|
|
709f2ac254 | ||
|
|
ba06796b8c | ||
|
|
f38c183fe8 | ||
|
|
8ae8f47753 | ||
|
|
e372be4288 | ||
|
|
acfd95e5af | ||
|
|
d70a0e313f | ||
|
|
0cecd5ea04 | ||
|
|
a7d5d021d6 | ||
|
|
44db0c6939 | ||
|
|
dd98aedb5d | ||
|
|
254cb1ec0a | ||
|
|
38a68de67f | ||
|
|
3c62411927 | ||
|
|
c84d80e8d3 | ||
|
|
74477fc09c | ||
|
|
1fd4b72b22 | ||
|
|
d9244e4e1c | ||
|
|
c9d3b4998b | ||
|
|
c5e1556f61 | ||
|
|
d76f8e2c15 | ||
|
|
ff280997b1 | ||
|
|
83c0ee3f53 | ||
|
|
9d7a175c62 | ||
|
|
1dcffdd956 | ||
|
|
580068d98b | ||
|
|
eec5c39725 | ||
|
|
9848492b1e | ||
|
|
a674022357 | ||
|
|
80df07089f | ||
|
|
d792626c2f | ||
|
|
acac2a60b0 | ||
|
|
eef78d6e39 | ||
|
|
42c5b7d7ad | ||
|
|
e85a2d25a1 | ||
|
|
8193bf7377 | ||
|
|
2f3739b24f | ||
|
|
d38352f050 | ||
|
|
ba833fa315 | ||
|
|
d7cbcb9926 | ||
|
|
961be827d0 | ||
|
|
6fbafe6700 | ||
|
|
7df7528ccf | ||
|
|
a91da082c7 | ||
|
|
af2ad44109 | ||
|
|
cabc5c00b7 | ||
|
|
84dec31e53 | ||
|
|
f2a23e5d8a | ||
|
|
a8270b524c | ||
|
|
902ab96b2d | ||
|
|
60017a8d3a | ||
|
|
39ed9280e1 | ||
|
|
007611fb4f | ||
|
|
f8002053f5 | ||
|
|
4e3818046d | ||
|
|
c89a9a8ada | ||
|
|
ac6bf30042 | ||
|
|
4eb2117f21 | ||
|
|
6411011e62 | ||
|
|
444ca5d0ca | ||
|
|
bb72ff4bfa | ||
|
|
e8d52274e7 | ||
|
|
8a67f505ce | ||
|
|
f4b04e01d3 | ||
|
|
e33d1a6a4b | ||
|
|
4ac55e2eea | ||
|
|
b0c9c1cf9e | ||
|
|
bf34835305 | ||
|
|
a5f332b991 | ||
|
|
258a51af0b | ||
|
|
a3630a8fda | ||
|
|
6b962ea509 | ||
|
|
b1d93a7698 | ||
|
|
4111ce9153 | ||
|
|
85d589a570 | ||
|
|
10630df394 | ||
|
|
1324376399 | ||
|
|
e58fa50656 | ||
|
|
9bfbcf2d70 | ||
|
|
847cd83fd5 | ||
|
|
e7b3418932 | ||
|
|
ab933e3c53 | ||
|
|
d5b09db0c6 | ||
|
|
f288a9b098 | ||
|
|
ecf29d54b8 | ||
|
|
da0125c663 | ||
|
|
5f31a2c572 | ||
|
|
046dc0e1ba | ||
|
|
a48518fea6 | ||
|
|
163f894f3a | ||
|
|
109899f9f8 | ||
|
|
20a9589eb8 | ||
|
|
a8d1d69f63 | ||
|
|
a7e254ff94 | ||
|
|
a2cc8ed2a1 | ||
|
|
d5b84da8f5 | ||
|
|
b6526b8360 | ||
|
|
c0977a6b7a | ||
|
|
feb48ee4bc | ||
|
|
88f6746c85 | ||
|
|
5817dce70e | ||
|
|
04c7a5c9e1 | ||
|
|
83c8fccb78 | ||
|
|
cb44dfd49d | ||
|
|
6ca1f83727 | ||
|
|
02331752fe | ||
|
|
0d3910829d | ||
|
|
8f288cf12e | ||
|
|
1b148092b9 | ||
|
|
24628f4ebb | ||
|
|
c51e580e09 | ||
|
|
82fc7526d0 | ||
|
|
02fecb7eb2 | ||
|
|
6ba23e61c3 | ||
|
|
8883856a63 | ||
|
|
634255725a | ||
|
|
233ba8ae2b | ||
|
|
ccee4a49da | ||
|
|
4080af952d |
2
.gitignore
vendored
2
.gitignore
vendored
@@ -1 +1,3 @@
|
||||
/slack-bot-vr
|
||||
**/*.sw*
|
||||
/spoc-bot-vr
|
||||
|
||||
222
.message.go
Normal file
222
.message.go
Normal file
@@ -0,0 +1,222 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrIrrelevantMessage = errors.New("message isnt relevant to spoc bot vr")
|
||||
)
|
||||
|
||||
type Message struct {
|
||||
ID string
|
||||
TS uint64
|
||||
Source string
|
||||
Channel string
|
||||
Thread string
|
||||
EventName string
|
||||
Event string
|
||||
Plaintext string
|
||||
Asset string
|
||||
Resolved bool
|
||||
Datacenter string
|
||||
}
|
||||
|
||||
func (m Message) Empty() bool {
|
||||
return m == (Message{})
|
||||
}
|
||||
|
||||
func (m Message) Time() time.Time {
|
||||
return time.Unix(int64(m.TS), 0)
|
||||
}
|
||||
|
||||
func (m Message) Serialize() []byte {
|
||||
b, err := json.Marshal(m)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func MustDeserialize(b []byte) Message {
|
||||
m, err := Deserialize(b)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func Deserialize(b []byte) (Message, error) {
|
||||
var m Message
|
||||
err := json.Unmarshal(b, &m)
|
||||
return m, err
|
||||
}
|
||||
|
||||
type (
|
||||
slackMessage struct {
|
||||
slackEvent
|
||||
Type string
|
||||
TS uint64 `json:"event_time"`
|
||||
Event slackEvent
|
||||
MessageTS string `json:"ts"`
|
||||
}
|
||||
|
||||
slackEvent struct {
|
||||
ID string `json:"event_ts"`
|
||||
Channel string
|
||||
// rewrites
|
||||
Nested *slackEvent `json:"message"`
|
||||
PreviousMessage *slackEvent `json:"previous_message"`
|
||||
// human
|
||||
ParentID string `json:"thread_ts"`
|
||||
Text string
|
||||
Blocks []slackBlock
|
||||
// bot
|
||||
Bot slackBot `json:"bot_profile"`
|
||||
Attachments []slackAttachment
|
||||
}
|
||||
|
||||
slackBlock struct {
|
||||
Elements []slackElement
|
||||
}
|
||||
|
||||
slackElement struct {
|
||||
Elements []slackElement
|
||||
RichText string `json:"text"`
|
||||
}
|
||||
|
||||
slackBot struct {
|
||||
Name string
|
||||
}
|
||||
|
||||
slackAttachment struct {
|
||||
Color string
|
||||
Title string
|
||||
Text string
|
||||
Fields []slackField
|
||||
Actions []slackAction
|
||||
}
|
||||
|
||||
slackField struct {
|
||||
Value string
|
||||
Title string
|
||||
}
|
||||
|
||||
slackAction struct{}
|
||||
)
|
||||
|
||||
func ParseSlack(b []byte, assetPattern, datacenterPattern, eventNamePattern string) (Message, error) {
|
||||
return ParseSlackFromChannel(b, assetPattern, datacenterPattern, eventNamePattern, "")
|
||||
}
|
||||
|
||||
func ParseSlackFromChannel(b []byte, assetPattern, datacenterPattern, eventNamePattern string, ch string) (Message, error) {
|
||||
m, err := parseSlackJSON(b, ch)
|
||||
if err != nil {
|
||||
return Message{}, err
|
||||
}
|
||||
|
||||
for pattern, ptr := range map[string]*string{
|
||||
assetPattern: &m.Asset,
|
||||
datacenterPattern: &m.Datacenter,
|
||||
eventNamePattern: &m.EventName,
|
||||
} {
|
||||
r := regexp.MustCompile(pattern)
|
||||
parsed := r.FindString(*ptr)
|
||||
for i, name := range r.SubexpNames() {
|
||||
if i > 0 && name != "" {
|
||||
parsed = r.FindStringSubmatch(*ptr)[i]
|
||||
}
|
||||
}
|
||||
*ptr = parsed
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func parseSlackJSON(b []byte, ch string) (Message, error) {
|
||||
s, err := _parseSlackJSON(b)
|
||||
if err != nil {
|
||||
return Message{}, err
|
||||
}
|
||||
|
||||
if ch != "" {
|
||||
s.Event.Channel = ch
|
||||
}
|
||||
|
||||
if s.Event.Bot.Name != "" {
|
||||
if len(s.Event.Attachments) == 0 {
|
||||
return Message{}, ErrIrrelevantMessage
|
||||
} else if !strings.Contains(s.Event.Attachments[0].Title, ": Firing: ") {
|
||||
return Message{}, ErrIrrelevantMessage
|
||||
}
|
||||
var tagsField string
|
||||
for _, field := range s.Event.Attachments[0].Fields {
|
||||
if field.Title == "Tags" {
|
||||
tagsField = field.Value
|
||||
}
|
||||
}
|
||||
return Message{
|
||||
ID: fmt.Sprintf("%s/%v", s.Event.ID, s.TS),
|
||||
TS: s.TS,
|
||||
Source: fmt.Sprintf(`https://renderinc.slack.com/archives/%s/p%s`, s.Event.Channel, strings.ReplaceAll(s.Event.ID, ".", "")),
|
||||
Channel: s.Event.Channel,
|
||||
Thread: s.Event.ID,
|
||||
EventName: strings.Split(s.Event.Attachments[0].Title, ": Firing: ")[1],
|
||||
Event: strings.TrimPrefix(strings.Split(s.Event.Attachments[0].Title, ":")[0], "#"),
|
||||
Plaintext: s.Event.Attachments[0].Text,
|
||||
Asset: s.Event.Attachments[0].Text,
|
||||
Resolved: !strings.HasPrefix(s.Event.Attachments[0].Color, "F"),
|
||||
Datacenter: tagsField,
|
||||
}, nil
|
||||
}
|
||||
|
||||
if s.Event.ParentID == "" {
|
||||
return Message{}, ErrIrrelevantMessage
|
||||
}
|
||||
return Message{
|
||||
ID: fmt.Sprintf("%s/%v", s.Event.ParentID, s.TS),
|
||||
TS: s.TS,
|
||||
Source: fmt.Sprintf(`https://renderinc.slack.com/archives/%s/p%s`, s.Event.Channel, strings.ReplaceAll(s.Event.ParentID, ".", "")),
|
||||
Channel: s.Event.Channel,
|
||||
Thread: s.Event.ParentID,
|
||||
EventName: "",
|
||||
Event: "",
|
||||
Plaintext: s.Event.Text,
|
||||
Asset: "",
|
||||
Datacenter: "",
|
||||
}, nil
|
||||
}
|
||||
|
||||
func _parseSlackJSON(b []byte) (slackMessage, error) {
|
||||
var result slackMessage
|
||||
err := json.Unmarshal(b, &result)
|
||||
switch result.Type {
|
||||
case "message":
|
||||
result.Event = result.slackEvent
|
||||
result.TS, _ = strconv.ParseUint(strings.Split(result.MessageTS, ".")[0], 10, 64)
|
||||
result.Event.ID = result.MessageTS
|
||||
}
|
||||
if result.Event.Nested != nil && !result.Event.Nested.Empty() {
|
||||
result.Event.Blocks = result.Event.Nested.Blocks
|
||||
result.Event.Bot = result.Event.Nested.Bot
|
||||
result.Event.Attachments = result.Event.Nested.Attachments
|
||||
result.Event.Nested = nil
|
||||
}
|
||||
if result.Event.PreviousMessage != nil {
|
||||
if result.Event.PreviousMessage.ID != "" {
|
||||
result.Event.ID = result.Event.PreviousMessage.ID
|
||||
}
|
||||
result.Event.PreviousMessage = nil
|
||||
}
|
||||
return result, err
|
||||
}
|
||||
|
||||
func (this slackEvent) Empty() bool {
|
||||
return fmt.Sprintf("%+v", this) == fmt.Sprintf("%+v", slackEvent{})
|
||||
}
|
||||
148
.report.go
Normal file
148
.report.go
Normal file
@@ -0,0 +1,148 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
_ "embed"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io"
|
||||
"slices"
|
||||
"sort"
|
||||
"text/template"
|
||||
"time"
|
||||
)
|
||||
|
||||
//go:embed report.tmpl
|
||||
var reportTMPL string
|
||||
|
||||
func ReportSince(ctx context.Context, w io.Writer, s Storage, t time.Time) error {
|
||||
tmpl := template.New("report").Funcs(map[string]any{
|
||||
"time": func(foo string, args ...any) (any, error) {
|
||||
switch foo {
|
||||
case "Unix":
|
||||
seconds, _ := args[0].(uint64)
|
||||
return time.Unix(int64(seconds), 0), nil
|
||||
case "Time.Format":
|
||||
t, _ := args[1].(time.Time)
|
||||
return t.Format(args[0].(string)), nil
|
||||
}
|
||||
return nil, errors.New("not impl")
|
||||
},
|
||||
"json": func(foo string, args ...any) (any, error) {
|
||||
switch foo {
|
||||
case "Marshal":
|
||||
b, err := json.Marshal(args[0])
|
||||
return string(b), err
|
||||
}
|
||||
return nil, errors.New("not impl")
|
||||
},
|
||||
})
|
||||
tmpl, err := tmpl.Parse(reportTMPL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
messages, err := s.MessagesSince(ctx, t)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
eventNames, err := s.EventNamesSince(ctx, t)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
eventIDs, err := s.EventsSince(ctx, t)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
type aThread struct {
|
||||
Thread string
|
||||
Messages []Message
|
||||
First Message
|
||||
Last Message
|
||||
}
|
||||
type anEvent struct {
|
||||
Event string
|
||||
Threads []aThread
|
||||
First Message
|
||||
Last Message
|
||||
}
|
||||
type someEvents struct {
|
||||
Events []anEvent
|
||||
}
|
||||
|
||||
return tmpl.Execute(w, map[string]any{
|
||||
"since": t.Format("2006-01-02"),
|
||||
"events": func() someEvents {
|
||||
events := make([]anEvent, len(eventIDs))
|
||||
for i, event := range eventIDs {
|
||||
events[i] = func() anEvent {
|
||||
threadNames := []string{}
|
||||
for _, m := range messages {
|
||||
if m.Event == event {
|
||||
threadNames = append(threadNames, m.Thread)
|
||||
}
|
||||
}
|
||||
slices.Sort(threadNames)
|
||||
slices.Compact(threadNames)
|
||||
threads := make([]aThread, len(threadNames))
|
||||
for i, thread := range threadNames {
|
||||
threads[i] = func() aThread {
|
||||
someMessages := []Message{}
|
||||
for _, m := range messages {
|
||||
if m.Thread == thread {
|
||||
someMessages = append(someMessages, m)
|
||||
}
|
||||
}
|
||||
sort.Slice(someMessages, func(i, j int) bool {
|
||||
return someMessages[i].TS < someMessages[j].TS
|
||||
})
|
||||
return aThread{
|
||||
Thread: thread,
|
||||
Messages: someMessages,
|
||||
First: func() Message {
|
||||
if len(someMessages) == 0 {
|
||||
return Message{}
|
||||
}
|
||||
return someMessages[0]
|
||||
}(),
|
||||
Last: func() Message {
|
||||
if len(someMessages) == 0 {
|
||||
return Message{}
|
||||
}
|
||||
return someMessages[len(someMessages)-1]
|
||||
}(),
|
||||
}
|
||||
}()
|
||||
}
|
||||
sort.Slice(threads, func(i, j int) bool {
|
||||
return threads[i].First.TS < threads[j].First.TS
|
||||
})
|
||||
return anEvent{
|
||||
Event: event,
|
||||
Threads: threads,
|
||||
First: func() Message {
|
||||
if len(threads) == 0 {
|
||||
return Message{}
|
||||
}
|
||||
return threads[0].First
|
||||
}(),
|
||||
Last: func() Message {
|
||||
if len(threads) == 0 {
|
||||
return Message{}
|
||||
}
|
||||
return threads[len(threads)-1].Last
|
||||
}(),
|
||||
}
|
||||
}()
|
||||
}
|
||||
return someEvents{
|
||||
Events: events,
|
||||
}
|
||||
}(),
|
||||
"messages": messages,
|
||||
"eventNames": eventNames,
|
||||
})
|
||||
}
|
||||
227
.report.tmpl
Normal file
227
.report.tmpl
Normal file
@@ -0,0 +1,227 @@
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/water.css@2/out/water.css">
|
||||
<script src="https://code.highcharts.com/10/highcharts.js"></script>
|
||||
<script type="module">
|
||||
const allMessages = {{ json "Marshal" .messages }};
|
||||
|
||||
function fillForm() {
|
||||
const filterableFields = [
|
||||
"Asset",
|
||||
"Channel",
|
||||
"Event",
|
||||
"EventName",
|
||||
"Resolved",
|
||||
"Thread",
|
||||
];
|
||||
const fieldsToOptions = {};
|
||||
filterableFields.map((field) => {fieldsToOptions[field] = {}});
|
||||
allMessages.map((message) => {
|
||||
Object.keys(fieldsToOptions).map((field) => {fieldsToOptions[field][message[field]] = true});
|
||||
});
|
||||
Object.keys(fieldsToOptions).map((field) => {fieldsToOptions[field] = Object.keys(fieldsToOptions[field]); fieldsToOptions[field].sort();});
|
||||
|
||||
document.getElementById("form").innerHTML = Object.keys(fieldsToOptions).map((field) => {
|
||||
return `
|
||||
<label for="${field}">${field}</label>
|
||||
<select name="${field}" multiple ${fieldsToOptions[field].length > 10 ? "size=10" : `size=${fieldsToOptions[field].length}`}>
|
||||
${fieldsToOptions[field].map((option) => `
|
||||
<option selected>${option}</option>
|
||||
`)}
|
||||
</select>
|
||||
`
|
||||
}).join("\n");
|
||||
}
|
||||
window.fillForm = fillForm;
|
||||
|
||||
function drawAll() {
|
||||
const messages = filterMessages(allMessages)
|
||||
|
||||
dumpEvents(messages);
|
||||
drawEventVolume(messages)
|
||||
drawEventVolumeByHour(messages)
|
||||
drawEventVolumeByAsset(messages)
|
||||
}
|
||||
window.drawAll = drawAll;
|
||||
|
||||
function dumpEvents(messages) {
|
||||
const eventToThreads = {};
|
||||
for(var m of messages) {
|
||||
if (!eventToThreads[m.Event])
|
||||
eventToThreads[m.Event] = [];
|
||||
eventToThreads[m.Event].push(m.Thread);
|
||||
}
|
||||
|
||||
const threadToMessages = {};
|
||||
for(var m of messages) {
|
||||
if (!threadToMessages[m.Thread])
|
||||
threadToMessages[m.Thread] = [];
|
||||
threadToMessages[m.Thread].push(m);
|
||||
}
|
||||
|
||||
const eventToMessages = {};
|
||||
for(var e in eventToThreads) {
|
||||
if (!eventToMessages[e])
|
||||
eventToMessages[e] = [];
|
||||
for (var thread of eventToThreads[e])
|
||||
eventToMessages[e] = eventToMessages[e].concat(threadToMessages[thread]);
|
||||
}
|
||||
for(var e in eventToMessages)
|
||||
eventToMessages[e].sort((a, b) => a.TS - b.TS);
|
||||
var events = Object.keys(eventToMessages);
|
||||
events.sort();
|
||||
events.reverse();
|
||||
|
||||
var keys = ["TS", "Event", "EventName", "Latest"];
|
||||
document.getElementById("events").innerHTML = `
|
||||
<tr>
|
||||
<th>TS</th>
|
||||
<th>Event</th>
|
||||
<th>EventName</th>
|
||||
<th>Latest</th>
|
||||
</tr>
|
||||
${events.map((e) => `
|
||||
<tr>
|
||||
<td><a href="${eventToMessages[e][0].Source}">${new Date(eventToMessages[e][0].TS * 1000).toDateString()}</a></td>
|
||||
<td><a href="${eventToMessages[e][0].Source}">${eventToMessages[e][0].Event}</a></td>
|
||||
<td>${eventToMessages[e][0].EventName}</td>
|
||||
<td><a href="${eventToMessages[e].at(-1).Source}">${eventToMessages[e].at(-1).Plaintext}</a></td>
|
||||
</tr>
|
||||
`).join("")}
|
||||
`;
|
||||
}
|
||||
|
||||
function filterMessages(messages) {
|
||||
const selects = document.getElementById("form").getElementsByTagName("select");
|
||||
const fieldsToOptions = {};
|
||||
for(var select of selects) {
|
||||
fieldsToOptions[select.name] = [];
|
||||
for(var option of select.getElementsByTagName("option"))
|
||||
if (option.selected)
|
||||
fieldsToOptions[select.name].push(option.innerHTML);
|
||||
}
|
||||
return messages.map((m) => {
|
||||
for(var k in fieldsToOptions) {
|
||||
if (fieldsToOptions[k].filter((v) => `${v}` == `${m[k]}`).length == 0) {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
return m;
|
||||
}).filter((m) => { return m != null });
|
||||
}
|
||||
|
||||
function drawEventVolume(messages) {
|
||||
drawEventVolumeWith(
|
||||
messages,
|
||||
"eventVolume",
|
||||
(ts) => new Date(1000 * ts).
|
||||
toLocaleDateString('en-US', {month: 'numeric', day: 'numeric', weekday: 'short'}),
|
||||
(m) => m.EventName,
|
||||
);
|
||||
}
|
||||
|
||||
function drawEventVolumeWith(messages, documentId, kify, nameify) {
|
||||
const points = [];
|
||||
messages.forEach((m) => {
|
||||
points.push({x: m.TS, name: nameify(m)});
|
||||
});
|
||||
|
||||
var xs = points.map((point) => point.x);
|
||||
if (xs && !isNaN(parseFloat(kify(xs[0])))) {
|
||||
xs = xs.map(kify);
|
||||
xs.sort((a, b) => parseFloat(a) - parseFloat(b));
|
||||
} else {
|
||||
xs.sort();
|
||||
xs = xs.map(kify);
|
||||
}
|
||||
xs = [...new Set(xs)];
|
||||
|
||||
const names = [...new Set(points.map((p) => p.name))];
|
||||
const nameAndData = names.map((name) => {
|
||||
return {
|
||||
name: name,
|
||||
data: xs.map((x) => points.filter((p) => { return p.name == name && kify(p.x) == x }).length),
|
||||
}
|
||||
});
|
||||
draw(documentId, xs, nameAndData);
|
||||
}
|
||||
|
||||
function drawEventVolumeByHour(messages) {
|
||||
drawEventVolumeWith(
|
||||
messages,
|
||||
"eventVolumeByHour",
|
||||
(ts) => new Date(1000 * ts).getHours(),
|
||||
(m) => m.EventName,
|
||||
);
|
||||
}
|
||||
|
||||
function drawEventVolumeByAsset(messages) {}
|
||||
|
||||
function draw(documentId, xs, nameAndData) {
|
||||
document.getElementById(documentId).innerHTML = "";
|
||||
Highcharts.chart(documentId, {
|
||||
chart: { type: 'column' },
|
||||
title: { text: '' },
|
||||
xAxis: { categories: xs },
|
||||
yAxis: { allowDecimals: false, title: { text: '' } },
|
||||
//legend: { enabled: false },
|
||||
series: nameAndData,
|
||||
plotOptions: { column: { stacking: 'normal' } },
|
||||
});
|
||||
}
|
||||
</script>
|
||||
<style>
|
||||
rows {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
flex-grow: 1;
|
||||
}
|
||||
columns {
|
||||
display: flex;
|
||||
flex-direction: row;
|
||||
flex-grow: 1;
|
||||
}
|
||||
rows, columns { border: 1px solid red; }
|
||||
</style>
|
||||
</head>
|
||||
<body onload="fillForm(); drawAll();" style="max-width: inherit;">
|
||||
<h1>Report</h1>
|
||||
<columns>
|
||||
<form style="width: 16em; flex-shrink: 0;" onsubmit="drawAll(); return false;">
|
||||
<columns>
|
||||
<button type="submit">Apply</button>
|
||||
</columns>
|
||||
<rows id="form"></rows>
|
||||
</form>
|
||||
<rows>
|
||||
<rows>
|
||||
<rows>
|
||||
<h2>Event Volume</h2>
|
||||
<div id="eventVolume"></div>
|
||||
</rows>
|
||||
<columns>
|
||||
<rows>
|
||||
<h3>by Hour</h3>
|
||||
<div id="eventVolumeByHour"></div>
|
||||
</rows>
|
||||
</columns>
|
||||
<rows>
|
||||
<h3>by Asset</h3>
|
||||
<div>DRAW ME</div>
|
||||
</rows>
|
||||
</rows>
|
||||
<rows>
|
||||
<div>
|
||||
<h2>Events</h2>
|
||||
<table id="events">
|
||||
</table>
|
||||
</div>
|
||||
</rows>
|
||||
</rows>
|
||||
</columns>
|
||||
</body>
|
||||
<footer>
|
||||
</footer>
|
||||
</html>
|
||||
32
.report_test.go
Normal file
32
.report_test.go
Normal file
@@ -0,0 +1,32 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"os"
|
||||
"path"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestReport(t *testing.T) {
|
||||
ctx, can := context.WithTimeout(context.Background(), time.Minute)
|
||||
defer can()
|
||||
|
||||
w := bytes.NewBuffer(nil)
|
||||
|
||||
db := NewRAM()
|
||||
FillWithTestdata(ctx, db, renderAssetPattern, renderDatacenterPattern, renderEventNamePattern)
|
||||
s := NewStorage(db)
|
||||
|
||||
if err := ReportSince(ctx, w, s, time.Now().Add(-1*time.Hour*24*365*20)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
p := path.Join(os.TempDir(), "test_report.html")
|
||||
if env := os.Getenv("TEST_REPORT_PATH"); env != "" {
|
||||
p = env
|
||||
}
|
||||
os.WriteFile(p, w.Bytes(), os.ModePerm)
|
||||
t.Log(p)
|
||||
}
|
||||
33
README.md
33
README.md
@@ -1,3 +1,36 @@
|
||||
# Spoc Bot v. Render
|
||||
|
||||
Thank you, [Sean](https://www.linkedin.com/in/sean-moore-1755a619/)
|
||||
|
||||
## TODO
|
||||
|
||||
- what SLO/SLI can I help benoit with
|
||||
- scott; like to keep state in incident.io and zendesk
|
||||
- @spoc -ignore, @spoc -s summary
|
||||
- limit queue retries
|
||||
|
||||
```
|
||||
erDiagram
|
||||
%% thread event eventName
|
||||
EVENT ||--|{ THREAD: "spawns"
|
||||
THREAD ||--|{ MESSAGE: "populated by"
|
||||
|
||||
MESSAGE {
|
||||
ID str
|
||||
URL str
|
||||
TS number
|
||||
Plaintext str
|
||||
}
|
||||
THREAD {
|
||||
ID str
|
||||
URL str
|
||||
Channel str
|
||||
}
|
||||
EVENT {
|
||||
ID str
|
||||
Name str
|
||||
Asset str
|
||||
Resolved bool
|
||||
Datacenter str
|
||||
}
|
||||
```
|
||||
|
||||
56
ai.go
Normal file
56
ai.go
Normal file
@@ -0,0 +1,56 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/tmc/langchaingo/llms"
|
||||
"github.com/tmc/langchaingo/llms/ollama"
|
||||
)
|
||||
|
||||
type AI interface {
|
||||
Do(context.Context, string) (string, error)
|
||||
}
|
||||
|
||||
type AINoop struct {
|
||||
}
|
||||
|
||||
func NewAINoop() AINoop {
|
||||
return AINoop{}
|
||||
}
|
||||
|
||||
func (ai AINoop) Do(ctx context.Context, prompt string) (string, error) {
|
||||
return ":shrug:", nil
|
||||
}
|
||||
|
||||
type AIOllama struct {
|
||||
model string
|
||||
url string
|
||||
}
|
||||
|
||||
func NewAIOllama(url, model string) AIOllama {
|
||||
return AIOllama{url: url, model: model}
|
||||
}
|
||||
|
||||
func (ai AIOllama) Do(ctx context.Context, prompt string) (string, error) {
|
||||
c := &http.Client{
|
||||
Timeout: time.Hour,
|
||||
Transport: &http.Transport{
|
||||
//DisableKeepAlives: true,
|
||||
IdleConnTimeout: time.Hour,
|
||||
ResponseHeaderTimeout: time.Hour,
|
||||
ExpectContinueTimeout: time.Hour,
|
||||
},
|
||||
}
|
||||
defer c.CloseIdleConnections()
|
||||
llm, err := ollama.New(
|
||||
ollama.WithModel(ai.model),
|
||||
ollama.WithServerURL(ai.url),
|
||||
ollama.WithHTTPClient(c),
|
||||
)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return llms.GenerateFromSinglePrompt(ctx, llm, prompt)
|
||||
}
|
||||
68
ai_test.go
Normal file
68
ai_test.go
Normal file
@@ -0,0 +1,68 @@
|
||||
//go:build ai
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestAINoop(t *testing.T) {
|
||||
t.Parallel()
|
||||
ai := NewAINoop()
|
||||
|
||||
testAI(t, ai)
|
||||
}
|
||||
|
||||
func TestAIOllama(t *testing.T) {
|
||||
t.Parallel()
|
||||
ai := NewAIOllama("http://localhost:11434", "llama3")
|
||||
|
||||
testAI(t, ai)
|
||||
}
|
||||
|
||||
func testAI(t *testing.T, ai AI) {
|
||||
ctx, can := context.WithTimeout(context.Background(), time.Minute)
|
||||
defer can()
|
||||
|
||||
t.Run("mvp", func(t *testing.T) {
|
||||
if result, err := ai.Do(ctx, "Tell me a fun fact."); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if len(result) < 3 {
|
||||
t.Error(result)
|
||||
} else {
|
||||
t.Logf("%s", result)
|
||||
}
|
||||
})
|
||||
|
||||
/*
|
||||
t.Run("simulation", func(t *testing.T) {
|
||||
d := NewRAM()
|
||||
FillWithTestdata(ctx, d, renderAssetPattern, renderDatacenterPattern, renderEventNamePattern)
|
||||
s := NewStorage(d)
|
||||
|
||||
threads, err := s.Threads(ctx)
|
||||
if err != nil || len(threads) < 1 {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
thread, err := s.Thread(ctx, threads[0])
|
||||
if err != nil || len(thread) < 1 {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
input := fmt.Sprintf(`
|
||||
Summarize the following forum converstion.
|
||||
---
|
||||
%s
|
||||
`, thread[0].Plaintext)
|
||||
t.Logf("\n\t%s", input)
|
||||
result, err := ai.Do(ctx, input)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Logf("\n\t%s\n->\n\t%s", input, result)
|
||||
})
|
||||
*/
|
||||
}
|
||||
115
config.go
115
config.go
@@ -1,30 +1,63 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"regexp"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
Port int
|
||||
Debug bool
|
||||
InitializeSlack bool
|
||||
SlackToken string
|
||||
SlackChannels string
|
||||
Port int
|
||||
Debug bool
|
||||
InitializeSlack bool
|
||||
SlackToken string
|
||||
SlackChannels []string
|
||||
DriverConn string
|
||||
BasicAuthUser string
|
||||
BasicAuthPassword string
|
||||
FillWithTestdata bool
|
||||
OllamaUrl string
|
||||
OllamaModel string
|
||||
RecapPromptIntro string
|
||||
RecapPrompt string
|
||||
AssetPattern string
|
||||
DatacenterPattern string
|
||||
EventNamePattern string
|
||||
driver Driver
|
||||
storage Storage
|
||||
ai AI
|
||||
slackToModelPipeline Pipeline
|
||||
slackScrapePipeline Pipeline
|
||||
modelToPersistencePipeline Pipeline
|
||||
persistenceToRecapPipeline Pipeline
|
||||
}
|
||||
|
||||
func newConfig() (Config, error) {
|
||||
return newConfigFromEnv(os.Getenv)
|
||||
var (
|
||||
renderAssetPattern = `(dpg|svc|red)-[a-z0-9-]*[a-z0-9]|ip-[0-9]+-[0-9]+-[0-9]+-[0-9]+\.[a-z]+-[a-z]+-[0-9]+\.compute\.internal`
|
||||
renderDatacenterPattern = `[a-z]{4}[a-z]*-[0-9]`
|
||||
renderEventNamePattern = `(\[[^\]]*\] *)?(?P<result>.*)`
|
||||
)
|
||||
|
||||
func newConfig(ctx context.Context) (Config, error) {
|
||||
return newConfigFromEnv(ctx, os.Getenv)
|
||||
}
|
||||
|
||||
func newConfigFromEnv(getEnv func(string) string) (Config, error) {
|
||||
func newConfigFromEnv(ctx context.Context, getEnv func(string) string) (Config, error) {
|
||||
def := Config{
|
||||
Port: 8080,
|
||||
Port: 38080,
|
||||
OllamaModel: "llama3",
|
||||
AssetPattern: renderAssetPattern,
|
||||
DatacenterPattern: renderDatacenterPattern,
|
||||
EventNamePattern: renderEventNamePattern,
|
||||
RecapPromptIntro: "A Slack thread began with the following original post.",
|
||||
RecapPrompt: "What is the summary of the responses to the Slack thread consisting of the following messages? Limit the summary to one sentence. Do not include any leading text. Be as brief as possible. No context is needed.",
|
||||
}
|
||||
|
||||
var m map[string]any
|
||||
@@ -38,8 +71,8 @@ func newConfigFromEnv(getEnv func(string) string) (Config, error) {
|
||||
for k, v := range m {
|
||||
envK := k
|
||||
idxes := re.FindAllIndex([]byte(envK), -1)
|
||||
slices.Reverse(idxes)
|
||||
for _, idx := range idxes {
|
||||
for i := len(idxes) - 1; i >= 0; i-- {
|
||||
idx := idxes[i]
|
||||
if idx[0] > 0 {
|
||||
envK = fmt.Sprintf("%s_%s", envK[:idx[0]], envK[idx[0]:])
|
||||
}
|
||||
@@ -64,6 +97,10 @@ func newConfigFromEnv(getEnv func(string) string) (Config, error) {
|
||||
return Config{}, err
|
||||
}
|
||||
m[k] = got
|
||||
case nil, []interface{}:
|
||||
m[k] = strings.Split(s, ",")
|
||||
default:
|
||||
return Config{}, fmt.Errorf("not impl: parse %s as %T", envK, v)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -73,5 +110,59 @@ func newConfigFromEnv(getEnv func(string) string) (Config, error) {
|
||||
} else if err := json.Unmarshal(b, &result); err != nil {
|
||||
return Config{}, err
|
||||
}
|
||||
|
||||
ctx, can := context.WithTimeout(ctx, time.Minute)
|
||||
defer can()
|
||||
|
||||
driver, err := NewDriver(ctx, result.DriverConn)
|
||||
if err != nil {
|
||||
return Config{}, err
|
||||
}
|
||||
result.driver = driver
|
||||
if !result.FillWithTestdata {
|
||||
//} else if err := result.driver.FillWithTestdata(ctx, result.AssetPattern, result.DatacenterPattern, result.EventNamePattern); err != nil {
|
||||
} else {
|
||||
return Config{}, errors.New("not impl")
|
||||
}
|
||||
if result.Debug {
|
||||
log.Printf("connected to driver at %s (%s @%s)", result.DriverConn, result.driver.engine, result.driver.conn)
|
||||
}
|
||||
|
||||
storage, err := NewStorage(ctx, result.driver)
|
||||
if err != nil {
|
||||
return Config{}, err
|
||||
}
|
||||
result.storage = storage
|
||||
|
||||
if result.OllamaUrl != "" {
|
||||
result.ai = NewAIOllama(result.OllamaUrl, result.OllamaModel)
|
||||
} else {
|
||||
result.ai = NewAINoop()
|
||||
}
|
||||
|
||||
slackToModelPipeline, err := NewSlackToModelPipeline(ctx, result)
|
||||
if err != nil {
|
||||
return Config{}, err
|
||||
}
|
||||
result.slackToModelPipeline = slackToModelPipeline
|
||||
|
||||
modelToPersistencePipeline, err := NewModelToPersistencePipeline(ctx, result)
|
||||
if err != nil {
|
||||
return Config{}, err
|
||||
}
|
||||
result.modelToPersistencePipeline = modelToPersistencePipeline
|
||||
|
||||
slackScrapePipeline, err := NewSlackScrapePipeline(ctx, result)
|
||||
if err != nil {
|
||||
return Config{}, err
|
||||
}
|
||||
result.slackScrapePipeline = slackScrapePipeline
|
||||
|
||||
persistenceToRecapPipeline, err := NewPersistenceToRecapPipeline(ctx, result)
|
||||
if err != nil {
|
||||
return Config{}, err
|
||||
}
|
||||
result.persistenceToRecapPipeline = persistenceToRecapPipeline
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
@@ -1,15 +1,21 @@
|
||||
package main
|
||||
|
||||
import "testing"
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestNewConfig(t *testing.T) {
|
||||
if got, err := newConfigFromEnv(func(k string) string {
|
||||
t.Parallel()
|
||||
if got, err := newConfigFromEnv(context.Background(), func(k string) string {
|
||||
t.Logf("getenv(%s)", k)
|
||||
switch k {
|
||||
case "PORT":
|
||||
return "1"
|
||||
case "INITIALIZE_SLACK":
|
||||
return "true"
|
||||
case "SLACK_CHANNELS":
|
||||
return "x,y"
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
@@ -19,5 +25,7 @@ func TestNewConfig(t *testing.T) {
|
||||
t.Error(got)
|
||||
} else if !got.InitializeSlack {
|
||||
t.Error(got)
|
||||
} else if len(got.SlackChannels) != 2 || got.SlackChannels[0] != "x" || got.SlackChannels[1] != "y" {
|
||||
t.Error(got)
|
||||
}
|
||||
}
|
||||
|
||||
156
driver.go
156
driver.go
@@ -2,93 +2,111 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io/ioutil"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"os"
|
||||
"time"
|
||||
"path"
|
||||
"testing"
|
||||
|
||||
"go.etcd.io/bbolt"
|
||||
_ "github.com/glebarez/go-sqlite"
|
||||
_ "github.com/lib/pq"
|
||||
)
|
||||
|
||||
type Driver interface {
|
||||
Close() error
|
||||
ForEach(context.Context, string, func(string, []byte) error) error
|
||||
Get(context.Context, string, string) ([]byte, error)
|
||||
Set(context.Context, string, string, []byte) error
|
||||
type Driver struct {
|
||||
engine string
|
||||
conn string
|
||||
*sql.DB
|
||||
}
|
||||
|
||||
type BBolt struct {
|
||||
db *bbolt.DB
|
||||
}
|
||||
|
||||
func NewTestDB() BBolt {
|
||||
d, err := ioutil.TempDir(os.TempDir(), "test-db-*")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
func NewTestDriver(t *testing.T, optionalP ...string) Driver {
|
||||
p := path.Join(t.TempDir(), "db")
|
||||
if len(optionalP) > 0 {
|
||||
p = optionalP[0]
|
||||
}
|
||||
db, err := NewDB(d)
|
||||
driver, err := NewDriver(context.Background(), p)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
t.Fatal(err)
|
||||
}
|
||||
return db
|
||||
t.Cleanup(func() { driver.Close() })
|
||||
return driver
|
||||
}
|
||||
|
||||
func NewDB(p string) (BBolt, error) {
|
||||
db, err := bbolt.Open(p, 0600, &bbolt.Options{
|
||||
Timeout: time.Second,
|
||||
})
|
||||
return BBolt{db: db}, err
|
||||
}
|
||||
|
||||
func (bb BBolt) Close() error {
|
||||
return bb.db.Close()
|
||||
}
|
||||
|
||||
func (bb BBolt) ForEach(ctx context.Context, db string, cb func(string, []byte) error) error {
|
||||
return bb.db.View(func(tx *bbolt.Tx) error {
|
||||
bkt := tx.Bucket([]byte(db))
|
||||
if bkt == nil {
|
||||
return nil
|
||||
func NewDriver(ctx context.Context, conn string) (Driver, error) {
|
||||
engine := "sqlite"
|
||||
if conn == "" {
|
||||
f, err := os.CreateTemp(os.TempDir(), "spoc-bot-vr-undef-*.db")
|
||||
if err != nil {
|
||||
return Driver{}, err
|
||||
}
|
||||
|
||||
c := bkt.Cursor()
|
||||
for k, v := c.First(); k != nil && ctx.Err() == nil; k, v = c.Next() {
|
||||
if err := cb(string(k), v); err != nil {
|
||||
return err
|
||||
}
|
||||
f.Close()
|
||||
conn = f.Name()
|
||||
} else {
|
||||
if u, err := url.Parse(conn); err != nil {
|
||||
return Driver{}, err
|
||||
} else if u.Scheme != "" {
|
||||
engine = u.Scheme
|
||||
}
|
||||
}
|
||||
|
||||
return ctx.Err()
|
||||
})
|
||||
db, err := sql.Open(engine, conn)
|
||||
if err != nil {
|
||||
return Driver{}, err
|
||||
}
|
||||
|
||||
driver := Driver{DB: db, conn: conn, engine: engine}
|
||||
if err := driver.setup(ctx); err != nil {
|
||||
driver.Close()
|
||||
return Driver{}, fmt.Errorf("failed setup: %w", err)
|
||||
}
|
||||
|
||||
return driver, nil
|
||||
}
|
||||
|
||||
func (bb BBolt) Get(_ context.Context, db, id string) ([]byte, error) {
|
||||
var b []byte
|
||||
err := bb.db.View(func(tx *bbolt.Tx) error {
|
||||
bkt := tx.Bucket([]byte(db))
|
||||
if bkt == nil {
|
||||
return nil
|
||||
/*
|
||||
func (driver Driver) FillWithTestdata(ctx context.Context, assetPattern, datacenterPattern, eventNamePattern string) error {
|
||||
d := "./testdata/slack_events"
|
||||
entries, err := os.ReadDir(d)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, entry := range entries {
|
||||
if entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
b, err := os.ReadFile(path.Join(d, entry.Name()))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m, err := ParseSlack(b, assetPattern, datacenterPattern, eventNamePattern)
|
||||
if errors.Is(err, ErrIrrelevantMessage) {
|
||||
continue
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := driver.Set(nil, "m", m.ID, m.Serialize()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
*/
|
||||
|
||||
b = bkt.Get([]byte(id))
|
||||
return nil
|
||||
})
|
||||
return b, err
|
||||
func (driver Driver) setup(ctx context.Context) error {
|
||||
_, err := driver.ExecContext(ctx, `
|
||||
DROP TABLE IF EXISTS spoc_bot_vr_q;
|
||||
DROP TABLE IF EXISTS spoc_bot_vr_messages;
|
||||
`)
|
||||
return err
|
||||
}
|
||||
|
||||
func (bb BBolt) Set(_ context.Context, db, id string, value []byte) error {
|
||||
return bb.db.Update(func(tx *bbolt.Tx) error {
|
||||
bkt := tx.Bucket([]byte(db))
|
||||
if bkt == nil {
|
||||
var err error
|
||||
bkt, err = tx.CreateBucket([]byte(db))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if value == nil {
|
||||
return bkt.Delete([]byte(id))
|
||||
}
|
||||
return bkt.Put([]byte(id), value)
|
||||
})
|
||||
func (d Driver) table(s string) (string, error) {
|
||||
switch s {
|
||||
case "q":
|
||||
return "spoc_bot_vr_q", nil
|
||||
case "m":
|
||||
return "spoc_bot_vr_messages", nil
|
||||
}
|
||||
return "", errors.New("invalid table " + s)
|
||||
}
|
||||
|
||||
53
driver_integration_test.go
Normal file
53
driver_integration_test.go
Normal file
@@ -0,0 +1,53 @@
|
||||
//go:build integration
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/breel-render/spoc-bot-vr/model"
|
||||
)
|
||||
|
||||
func TestDriverIntegration(t *testing.T) {
|
||||
ctx, can := context.WithTimeout(context.Background(), time.Second*30)
|
||||
defer can()
|
||||
|
||||
driver, err := NewDriver(ctx, os.Getenv("DRIVER_CONN"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer driver.Close()
|
||||
|
||||
q, err := NewQueue(ctx, t.Name(), driver)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
qV := []byte("hello")
|
||||
if err := q.Enqueue(ctx, qV); err != nil {
|
||||
t.Error("q cannot enqueue:", err)
|
||||
} else if reservation, v, err := q.Syn(ctx); err != nil {
|
||||
t.Error("q cannot syn:", err)
|
||||
} else if string(v) != string(qV) {
|
||||
t.Error("q enqueued wrong:", string(v))
|
||||
} else if len(reservation) == 0 {
|
||||
t.Error("q didnt have reservation")
|
||||
} else if err := q.Ack(ctx, reservation); err != nil {
|
||||
t.Error("q cannot ack:", err)
|
||||
}
|
||||
|
||||
s, err := NewStorage(ctx, driver)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
evt := model.Event{ID: "x", Name: "y"}
|
||||
if err := s.UpsertEvent(ctx, evt); err != nil {
|
||||
t.Error("s cannot upsert:", err)
|
||||
} else if e, err := s.GetEvent(ctx, evt.ID); err != nil {
|
||||
t.Error("s cannot get:", err)
|
||||
} else if e != evt {
|
||||
t.Error("s upserted wrong:", e)
|
||||
}
|
||||
}
|
||||
24
driver_test.go
Normal file
24
driver_test.go
Normal file
@@ -0,0 +1,24 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestNewTestDriver(t *testing.T) {
|
||||
t.Parallel()
|
||||
NewTestDriver(t)
|
||||
}
|
||||
|
||||
func TestDriver(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx, can := context.WithTimeout(context.Background(), time.Second*15)
|
||||
defer can()
|
||||
|
||||
d, err := NewDriver(ctx, "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer d.Close()
|
||||
}
|
||||
23
go.mod
23
go.mod
@@ -3,8 +3,25 @@ module github.com/breel-render/spoc-bot-vr
|
||||
go 1.22.1
|
||||
|
||||
require (
|
||||
github.com/go-errors/errors v1.5.1
|
||||
go.etcd.io/bbolt v1.3.9
|
||||
github.com/glebarez/go-sqlite v1.21.2
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/lib/pq v1.10.9
|
||||
github.com/tmc/langchaingo v0.1.8
|
||||
golang.org/x/time v0.5.0
|
||||
gotest.tools v2.2.0+incompatible
|
||||
)
|
||||
|
||||
require golang.org/x/sys v0.4.0 // indirect
|
||||
require (
|
||||
github.com/dlclark/regexp2 v1.10.0 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/google/go-cmp v0.6.0 // indirect
|
||||
github.com/mattn/go-isatty v0.0.19 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pkoukk/tiktoken-go v0.1.6 // indirect
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
|
||||
golang.org/x/sys v0.16.0 // indirect
|
||||
modernc.org/libc v1.22.5 // indirect
|
||||
modernc.org/mathutil v1.5.0 // indirect
|
||||
modernc.org/memory v1.5.0 // indirect
|
||||
modernc.org/sqlite v1.23.1 // indirect
|
||||
)
|
||||
|
||||
54
go.sum
54
go.sum
@@ -1,6 +1,48 @@
|
||||
github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk=
|
||||
github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
|
||||
go.etcd.io/bbolt v1.3.9 h1:8x7aARPEXiXbHmtUwAIv7eV2fQFHrLLavdiJ3uzJXoI=
|
||||
go.etcd.io/bbolt v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE=
|
||||
golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
|
||||
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dlclark/regexp2 v1.10.0 h1:+/GIL799phkJqYW+3YbOd8LCcbHzT0Pbo8zl70MHsq0=
|
||||
github.com/dlclark/regexp2 v1.10.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/glebarez/go-sqlite v1.21.2 h1:3a6LFC4sKahUunAmynQKLZceZCOzUthkRkEAl9gAXWo=
|
||||
github.com/glebarez/go-sqlite v1.21.2/go.mod h1:sfxdZyhQjTM2Wry3gVYWaW072Ri1WMdWJi0k6+3382k=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 h1:Xim43kblpZXfIBQsbuBVKCudVG457BR2GZFIz3uw3hQ=
|
||||
github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
|
||||
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
|
||||
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkoukk/tiktoken-go v0.1.6 h1:JF0TlJzhTbrI30wCvFuiw6FzP2+/bR+FIxUdgEAcUsw=
|
||||
github.com/pkoukk/tiktoken-go v0.1.6/go.mod h1:9NiV+i9mJKGj1rYOT+njbv+ZwA/zJxYdewGl6qVatpg=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/tmc/langchaingo v0.1.8 h1:nrImgh0aWdu3stJTHz80N60WGwPWY8HXCK10gQny7bA=
|
||||
github.com/tmc/langchaingo v0.1.8/go.mod h1:iNBfS9e6jxBKsJSPWnlqNhoVWgdA3D1g5cdFJjbIZNQ=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU=
|
||||
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
|
||||
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
modernc.org/libc v1.22.5 h1:91BNch/e5B0uPbJFgqbxXuOnxBQjlS//icfQEGmvyjE=
|
||||
modernc.org/libc v1.22.5/go.mod h1:jj+Z7dTNX8fBScMVNRAYZ/jF91K8fdT2hYMThc3YjBY=
|
||||
modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ=
|
||||
modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
|
||||
modernc.org/memory v1.5.0 h1:N+/8c5rE6EqugZwHii4IFsaJ7MUhoWX07J5tC/iI5Ds=
|
||||
modernc.org/memory v1.5.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU=
|
||||
modernc.org/sqlite v1.23.1 h1:nrSBg4aRQQwq59JpvGEQ15tNxoO5pX/kUjcRNwSAGQM=
|
||||
modernc.org/sqlite v1.23.1/go.mod h1:OrDj17Mggn6MhE+iPbBNf7RGKODDE9NFT0f3EwDzJqk=
|
||||
|
||||
252
main.go
252
main.go
@@ -10,19 +10,22 @@ import (
|
||||
"net"
|
||||
"net/http"
|
||||
"os/signal"
|
||||
"slices"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
func main() {
|
||||
ctx, can := signal.NotifyContext(context.Background(), syscall.SIGINT)
|
||||
defer can()
|
||||
|
||||
cfg, err := newConfig()
|
||||
cfg, err := newConfig(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer cfg.driver.Close()
|
||||
|
||||
if err := run(ctx, cfg); err != nil && ctx.Err() == nil {
|
||||
panic(err)
|
||||
@@ -33,11 +36,35 @@ func run(ctx context.Context, cfg Config) error {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case err := <-processPipelines(ctx,
|
||||
cfg.slackToModelPipeline,
|
||||
cfg.modelToPersistencePipeline,
|
||||
cfg.slackScrapePipeline,
|
||||
cfg.persistenceToRecapPipeline,
|
||||
):
|
||||
return err
|
||||
case err := <-listenAndServe(ctx, cfg):
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
func processPipelines(ctx context.Context, first Pipeline, pipelines ...Pipeline) chan error {
|
||||
ctx, can := context.WithCancel(ctx)
|
||||
|
||||
pipelines = append(pipelines, first)
|
||||
errs := make(chan error)
|
||||
for i := range pipelines {
|
||||
go func(i int) {
|
||||
defer can()
|
||||
select {
|
||||
case errs <- pipelines[i].Process(ctx):
|
||||
case <-ctx.Done():
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
return errs
|
||||
}
|
||||
|
||||
func listenAndServe(ctx context.Context, cfg Config) chan error {
|
||||
s := http.Server{
|
||||
Addr: fmt.Sprintf(":%d", cfg.Port),
|
||||
@@ -50,6 +77,7 @@ func listenAndServe(ctx context.Context, cfg Config) chan error {
|
||||
errc := make(chan error)
|
||||
go func() {
|
||||
defer close(errc)
|
||||
log.Printf("listening on %s", s.Addr)
|
||||
errc <- s.ListenAndServe()
|
||||
}()
|
||||
|
||||
@@ -59,7 +87,10 @@ func listenAndServe(ctx context.Context, cfg Config) chan error {
|
||||
func newHandler(cfg Config) http.HandlerFunc {
|
||||
mux := http.NewServeMux()
|
||||
|
||||
mux.Handle("GET /api/v1/version", http.HandlerFunc(newHandlerGetAPIV1Version))
|
||||
mux.Handle("POST /api/v1/events/slack", http.HandlerFunc(newHandlerPostAPIV1EventsSlack(cfg)))
|
||||
mux.Handle("PUT /api/v1/rpc/scrapeslack", http.HandlerFunc(newHandlerPutAPIV1RPCScrapeSlack(cfg)))
|
||||
mux.Handle("GET /api/v1/rpc/recapevent", http.HandlerFunc(newHandlerGetAPIV1RPCRecapEvent(cfg)))
|
||||
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
if cfg.Debug {
|
||||
@@ -72,32 +103,107 @@ func newHandler(cfg Config) http.HandlerFunc {
|
||||
}
|
||||
}
|
||||
|
||||
var Version = "undef"
|
||||
|
||||
func newHandlerGetAPIV1Version(w http.ResponseWriter, _ *http.Request) {
|
||||
json.NewEncoder(w).Encode(map[string]any{"version": Version})
|
||||
}
|
||||
|
||||
func newHandlerGetAPIV1RPCRecapEvent(cfg Config) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
if !basicAuth(cfg, w, r) {
|
||||
return
|
||||
}
|
||||
|
||||
event := r.URL.Query().Get("id")
|
||||
b, _ := json.Marshal(ModelIDs{Event: event})
|
||||
if err := cfg.persistenceToRecapPipeline.reader.Enqueue(r.Context(), b); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
json.NewEncoder(w).Encode(map[string]any{"event": event})
|
||||
}
|
||||
}
|
||||
|
||||
func newHandlerPutAPIV1RPCScrapeSlack(cfg Config) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
if !basicAuth(cfg, w, r) {
|
||||
return
|
||||
}
|
||||
|
||||
since, err := parseSince(r.URL.Query().Get("since"))
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
job, _ := json.Marshal(SlackScrape{
|
||||
Latest: time.Now().Unix(),
|
||||
Oldest: since.Unix(),
|
||||
ThreadTS: "",
|
||||
Channel: r.Header.Get("slack-channel"),
|
||||
Token: r.Header.Get("slack-oauth-token"),
|
||||
})
|
||||
if err := cfg.slackScrapePipeline.reader.Enqueue(r.Context(), job); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func basicAuth(cfg Config, w http.ResponseWriter, r *http.Request) bool {
|
||||
if u, p, _ := r.BasicAuth(); u != cfg.BasicAuthUser || p != cfg.BasicAuthPassword {
|
||||
http.Error(w, "shoo", http.StatusForbidden)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func newHandlerPostAPIV1EventsSlack(cfg Config) http.HandlerFunc {
|
||||
if cfg.InitializeSlack {
|
||||
return handlerPostAPIV1EventsSlackInitialize
|
||||
return handlerPostAPIV1EventsSlackInitialize(cfg)
|
||||
}
|
||||
return _newHandlerPostAPIV1EventsSlack(cfg)
|
||||
}
|
||||
|
||||
func handlerPostAPIV1EventsSlackInitialize(w http.ResponseWriter, r *http.Request) {
|
||||
b, _ := io.ReadAll(r.Body)
|
||||
var challenge struct {
|
||||
Token string
|
||||
Challenge string
|
||||
Type string
|
||||
func handlerPostAPIV1EventsSlackInitialize(cfg Config) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
b, _ := io.ReadAll(r.Body)
|
||||
var challenge struct {
|
||||
Token string
|
||||
Challenge string
|
||||
Type string
|
||||
}
|
||||
if err := json.Unmarshal(b, &challenge); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
cfg.driver.ExecContext(r.Context(), `
|
||||
CREATE TABLE
|
||||
IF NOT EXISTS
|
||||
initialization (
|
||||
label TEXT,
|
||||
token TEXT,
|
||||
updated TIMESTAMP
|
||||
)
|
||||
`)
|
||||
if _, err := cfg.driver.ExecContext(r.Context(), `
|
||||
INSERT
|
||||
INTO initialization (label, token, updated)
|
||||
VALUES ('slack_events_webhook_token', $1, $2)
|
||||
`, challenge.Token, time.Now().UTC()); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
log.Println("stashed new slack initialization token", challenge.Token)
|
||||
encodeResponse(w, r, map[string]any{"challenge": challenge.Challenge})
|
||||
}
|
||||
if err := json.Unmarshal(b, &challenge); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
json.NewEncoder(w).Encode(map[string]any{"challenge": challenge.Challenge})
|
||||
}
|
||||
|
||||
func _newHandlerPostAPIV1EventsSlack(cfg Config) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
b, _ := io.ReadAll(r.Body)
|
||||
r.Body = io.NopCloser(bytes.NewReader(b))
|
||||
body, _ := io.ReadAll(r.Body)
|
||||
r.Body = io.NopCloser(bytes.NewReader(body))
|
||||
|
||||
var allowList struct {
|
||||
Token string
|
||||
@@ -105,17 +211,121 @@ func _newHandlerPostAPIV1EventsSlack(cfg Config) http.HandlerFunc {
|
||||
Channel string
|
||||
}
|
||||
}
|
||||
if err := json.Unmarshal(b, &allowList); err != nil {
|
||||
if err := json.Unmarshal(body, &allowList); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
} else if allowList.Token != cfg.SlackToken {
|
||||
http.Error(w, "invalid .token", http.StatusForbidden)
|
||||
return
|
||||
} else if !slices.Contains(strings.Split(cfg.SlackChannels, ","), allowList.Event.Channel) {
|
||||
} else if !func() bool {
|
||||
for _, slackChannel := range cfg.SlackChannels {
|
||||
if slackChannel == allowList.Event.Channel {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}() {
|
||||
return
|
||||
}
|
||||
|
||||
log.Printf("slack event: %s", b)
|
||||
http.Error(w, "not impl", http.StatusNotImplemented)
|
||||
if err := cfg.slackToModelPipeline.reader.Enqueue(r.Context(), body); err != nil {
|
||||
log.Printf("failed to ingest: %v", err)
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
log.Printf("ingested")
|
||||
}
|
||||
}
|
||||
|
||||
func parseSince(s string) (time.Time, error) {
|
||||
if s == "" {
|
||||
return time.Unix(0, 0), nil
|
||||
}
|
||||
|
||||
if n, err := strconv.ParseInt(s, 10, 64); err != nil {
|
||||
} else {
|
||||
return time.Unix(n, 0), nil
|
||||
}
|
||||
|
||||
if t, err := time.Parse(time.RFC3339, s); err != nil {
|
||||
} else {
|
||||
return t, nil
|
||||
}
|
||||
|
||||
if t, err := time.Parse(time.RFC3339Nano, s); err != nil {
|
||||
} else {
|
||||
return t, nil
|
||||
}
|
||||
|
||||
if t, err := time.ParseInLocation(time.DateOnly, s, time.Local); err != nil {
|
||||
} else {
|
||||
return t, nil
|
||||
}
|
||||
|
||||
return time.Time{}, fmt.Errorf("failed to parse since=%q", s)
|
||||
}
|
||||
|
||||
func encodeResponse(w http.ResponseWriter, r *http.Request, v interface{}) error {
|
||||
if strings.Contains(r.Header.Get("Accept"), "text/csv") {
|
||||
return encodeCSVResponse(w, v)
|
||||
}
|
||||
if strings.Contains(r.Header.Get("Accept"), "text/tsv") {
|
||||
return encodeTSVResponse(w, v)
|
||||
}
|
||||
return encodeJSONResponse(w, v)
|
||||
}
|
||||
|
||||
func encodeJSONResponse(w http.ResponseWriter, v interface{}) error {
|
||||
return json.NewEncoder(w).Encode(v)
|
||||
}
|
||||
|
||||
func encodeTSVResponse(w http.ResponseWriter, v interface{}) error {
|
||||
return encodeSVResponse(w, v, "\t")
|
||||
}
|
||||
|
||||
func encodeCSVResponse(w http.ResponseWriter, v interface{}) error {
|
||||
return encodeSVResponse(w, v, ",")
|
||||
}
|
||||
|
||||
func encodeSVResponse(w http.ResponseWriter, v interface{}, delim string) error {
|
||||
b, err := json.Marshal(v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var data map[string][]map[string]json.RawMessage
|
||||
if err := json.Unmarshal(b, &data); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var objects []map[string]json.RawMessage
|
||||
for k := range data {
|
||||
objects = data[k]
|
||||
}
|
||||
|
||||
fields := []string{}
|
||||
for i := range objects {
|
||||
for k := range objects[i] {
|
||||
b, _ := json.Marshal(k)
|
||||
fields = append(fields, string(b))
|
||||
}
|
||||
break
|
||||
}
|
||||
sort.Strings(fields)
|
||||
|
||||
w.Write([]byte(strings.Join(fields, delim)))
|
||||
w.Write([]byte("\n"))
|
||||
|
||||
for _, object := range objects {
|
||||
for j, field := range fields {
|
||||
json.Unmarshal([]byte(field), &field)
|
||||
if j > 0 {
|
||||
w.Write([]byte(delim))
|
||||
}
|
||||
w.Write(object[field])
|
||||
}
|
||||
w.Write([]byte("\n"))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
130
main_test.go
Normal file
130
main_test.go
Normal file
@@ -0,0 +1,130 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestRun(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx, can := context.WithTimeout(context.Background(), time.Second*10)
|
||||
defer can()
|
||||
|
||||
port := func() int {
|
||||
s := httptest.NewServer(http.HandlerFunc(http.NotFound))
|
||||
s.Close()
|
||||
u, err := url.Parse(s.URL)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
portS := strings.Split(u.Host, ":")[1]
|
||||
port, err := strconv.ParseInt(portS, 10, 32)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return int(port)
|
||||
}()
|
||||
u := fmt.Sprintf("http://localhost:%d", port)
|
||||
var err error
|
||||
|
||||
cfg := Config{}
|
||||
cfg.DatacenterPattern = renderDatacenterPattern
|
||||
cfg.AssetPattern = renderAssetPattern
|
||||
cfg.EventNamePattern = renderEventNamePattern
|
||||
cfg.Port = port
|
||||
cfg.driver = NewTestDriver(t)
|
||||
cfg.storage, _ = NewStorage(ctx, cfg.driver)
|
||||
cfg.ai = NewAINoop()
|
||||
cfg.SlackToken = "redacted"
|
||||
cfg.SlackChannels = []string{"C06U1DDBBU4"}
|
||||
cfg.slackToModelPipeline, _ = NewSlackToModelPipeline(ctx, cfg)
|
||||
cfg.slackScrapePipeline, _ = NewSlackScrapePipeline(ctx, cfg)
|
||||
cfg.modelToPersistencePipeline, _ = NewModelToPersistencePipeline(ctx, cfg)
|
||||
cfg.persistenceToRecapPipeline, err = NewPersistenceToRecapPipeline(ctx, cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
go func() {
|
||||
if err := run(ctx, cfg); err != nil && ctx.Err() == nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
if resp, err := http.Get(u); err == nil {
|
||||
resp.Body.Close()
|
||||
break
|
||||
}
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
t.Fatal(ctx.Err())
|
||||
case <-time.After(time.Millisecond * 50):
|
||||
}
|
||||
}
|
||||
|
||||
t.Run("POST /api/v1/events/slack", func(t *testing.T) {
|
||||
b, err := os.ReadFile(path.Join("testdata", "slack_events", "opsgenie_alert_3.json"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
resp, err := http.Post(fmt.Sprintf("%s/api/v1/events/slack", u), "application/json", bytes.NewReader(b))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
b, _ := io.ReadAll(resp.Body)
|
||||
t.Fatalf("(%d) %s", resp.StatusCode, b)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("GET /api/v1/rpc/recapevent", func(t *testing.T) {
|
||||
b, err := os.ReadFile(path.Join("testdata", "slack_events", "human_thread_message_from_opsgenie_alert.json"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := cfg.slackToModelPipeline.reader.Enqueue(ctx, b); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
b, err = os.ReadFile(path.Join("testdata", "slack_events", "opsgenie_alert.json"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := cfg.slackToModelPipeline.reader.Enqueue(ctx, b); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for ctx.Err() == nil {
|
||||
if thread, _ := cfg.storage.GetThread(ctx, "1712927439.728409"); thread.Recap != "" {
|
||||
break
|
||||
}
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
case <-time.After(time.Millisecond * 100):
|
||||
}
|
||||
}
|
||||
if err := ctx.Err(); err != nil {
|
||||
t.Fatal("timed out waiting for recap")
|
||||
}
|
||||
|
||||
thread, _ := cfg.storage.GetThread(ctx, "1712927439.728409")
|
||||
if thread.Recap == "" {
|
||||
t.Error(thread.Recap)
|
||||
}
|
||||
t.Log(thread.Recap)
|
||||
})
|
||||
}
|
||||
41
message.go
41
message.go
@@ -1,41 +0,0 @@
|
||||
package main
|
||||
|
||||
import "encoding/json"
|
||||
|
||||
type Message struct {
|
||||
ID string
|
||||
TS uint64
|
||||
Source string
|
||||
Channel string
|
||||
Thread string
|
||||
EventName string
|
||||
Event string
|
||||
Plaintext string
|
||||
Asset string
|
||||
}
|
||||
|
||||
func (m Message) Empty() bool {
|
||||
return m == (Message{})
|
||||
}
|
||||
|
||||
func (m Message) Serialize() []byte {
|
||||
b, err := json.Marshal(m)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func MustDeserialize(b []byte) Message {
|
||||
m, err := Deserialize(b)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func Deserialize(b []byte) (Message, error) {
|
||||
var m Message
|
||||
err := json.Unmarshal(b, &m)
|
||||
return m, err
|
||||
}
|
||||
37
model/event.go
Normal file
37
model/event.go
Normal file
@@ -0,0 +1,37 @@
|
||||
package model
|
||||
|
||||
import "time"
|
||||
|
||||
// Event is the top-level incident record; threads (and their messages)
// hang off an event — see the entity diagram in model.go.
type Event struct {
	Updated    uint64 // last-write stamp in Unix milliseconds, set by NewEvent
	ID         string
	URL        string
	TS         uint64
	Name       string
	Asset      string
	Datacenter string
	Team       string
	Resolved   bool
}

// NewEvent builds an Event with its Updated stamp set to "now".
func NewEvent(ID, URL string, TS uint64, Name, Asset, Datacenter, Team string, Resolved bool) Event {
	return Event{
		Updated:    updated(),
		ID:         ID,
		URL:        URL,
		TS:         TS,
		Name:       Name,
		Asset:      Asset,
		Datacenter: Datacenter,
		Team:       Team,
		Resolved:   Resolved,
	}
}

// Empty reports whether e is the zero Event (nothing found or parsed).
func (e Event) Empty() bool {
	return e == (Event{})
}

// updated returns the current wall-clock time in Unix milliseconds.
// Uses time.Time.UnixMilli (Go 1.17+) instead of the hand-rolled
// UnixNano/Millisecond division.
func updated() uint64 {
	return uint64(time.Now().UnixMilli())
}
|
||||
26
model/message.go
Normal file
26
model/message.go
Normal file
@@ -0,0 +1,26 @@
|
||||
package model
|
||||
|
||||
// THREAD ||--|{ MESSAGE: "populated by"
|
||||
type Message struct {
|
||||
Updated uint64
|
||||
ID string
|
||||
TS uint64
|
||||
Author string
|
||||
Plaintext string
|
||||
ThreadID string
|
||||
}
|
||||
|
||||
func NewMessage(ID string, TS uint64, Author, Plaintext string, ThreadID string) Message {
|
||||
return Message{
|
||||
Updated: updated(),
|
||||
ID: ID,
|
||||
TS: TS,
|
||||
Author: Author,
|
||||
Plaintext: Plaintext,
|
||||
ThreadID: ThreadID,
|
||||
}
|
||||
}
|
||||
|
||||
func (m Message) Empty() bool {
|
||||
return m == (Message{})
|
||||
}
|
||||
30
model/model.go
Normal file
30
model/model.go
Normal file
@@ -0,0 +1,30 @@
|
||||
package model
|
||||
|
||||
var _ = `
|
||||
erDiagram
|
||||
%% thread event eventName
|
||||
EVENT ||--|{ THREAD: "spawns"
|
||||
THREAD ||--|{ MESSAGE: "populated by"
|
||||
|
||||
MESSAGE {
|
||||
|
||||
ID str
|
||||
URL str
|
||||
TS number
|
||||
Plaintext str
|
||||
Author str
|
||||
}
|
||||
THREAD {
|
||||
ID str
|
||||
URL str
|
||||
Channel str
|
||||
}
|
||||
EVENT {
|
||||
ID str
|
||||
Name str
|
||||
Asset str
|
||||
Resolved bool
|
||||
Datacenter str
|
||||
Team str
|
||||
}
|
||||
`
|
||||
27
model/thread.go
Normal file
27
model/thread.go
Normal file
@@ -0,0 +1,27 @@
|
||||
package model
|
||||
|
||||
// EVENT ||--|{ THREAD: "spawns"
|
||||
type Thread struct {
|
||||
Updated uint64
|
||||
ID string
|
||||
URL string
|
||||
TS uint64
|
||||
Channel string
|
||||
EventID string
|
||||
Recap string
|
||||
}
|
||||
|
||||
func NewThread(ID, URL string, TS uint64, Channel string, EventID string) Thread {
|
||||
return Thread{
|
||||
Updated: updated(),
|
||||
ID: ID,
|
||||
URL: URL,
|
||||
TS: TS,
|
||||
Channel: Channel,
|
||||
EventID: EventID,
|
||||
}
|
||||
}
|
||||
|
||||
func (t Thread) Empty() bool {
|
||||
return t == (Thread{})
|
||||
}
|
||||
67
persistence.go
Normal file
67
persistence.go
Normal file
@@ -0,0 +1,67 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
)
|
||||
|
||||
type ModelToPersistence struct {
|
||||
pipeline Pipeline
|
||||
}
|
||||
|
||||
// ModelIDs carries only the identifiers of the models a pipeline stage
// persisted; it is the JSON payload handed to downstream stages (and is
// what the recapevent endpoint enqueues).
type ModelIDs struct {
	Event   string
	Message string
	Thread  string
}
|
||||
|
||||
func NewModelToPersistencePipeline(ctx context.Context, cfg Config) (Pipeline, error) {
|
||||
reader, err := NewQueue(ctx, "new_models", cfg.driver)
|
||||
if err != nil {
|
||||
return Pipeline{}, err
|
||||
}
|
||||
writer, err := NewQueue(ctx, "new_persistence", cfg.driver)
|
||||
if err != nil {
|
||||
return Pipeline{}, err
|
||||
}
|
||||
return Pipeline{
|
||||
writer: writer,
|
||||
reader: reader,
|
||||
process: newModelToPersistenceProcess(cfg, cfg.storage),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func newModelToPersistenceProcess(cfg Config, storage Storage) processFunc {
|
||||
return func(ctx context.Context, models []byte) ([]byte, error) {
|
||||
var m Models
|
||||
if err := json.Unmarshal(models, &m); err != nil {
|
||||
return nil, fmt.Errorf("received non models payload: %w", err)
|
||||
}
|
||||
|
||||
if m.Event.Empty() {
|
||||
} else if err := storage.UpsertEvent(ctx, m.Event); err != nil {
|
||||
return nil, fmt.Errorf("failed to persist event: %w", err)
|
||||
}
|
||||
|
||||
if m.Thread.Empty() {
|
||||
} else if err := storage.UpsertThread(ctx, m.Thread); err != nil {
|
||||
return nil, fmt.Errorf("failed to persist thread: %w", err)
|
||||
}
|
||||
|
||||
if m.Message.Empty() {
|
||||
} else if err := storage.UpsertMessage(ctx, m.Message); err != nil {
|
||||
return nil, fmt.Errorf("failed to persist message: %w", err)
|
||||
}
|
||||
|
||||
if cfg.Debug {
|
||||
log.Printf("persisted models")
|
||||
}
|
||||
return json.Marshal(ModelIDs{
|
||||
Event: m.Event.ID,
|
||||
Thread: m.Thread.ID,
|
||||
Message: m.Message.ID,
|
||||
})
|
||||
}
|
||||
}
|
||||
63
persistence_test.go
Normal file
63
persistence_test.go
Normal file
@@ -0,0 +1,63 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/breel-render/spoc-bot-vr/model"
|
||||
)
|
||||
|
||||
func TestModelToPersistenceProcessor(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx, can := context.WithTimeout(context.Background(), time.Second*10)
|
||||
defer can()
|
||||
|
||||
d := NewTestDriver(t)
|
||||
s, _ := NewStorage(ctx, d)
|
||||
process := newModelToPersistenceProcess(Config{}, s)
|
||||
|
||||
_, _ = ctx, process
|
||||
|
||||
inputModels := Models{
|
||||
Event: model.Event{ID: "event", Asset: "event-asset"},
|
||||
//Thread: {ID: "thread", Channel: "thread-channel"},
|
||||
Message: model.Message{ID: "message", Plaintext: "message-plaintext"},
|
||||
}
|
||||
input, _ := json.Marshal(inputModels)
|
||||
|
||||
var outputModelIDs ModelIDs
|
||||
var n int
|
||||
if output, err := process(ctx, input); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if err := json.Unmarshal(output, &outputModelIDs); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if outputModelIDs != (ModelIDs{Event: "event", Message: "message"}) {
|
||||
t.Error(outputModelIDs)
|
||||
}
|
||||
|
||||
if row := d.QueryRowContext(ctx, `SELECT COUNT(*) FROM events`); row.Err() != nil {
|
||||
t.Error("cant count events:", row.Err())
|
||||
} else if err := row.Scan(&n); err != nil {
|
||||
t.Error("cant count events:", err)
|
||||
} else if n != 1 {
|
||||
t.Error("bad event count:", n)
|
||||
}
|
||||
|
||||
if row := d.QueryRowContext(ctx, `SELECT COUNT(*) FROM threads`); row.Err() != nil {
|
||||
t.Error("cant count threads:", row.Err())
|
||||
} else if err := row.Scan(&n); err != nil {
|
||||
t.Error("cant count threads:", err)
|
||||
} else if n != 0 {
|
||||
t.Error("bad thread count:", n)
|
||||
}
|
||||
|
||||
if row := d.QueryRowContext(ctx, `SELECT COUNT(*) FROM messages`); row.Err() != nil {
|
||||
t.Error("cant count messages:", row.Err())
|
||||
} else if err := row.Scan(&n); err != nil {
|
||||
t.Error("cant count messages:", err)
|
||||
} else if n != 1 {
|
||||
t.Error("bad message count:", n)
|
||||
}
|
||||
}
|
||||
55
pipeline.go
Normal file
55
pipeline.go
Normal file
@@ -0,0 +1,55 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
)
|
||||
|
||||
type (
	// Pipeline reads payloads from reader, transforms them with process,
	// and enqueues non-nil results onto writer.
	Pipeline struct {
		writer  Queue
		reader  Queue
		process processFunc
	}
	// processFunc transforms one dequeued payload; returning (nil, nil)
	// means "processed, but nothing to forward downstream".
	processFunc func(context.Context, []byte) ([]byte, error)
)
|
||||
|
||||
func NewPipeline(writer, reader Queue, process processFunc) Pipeline {
|
||||
return Pipeline{
|
||||
writer: writer,
|
||||
reader: reader,
|
||||
process: process,
|
||||
}
|
||||
}
|
||||
|
||||
func (p Pipeline) Process(ctx context.Context) error {
|
||||
ctx, can := context.WithCancel(ctx)
|
||||
defer can()
|
||||
err := p.processUntilErr(ctx)
|
||||
if err != nil {
|
||||
log.Printf("pipeline failed to process: %v", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (p Pipeline) processUntilErr(ctx context.Context) error {
|
||||
for ctx.Err() == nil {
|
||||
reservation, read, err := p.reader.Syn(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
processed, err := p.process(ctx, read)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if processed == nil {
|
||||
} else if err := p.writer.Enqueue(ctx, processed); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.reader.Ack(ctx, reservation); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return ctx.Err()
|
||||
}
|
||||
92
pipeline_test.go
Normal file
92
pipeline_test.go
Normal file
@@ -0,0 +1,92 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestPipelineDoesntPushEmptyMessage(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx, can := context.WithTimeout(context.Background(), time.Second*10)
|
||||
defer can()
|
||||
|
||||
output, _ := NewQueue(ctx, "output", NewTestDriver(t))
|
||||
input, _ := NewQueue(ctx, "input", NewTestDriver(t))
|
||||
|
||||
calls := 0
|
||||
process := func(_ context.Context, v []byte) ([]byte, error) {
|
||||
calls += 1
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if err := input.Enqueue(ctx, []byte("hello")); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
ing := NewPipeline(output, input, process)
|
||||
go func() {
|
||||
defer can()
|
||||
if err := ing.Process(ctx); err != nil && ctx.Err() == nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}()
|
||||
|
||||
for ctx.Err() == nil {
|
||||
if calls != 0 {
|
||||
break
|
||||
}
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
case <-time.After(time.Millisecond * 100):
|
||||
}
|
||||
}
|
||||
|
||||
if r, _, _ := output.syn(ctx); len(r) != 0 {
|
||||
t.Error("something was pushed to out queue even though processor didnt emit content")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPipeline(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx, can := context.WithTimeout(context.Background(), time.Second*10)
|
||||
defer can()
|
||||
|
||||
output, err := NewQueue(ctx, "output", NewTestDriver(t))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
input, err := NewQueue(ctx, "input", NewTestDriver(t))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
found := map[string]struct{}{}
|
||||
process := func(_ context.Context, v []byte) ([]byte, error) {
|
||||
found[string(v)] = struct{}{}
|
||||
return []byte("world"), nil
|
||||
}
|
||||
|
||||
if err := input.Enqueue(ctx, []byte("hello")); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
ing := NewPipeline(output, input, process)
|
||||
go func() {
|
||||
defer can()
|
||||
if err := ing.Process(ctx); err != nil && ctx.Err() == nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}()
|
||||
|
||||
if r, p, err := output.Syn(ctx); err != nil {
|
||||
t.Error(err)
|
||||
} else if string(p) != "world" {
|
||||
t.Errorf("Syn() = (%q, %q, %v)", r, p, err)
|
||||
} else if err := output.Ack(ctx, r); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if len(found) != 1 {
|
||||
t.Error(found)
|
||||
}
|
||||
}
|
||||
141
queue.go
141
queue.go
@@ -2,61 +2,138 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/go-errors/errors"
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
type Queue struct {
|
||||
driver Driver
|
||||
topic string
|
||||
}
|
||||
|
||||
func NewTestQueue() Queue {
|
||||
return Queue{driver: NewTestDB()}
|
||||
func NewNoopQueue() Queue {
|
||||
return Queue{}
|
||||
}
|
||||
|
||||
func NewQueue(driver Driver) Queue {
|
||||
return Queue{driver: driver}
|
||||
// NewQueue ensures the shared queue table exists and returns a Queue bound
// to the given topic. Multiple Queues may share one driver; rows are
// partitioned by the topic column.
func NewQueue(ctx context.Context, topic string, driver Driver) (Queue, error) {
	if _, err := driver.ExecContext(ctx, `
		CREATE TABLE IF NOT EXISTS queue (
			id TEXT PRIMARY KEY,
			topic TEXT NOT NULL,
			updated INTEGER NOT NULL,
			reservation TEXT,
			payload TEXT
		);
	`); err != nil {
		return Queue{}, fmt.Errorf("failed to create table: %w", err)
	}
	return Queue{topic: topic, driver: driver}, nil
}
|
||||
|
||||
func (q Queue) Push(ctx context.Context, m Message) error {
|
||||
return q.driver.Set(ctx, "q", m.ID, m.Serialize())
|
||||
// Enqueue inserts one payload under the queue's topic with a fresh row id
// and the current timestamp. On a noop Queue (nil driver) it silently
// succeeds.
func (q Queue) Enqueue(ctx context.Context, b []byte) error {
	if q.driver.DB == nil {
		return nil
	}
	result, err := q.driver.ExecContext(ctx, `
		INSERT INTO queue (id, topic, updated, payload) VALUES ($1, $2, $3, $4)
	`,
		uuid.New().String(),
		q.topic,
		time.Now().Unix(),
		b,
	)
	if err != nil {
		return err
	}
	// Exactly one row must have been inserted.
	if n, err := result.RowsAffected(); err != nil {
		return err
	} else if n != 1 {
		return fmt.Errorf("insert into queue %s affected %v rows", b, n)
	}
	return nil
}
|
||||
|
||||
func (q Queue) PeekFirst(ctx context.Context) (Message, error) {
|
||||
func (q Queue) Syn(ctx context.Context) (string, []byte, error) {
|
||||
if q.driver.DB == nil {
|
||||
return "", nil, nil
|
||||
}
|
||||
for {
|
||||
m, err := q.peekFirst(ctx)
|
||||
if err != nil {
|
||||
return m, err
|
||||
}
|
||||
|
||||
if !m.Empty() {
|
||||
return m, nil
|
||||
reservation, m, err := q.syn(ctx)
|
||||
if reservation != nil || err != nil {
|
||||
return string(reservation), m, err
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return Message{}, ctx.Err()
|
||||
case <-time.After(time.Second):
|
||||
return "", nil, ctx.Err()
|
||||
case <-time.After(time.Millisecond * 500):
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (q Queue) Ack(ctx context.Context, id string) error {
|
||||
return q.driver.Set(ctx, "q", id, nil)
|
||||
// syn attempts one non-blocking reservation: it stamps a fresh reservation
// UUID onto a single unreserved (or stale: updated more than 600s ago) row
// for this topic, then reads that row's payload back. It returns
// (nil, nil, nil) when no row was available.
func (q Queue) syn(ctx context.Context) ([]byte, []byte, error) {
	now := time.Now().Unix()
	reservation := []byte(uuid.New().String())
	var payload []byte
	if result, err := q.driver.ExecContext(ctx, `
		UPDATE queue
		SET
			updated = $1, reservation = $2
		WHERE
			id IN (
				SELECT id
				FROM queue
				WHERE
					topic = $3
					AND (
						reservation IS NULL
						OR $4 - updated > 600
					)
				LIMIT 1
			)
	`, now, reservation, q.topic, now); err != nil {
		return nil, nil, fmt.Errorf("failed to assign reservation: %w", err)
	} else if n, err := result.RowsAffected(); err != nil {
		return nil, nil, fmt.Errorf("failed to assign reservation: no count: %w", err)
	} else if n == 0 {
		// Nothing to reserve right now; caller decides whether to retry.
		return nil, nil, nil
	}

	row := q.driver.QueryRowContext(ctx, `
		SELECT payload
		FROM queue
		WHERE reservation=$1
		LIMIT 1
	`, reservation)
	// A missing row ("no rows in result") is tolerated: payload stays nil.
	if err := row.Err(); err != nil {
		return nil, nil, fmt.Errorf("failed to query reservation: %w", err)
	} else if err := row.Scan(&payload); err != nil && !strings.Contains(err.Error(), "no rows in result") {
		return nil, nil, fmt.Errorf("failed to parse reservation: %w", err)
	}

	return reservation, payload, nil
}
|
||||
|
||||
func (q Queue) peekFirst(ctx context.Context) (Message, error) {
|
||||
var m Message
|
||||
subctx, subcan := context.WithCancel(ctx)
|
||||
defer subcan()
|
||||
err := q.driver.ForEach(subctx, "q", func(_ string, value []byte) error {
|
||||
m = MustDeserialize(value)
|
||||
subcan()
|
||||
return nil
|
||||
})
|
||||
if errors.Is(err, subctx.Err()) {
|
||||
err = nil
|
||||
}
|
||||
return m, err
|
||||
// Ack deletes the row held by reservation, completing the syn/ack cycle.
func (q Queue) Ack(ctx context.Context, reservation string) error {
	return q.ack(ctx, []byte(reservation))
}
|
||||
|
||||
func (q Queue) ack(ctx context.Context, reservation []byte) error {
|
||||
if q.driver.DB == nil {
|
||||
return nil
|
||||
}
|
||||
result, err := q.driver.ExecContext(ctx, `
|
||||
DELETE FROM queue
|
||||
WHERE reservation=$1
|
||||
`, reservation)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if n, _ := result.RowsAffected(); n != 1 {
|
||||
return fmt.Errorf("failed to ack %s: %v rows affected", reservation, n)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
74
queue_test.go
Normal file
74
queue_test.go
Normal file
@@ -0,0 +1,74 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestQueue(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx, can := context.WithTimeout(context.Background(), time.Second*10)
|
||||
defer can()
|
||||
|
||||
q, err := NewQueue(ctx, "", NewTestDriver(t))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
qOther, _ := NewQueue(ctx, "other", q.driver)
|
||||
|
||||
if reservation, _, err := q.syn(ctx); reservation != nil {
|
||||
t.Errorf("able to syn before any enqueues created: %v", err)
|
||||
} else {
|
||||
t.Logf("sync before enqueues: %v", err)
|
||||
}
|
||||
|
||||
t.Run("enqueue", func(t *testing.T) {
|
||||
for i := 0; i < 39; i++ {
|
||||
if err := q.Enqueue(ctx, []byte(strconv.Itoa(i))); err != nil {
|
||||
t.Fatal(i, err)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
if err := qOther.Enqueue(ctx, []byte(strconv.Itoa(100))); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Run("syn ack", func(t *testing.T) {
|
||||
found := map[string]struct{}{}
|
||||
for i := 0; i < 39; i++ {
|
||||
if reservation, b, err := q.Syn(ctx); err != nil {
|
||||
t.Fatal(i, "syn err", err)
|
||||
} else if _, ok := found[string(b)]; ok {
|
||||
t.Errorf("syn'd %q twice (%+v)", b, found)
|
||||
} else if err := q.Ack(ctx, reservation); err != nil {
|
||||
t.Fatal(i, "failed to ack", err)
|
||||
} else {
|
||||
found[string(b)] = struct{}{}
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
if reservation, _, err := q.syn(ctx); reservation != nil {
|
||||
t.Errorf("able to syn 1 more message than created: %v", err)
|
||||
} else if reservation, _, err := qOther.syn(ctx); reservation == nil {
|
||||
t.Errorf("unable to syn from other topic: %v", err)
|
||||
} else {
|
||||
t.Logf("empty q.syn = %v", err)
|
||||
}
|
||||
|
||||
t.Run("noop", func(t *testing.T) {
|
||||
q := NewNoopQueue()
|
||||
if err := q.Enqueue(nil, nil); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if _, _, err := q.Syn(nil); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if err := q.Ack(nil, ""); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
})
|
||||
}
|
||||
87
recap.go
Normal file
87
recap.go
Normal file
@@ -0,0 +1,87 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// PersistenceToRecap wraps the pipeline that reads persisted model IDs and
// writes AI-generated recaps back onto their threads.
type PersistenceToRecap struct {
	pipeline Pipeline
}
|
||||
|
||||
func NewPersistenceToRecapPipeline(ctx context.Context, cfg Config) (Pipeline, error) {
|
||||
reader, err := NewQueue(ctx, "new_persistence", cfg.driver)
|
||||
if err != nil {
|
||||
return Pipeline{}, err
|
||||
}
|
||||
writer := NewNoopQueue()
|
||||
return Pipeline{
|
||||
writer: writer,
|
||||
reader: reader,
|
||||
process: newPersistenceToRecapProcess(cfg),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// newPersistenceToRecapProcess returns a processFunc that, for a resolved
// event, builds an AI prompt from each of its threads' messages and stores
// the resulting recap on the thread. It never emits a downstream payload
// (the pipeline's writer is a noop queue).
func newPersistenceToRecapProcess(cfg Config) processFunc {
	return func(ctx context.Context, modelIDs []byte) ([]byte, error) {
		var m ModelIDs
		if err := json.Unmarshal(modelIDs, &m); err != nil {
			return nil, fmt.Errorf("received non model ids payload: %w", err)
		}

		// Only recap when an event ID is present and that event is resolved;
		// the empty branches intentionally skip the rest of the chain.
		if m.Event == "" {
		} else if event, err := cfg.storage.GetEvent(ctx, m.Event); err != nil {
			return nil, err
		} else if !event.Resolved {
		} else if err := func() error {
			threads, err := cfg.storage.GetEventThreads(ctx, event.ID)
			if err != nil {
				return err
			}
			for _, thread := range threads {
				messages, err := cfg.storage.GetThreadMessages(ctx, thread.ID)
				if err != nil {
					return err
				} else if len(messages) < 2 {
					// A lone root message has no discussion worth recapping.
					continue
				}

				// Prompt layout: intro, root message, recap instruction,
				// then each reply as "author\ntext".
				prompt := []string{
					cfg.RecapPromptIntro,
					"---",
					messages[0].Plaintext,
					"---",
					cfg.RecapPrompt,
					"---",
				}
				for _, message := range messages[1:] {
					prompt = append(prompt, fmt.Sprintf("%s\n%s", message.Author, message.Plaintext))
				}

				recap, err := cfg.ai.Do(ctx, strings.Join(prompt, "\n\n"))
				if err != nil {
					return err
				}
				thread.Recap = recap
				if err := cfg.storage.UpsertThread(ctx, thread); err != nil {
					return err
				}
				log.Println("recapped", thread.ID)
				if cfg.Debug {
					log.Printf("Recapped %q as %q from %q/%q and %+v", thread.ID, thread.Recap, cfg.RecapPromptIntro, cfg.RecapPrompt, messages)
				}
			}
			return nil
		}(); err != nil {
			return nil, err
		}

		if cfg.Debug {
			log.Printf("persisted recap")
		}
		// nil payload: nothing is forwarded to the (noop) writer.
		return nil, nil
	}
}
|
||||
50
recap_test.go
Normal file
50
recap_test.go
Normal file
@@ -0,0 +1,50 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/breel-render/spoc-bot-vr/model"
|
||||
)
|
||||
|
||||
func TestNewPersistenceToRecapProcess(t *testing.T) {
|
||||
ctx, can := context.WithTimeout(context.Background(), time.Second*10)
|
||||
defer can()
|
||||
|
||||
d := NewTestDriver(t)
|
||||
s, _ := NewStorage(ctx, d)
|
||||
|
||||
cfg := Config{
|
||||
driver: d,
|
||||
storage: s,
|
||||
ai: NewAINoop(),
|
||||
Debug: true,
|
||||
}
|
||||
|
||||
proc := newPersistenceToRecapProcess(cfg)
|
||||
|
||||
if err := s.UpsertEvent(ctx, model.NewEvent("Event", "", 0, "", "", "", "", true)); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if err := s.UpsertThread(ctx, model.NewThread("Thread", "", 0, "", "Event")); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if err := s.UpsertMessage(ctx, model.NewMessage("Root", 0, "bot", "an alert has fired", "Thread")); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if err := s.UpsertMessage(ctx, model.NewMessage("Message", 0, "me", "hello world", "Thread")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
b, _ := json.Marshal(ModelIDs{Event: "Event"})
|
||||
if _, err := proc(ctx, b); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if thread, err := s.GetThread(ctx, "Thread"); err != nil {
|
||||
t.Error(err)
|
||||
} else if thread.Recap == "" {
|
||||
t.Error("no recap:", thread.Recap)
|
||||
} else {
|
||||
t.Logf("%+v", thread)
|
||||
}
|
||||
}
|
||||
278
slack.go
Normal file
278
slack.go
Normal file
@@ -0,0 +1,278 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/breel-render/spoc-bot-vr/model"
|
||||
)
|
||||
|
||||
var (
	// ErrIrrelevantMessage marks slack payloads the bot should silently drop.
	ErrIrrelevantMessage = errors.New("message isnt relevant to spoc bot vr")
)
|
||||
|
||||
// SlackToModel wraps the pipeline that converts raw slack events into Models.
type SlackToModel struct {
	pipeline Pipeline
}
|
||||
|
||||
// Models bundles the (possibly zero-valued) models parsed from one slack
// event; downstream stages skip any member whose Empty() is true.
type Models struct {
	Event   model.Event
	Message model.Message
	Thread  model.Thread
}
||||
|
||||
func NewSlackToModelPipeline(ctx context.Context, cfg Config) (Pipeline, error) {
|
||||
reader, err := NewQueue(ctx, "slack_event", cfg.driver)
|
||||
if err != nil {
|
||||
return Pipeline{}, err
|
||||
}
|
||||
writer, err := NewQueue(ctx, "new_models", cfg.driver)
|
||||
if err != nil {
|
||||
return Pipeline{}, err
|
||||
}
|
||||
return Pipeline{
|
||||
writer: writer,
|
||||
reader: reader,
|
||||
process: newSlackToModelProcess(cfg),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// newSlackToModelProcess returns a processFunc that parses a raw slack
// payload, narrows free-text fields with the configured regex patterns,
// and emits the resulting Models as JSON. Irrelevant slack messages yield
// (nil, nil) so nothing is forwarded.
func newSlackToModelProcess(cfg Config) processFunc {
	return func(ctx context.Context, slack []byte) ([]byte, error) {
		s, err := parseSlack(slack)
		if cfg.Debug {
			log.Printf("%v: %s => %+v", err, slack, s)
		}
		if errors.Is(err, ErrIrrelevantMessage) {
			// Not an error: just nothing to forward downstream.
			return nil, nil
		} else if err != nil {
			return nil, fmt.Errorf("failed to deserialize slack %v", err)
		}

		// Refine each field with its pattern in place. Map iteration order
		// is random, but the entries are independent of each other.
		for pattern, ptr := range map[string]*string{
			cfg.AssetPattern:      &s.Asset,
			cfg.DatacenterPattern: &s.Datacenter,
			cfg.EventNamePattern:  &s.EventName,
		} {
			*ptr = withPattern(pattern, *ptr)
		}

		// Build each model only when its required fields are present;
		// otherwise leave it zero-valued (Empty()) for downstream skipping.
		event := model.Event{}
		if s.Event != "" && s.Source != "" && s.TS > 0 && s.EventName != "" {
			event = model.NewEvent(s.Event, s.Source, s.TS, s.EventName, s.Asset, s.Datacenter, s.Team, s.Resolved)
		}
		message := model.Message{}
		if s.ID != "" && s.Source != "" && s.TS > 0 && s.Thread != "" {
			message = model.NewMessage(s.ID, s.TS, s.Author, s.Plaintext, s.Thread)
		}
		thread := model.Thread{}
		if s.Thread != "" && s.Source != "" && s.TS > 0 && s.Event != "" {
			thread = model.NewThread(s.Thread, s.Source, s.TS, s.Channel, s.Event)
		}

		if cfg.Debug {
			log.Printf("parsed slack message into models")
		}
		return json.Marshal(Models{
			Event:   event,
			Message: message,
			Thread:  thread,
		})
	}
}
|
||||
|
||||
// withPattern extracts the part of given matching pattern. If the pattern
// declares named capture groups, the last named group's capture is returned
// instead of the whole match; a non-matching input returns "".
//
// Bug fix: the previous version called r.FindStringSubmatch(given)[i]
// without a nil check, panicking whenever a pattern with named groups did
// not match the input. It also recompiled the submatch per subexpression.
func withPattern(pattern string, given string) string {
	r := regexp.MustCompile(pattern)
	m := r.FindStringSubmatch(given)
	if m == nil {
		// No match: preserve the old "" result, without the panic.
		return ""
	}
	parsed := m[0] // whole-match default (what FindString returned)
	for i, name := range r.SubexpNames() {
		if i > 0 && name != "" {
			parsed = m[i]
		}
	}
	return parsed
}
|
||||
|
||||
type (
	// parsedSlackMessage is the normalized view of one slack event, before
	// pattern refinement and model construction.
	parsedSlackMessage struct {
		ID         string // "<thread ts>/<event time>", built in parseSlack
		TS         uint64 // unix seconds of the slack event
		Source     string // permalink URL to the thread
		Channel    string // slack channel ID
		Thread     string // slack thread timestamp
		EventName  string
		Event      string // alert number parsed from the attachment title
		Plaintext  string
		Asset      string
		Resolved   bool
		Datacenter string
		Author     string // bot name or slack user ID
		Team       string
	}

	// slackMessage is the raw slack event-callback payload. Plain "message"
	// payloads carry their fields on the embedded slackEvent; _parseSlack
	// lifts them into Event.
	slackMessage struct {
		slackEvent
		Type      string
		TS        uint64 `json:"event_time"`
		Event     slackEvent
		MessageTS string `json:"ts"`
	}

	// slackEvent is the inner slack event; which fields are populated
	// depends on whether a human or a bot authored the message.
	slackEvent struct {
		ID      string `json:"event_ts"`
		Channel string
		// rewrites
		Nested          *slackEvent `json:"message"`
		PreviousMessage *slackEvent `json:"previous_message"`
		// human
		ParentID string `json:"thread_ts"`
		Text     string
		Blocks   []slackBlock
		User     string
		// bot
		Bot         slackBot `json:"bot_profile"`
		Attachments []slackAttachment
	}

	// slackBlock / slackElement model slack's nested rich-text block tree.
	slackBlock struct {
		Elements []slackElement
	}

	slackElement struct {
		Elements []slackElement
		RichText string `json:"text"`
	}

	slackBot struct {
		Name string
	}

	// slackAttachment is the opsgenie-style alert attachment parseSlack
	// reads the alert title, text, color, and fields from.
	slackAttachment struct {
		Color   string
		Title   string
		Text    string
		Fields  []slackField
		Actions []slackAction
	}

	slackField struct {
		Value string
		Title string
	}

	slackAction struct{}
)
|
||||
|
||||
// parseSlack converts a raw slack payload into a parsedSlackMessage.
// Bot messages must carry an opsgenie ": Firing: " alert attachment;
// human messages must be thread replies. Anything else returns
// ErrIrrelevantMessage.
func parseSlack(b []byte) (parsedSlackMessage, error) {
	s, err := _parseSlack(b)
	if err != nil {
		return parsedSlackMessage{}, err
	}

	/*
		if ch != "" {
			s.Event.Channel = ch
		}
	*/

	// Bot-authored message: expect an opsgenie alert attachment.
	if s.Event.Bot.Name != "" {
		if len(s.Event.Attachments) == 0 {
			return parsedSlackMessage{}, ErrIrrelevantMessage
		} else if !strings.Contains(s.Event.Attachments[0].Title, ": Firing: ") {
			return parsedSlackMessage{}, ErrIrrelevantMessage
		}
		var tagsField string
		var teamField string
		for _, field := range s.Event.Attachments[0].Fields {
			switch field.Title {
			case "Tags":
				tagsField = field.Value
			case "Routed Teams":
				teamField = field.Value
			}
		}
		// Title looks like "#11071: [Grafana]: Firing: <event name>" —
		// the alert number and event name are split out of it below.
		return parsedSlackMessage{
			ID:         fmt.Sprintf("%s/%v", s.Event.ID, s.TS),
			TS:         s.TS,
			Source:     fmt.Sprintf(`https://renderinc.slack.com/archives/%s/p%s`, s.Event.Channel, strings.ReplaceAll(s.Event.ID, ".", "")),
			Channel:    s.Event.Channel,
			Thread:     s.Event.ID,
			EventName:  strings.Split(s.Event.Attachments[0].Title, ": Firing: ")[1],
			Event:      strings.TrimPrefix(strings.Split(s.Event.Attachments[0].Title, ":")[0], "#"),
			Plaintext:  s.Event.Attachments[0].Text,
			Asset:      s.Event.Attachments[0].Text,
			Resolved:   !strings.HasPrefix(s.Event.Attachments[0].Color, "F"),
			Datacenter: tagsField,
			Author:     s.Event.Bot.Name,
			Team:       teamField,
		}, nil
	}

	// Human-authored message: only thread replies are relevant.
	if s.Event.ParentID == "" {
		return parsedSlackMessage{}, ErrIrrelevantMessage
	}
	return parsedSlackMessage{
		ID:         fmt.Sprintf("%s/%v", s.Event.ParentID, s.TS),
		TS:         s.TS,
		Source:     fmt.Sprintf(`https://renderinc.slack.com/archives/%s/p%s`, s.Event.Channel, strings.ReplaceAll(s.Event.ParentID, ".", "")),
		Channel:    s.Event.Channel,
		Thread:     s.Event.ParentID,
		EventName:  "",
		Event:      "",
		Plaintext:  s.Event.Text,
		Asset:      "",
		Datacenter: "",
		Author:     s.Event.User,
	}, nil
}
|
||||
|
||||
// _parseSlack decodes b (optionally wrapped in a ChannelWrapper) into a
// slackMessage and normalizes it: plain "message" payloads are lifted into
// Event, nested message rewrites are flattened, and edited messages keep
// their original (previous_message) ID.
func _parseSlack(b []byte) (slackMessage, error) {
	var wrapper ChannelWrapper
	if err := json.Unmarshal(b, &wrapper); err == nil && len(wrapper.V) > 0 {
		b = wrapper.V
	}

	var result slackMessage
	err := json.Unmarshal(b, &result)
	switch result.Type {
	case "message":
		// Top-level message: fields arrived on the embedded slackEvent.
		result.Event = result.slackEvent
		result.TS, _ = strconv.ParseUint(strings.Split(result.MessageTS, ".")[0], 10, 64)
		result.Event.ID = result.MessageTS
	}
	if result.Event.Nested != nil && !result.Event.Nested.Empty() {
		// Message rewrite: promote the nested message's content.
		result.Event.Blocks = result.Event.Nested.Blocks
		result.Event.Bot = result.Event.Nested.Bot
		result.Event.Attachments = result.Event.Nested.Attachments
		result.Event.Nested = nil
	}
	if result.Event.PreviousMessage != nil {
		// Edited message: keep the original message's ID.
		if result.Event.PreviousMessage.ID != "" {
			result.Event.ID = result.Event.PreviousMessage.ID
		}
		result.Event.PreviousMessage = nil
	}
	if wrapper.Channel != "" {
		result.Event.Channel = wrapper.Channel
	}
	return result, err
}
|
||||
|
||||
func (this slackEvent) Empty() bool {
|
||||
return fmt.Sprintf("%+v", this) == fmt.Sprintf("%+v", slackEvent{})
|
||||
}
|
||||
|
||||
func (this parsedSlackMessage) Time() time.Time {
|
||||
return time.Unix(int64(this.TS), 0)
|
||||
}
|
||||
|
||||
// ChannelWrapper is an optional envelope around a raw slack event: Channel
// carries the slack channel ID and V the raw event JSON, decoded lazily by
// _parseSlack.
type ChannelWrapper struct {
	Channel string
	V       json.RawMessage
}
|
||||
303
slack_test.go
Normal file
303
slack_test.go
Normal file
@@ -0,0 +1,303 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/breel-render/spoc-bot-vr/model"
|
||||
"gotest.tools/assert"
|
||||
)
|
||||
|
||||
func TestSlackToModelPipeline(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx, can := context.WithTimeout(context.Background(), time.Second*5)
|
||||
defer can()
|
||||
|
||||
pipeline, err := NewSlackToModelPipeline(ctx, Config{
|
||||
driver: NewTestDriver(t),
|
||||
AssetPattern: renderAssetPattern,
|
||||
DatacenterPattern: renderDatacenterPattern,
|
||||
EventNamePattern: renderEventNamePattern,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
go func() {
|
||||
if err := pipeline.Process(ctx); err != nil && ctx.Err() == nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}()
|
||||
|
||||
want := Models{
|
||||
Event: model.NewEvent(
|
||||
"11071",
|
||||
"https://renderinc.slack.com/archives/C06U1DDBBU4/p1712927439728409",
|
||||
1712927439,
|
||||
"Alertconfig Workflow Failed",
|
||||
"",
|
||||
"",
|
||||
"Datastores Non-Critical",
|
||||
true,
|
||||
),
|
||||
Message: model.NewMessage(
|
||||
"1712927439.728409/1712927439",
|
||||
1712927439,
|
||||
"Opsgenie for Alert Management",
|
||||
"At least one alertconfig run has failed unexpectedly.\nDashboard: <https://grafana.render.com/d/VLZU83YVk?orgId=1>\nPanel: <https://grafana.render.com/d/VLZU83YVk?orgId=1&viewPanel=17>\nSource: <https://grafana.render.com/alerting/grafana/fa7b06b8-b4d8-4979-bce7-5e1c432edd81/view?orgId=1>",
|
||||
"1712927439.728409",
|
||||
),
|
||||
Thread: model.NewThread(
|
||||
"1712927439.728409",
|
||||
"https://renderinc.slack.com/archives/C06U1DDBBU4/p1712927439728409",
|
||||
1712927439,
|
||||
"C06U1DDBBU4",
|
||||
"11071",
|
||||
),
|
||||
/*
|
||||
ID: "1712927439.728409/1712927439",
|
||||
TS: 1712927439,
|
||||
Source: "https://renderinc.slack.com/archives/C06U1DDBBU4/p1712927439728409",
|
||||
Channel: "C06U1DDBBU4",
|
||||
Thread: "1712927439.728409",
|
||||
EventName: "",
|
||||
Event: "11071",
|
||||
Plaintext: "At least one alertconfig run has failed unexpectedly.\nDashboard: <https://grafana.render.com/d/VLZU83YVk?orgId=1>\nPanel: <https://grafana.render.com/d/VLZU83YVk?orgId=1&viewPanel=17>\nSource: <https://grafana.render.com/alerting/grafana/fa7b06b8-b4d8-4979-bce7-5e1c432edd81/view?orgId=1>",
|
||||
Asset: "At least one alertconfig run has failed unexpectedly.\nDashboard: <https://grafana.render.com/d/VLZU83YVk?orgId=1>\nPanel: <https://grafana.render.com/d/VLZU83YVk?orgId=1&viewPanel=17>\nSource: <https://grafana.render.com/alerting/grafana/fa7b06b8-b4d8-4979-bce7-5e1c432edd81/view?orgId=1>",
|
||||
Datacenter: "alertname:Alertconfig Workflow Failed, grafana_folder:Datastores, rule_uid:a7639f7e-6950-41be-850a-b22119f74cbb",
|
||||
*/
|
||||
}
|
||||
|
||||
b, _ := os.ReadFile("testdata/slack_events/opsgenie_alert.json")
|
||||
if err := pipeline.reader.Enqueue(ctx, b); err != nil {
|
||||
t.Fatal("failed to enqueue", err)
|
||||
}
|
||||
var got Models
|
||||
if _, b2, err := pipeline.writer.Syn(ctx); err != nil {
|
||||
t.Fatal("failed to syn", err)
|
||||
} else if err := json.Unmarshal(b2, &got); err != nil {
|
||||
t.Fatal("failed to parse outqueue:", err)
|
||||
} else {
|
||||
want.Event.Updated = 0
|
||||
want.Message.Updated = 0
|
||||
want.Thread.Updated = 0
|
||||
got.Event.Updated = 0
|
||||
got.Message.Updated = 0
|
||||
got.Thread.Updated = 0
|
||||
assert.DeepEqual(t, want, got)
|
||||
}
|
||||
}
|
||||
|
||||
// TestParseSlackTestdata pins the parsedSlackMessage produced for each JSON
// fixture under testdata/slack_events. The map key doubles as the fixture
// filename that is read and parsed.
//
// NOTE(review): the slackMessage field of each case is never asserted against
// in this test — presumably kept for a future raw-decode subtest; confirm
// whether it can be dropped.
func TestParseSlackTestdata(t *testing.T) {
	t.Parallel()
	cases := map[string]struct {
		slackMessage slackMessage
		message      parsedSlackMessage
	}{
		// A human reply inside a thread started by an Opsgenie alert: no
		// event/asset extraction, but the thread linkage must point at the
		// parent alert message.
		"human_thread_message_from_opsgenie_alert.json": {
			slackMessage: slackMessage{
				TS: 1712930706,
				Event: slackEvent{
					ID:       "1712930706.598629",
					Channel:  "C06U1DDBBU4",
					ParentID: "1712927439.728409",
					Text:     "I gotta do this",
					Blocks: []slackBlock{{
						Elements: []slackElement{{
							Elements: []slackElement{{
								RichText: "I gotta do this",
							}},
						}},
					}},
					Bot: slackBot{
						Name: "",
					},
					Attachments: []slackAttachment{},
				},
			},
			message: parsedSlackMessage{
				ID:        "1712927439.728409/1712930706",
				TS:        1712930706,
				Source:    "https://renderinc.slack.com/archives/C06U1DDBBU4/p1712927439728409",
				Channel:   "C06U1DDBBU4",
				Thread:    "1712927439.728409",
				EventName: "",
				Event:     "",
				Plaintext: "I gotta do this",
				Asset:     "",
				Author:    "U06868T6ADV",
			},
		},
		// A fresh Opsgenie alert message: event name/number, tags, team and
		// resolved flag all come out of the attachment.
		"opsgenie_alert.json": {
			slackMessage: slackMessage{
				TS: 1712927439,
				Event: slackEvent{
					ID:      "1712927439.728409",
					Channel: "C06U1DDBBU4",
					Bot: slackBot{
						Name: "Opsgenie for Alert Management",
					},
					Attachments: []slackAttachment{{
						Color: "2ecc71",
						Title: "#11071: [Grafana]: Firing: Alertconfig Workflow Failed",
						Text:  "At least one alertconfig run has failed unexpectedly.\nDashboard: <https://grafana.render.com/d/VLZU83YVk?orgId=1>\nPanel: <https://grafana.render.com/d/VLZU83YVk?orgId=1&viewPanel=17>\nSource: <https://grafana.render.com/alerting/grafana/fa7b06b8-b4d8-4979-bce7-5e1c432edd81/view?orgId=1>",
						Fields: []slackField{
							{Value: "P3", Title: "Priority"},
							{Value: "alertname:Alertconfig Workflow Failed, grafana_folder:Datastores, rule_uid:a7639f7e-6950-41be-850a-b22119f74cbb", Title: "Tags"},
							{Value: "Datastores Non-Critical", Title: "Routed Teams"},
						},
						Actions: []slackAction{{}, {}, {}},
					}},
				},
			},
			message: parsedSlackMessage{
				ID:         "1712927439.728409/1712927439",
				TS:         1712927439,
				Source:     "https://renderinc.slack.com/archives/C06U1DDBBU4/p1712927439728409",
				Channel:    "C06U1DDBBU4",
				Thread:     "1712927439.728409",
				EventName:  "Alertconfig Workflow Failed",
				Event:      "11071",
				Plaintext:  "At least one alertconfig run has failed unexpectedly.\nDashboard: <https://grafana.render.com/d/VLZU83YVk?orgId=1>\nPanel: <https://grafana.render.com/d/VLZU83YVk?orgId=1&viewPanel=17>\nSource: <https://grafana.render.com/alerting/grafana/fa7b06b8-b4d8-4979-bce7-5e1c432edd81/view?orgId=1>",
				Asset:      "At least one alertconfig run has failed unexpectedly.\nDashboard: <https://grafana.render.com/d/VLZU83YVk?orgId=1>\nPanel: <https://grafana.render.com/d/VLZU83YVk?orgId=1&viewPanel=17>\nSource: <https://grafana.render.com/alerting/grafana/fa7b06b8-b4d8-4979-bce7-5e1c432edd81/view?orgId=1>",
				Datacenter: "alertname:Alertconfig Workflow Failed, grafana_folder:Datastores, rule_uid:a7639f7e-6950-41be-850a-b22119f74cbb",
				Author:     "Opsgenie for Alert Management",
				Team:       "Datastores Non-Critical",
				Resolved:   true,
			},
		},
		// Same alert shape, but the Opsgenie "resolved" variant (no actions
		// on the attachment).
		"opsgenie_alert_resolved.json": {
			slackMessage: slackMessage{
				TS: 1712916339,
				Event: slackEvent{
					ID:      "1712916339.000300",
					Channel: "C06U1DDBBU4",
					Bot: slackBot{
						Name: "Opsgenie for Alert Management",
					},
					Attachments: []slackAttachment{{
						Color: "2ecc71",
						Title: "#11069: [Grafana]: Firing: Alertconfig Workflow Failed",
						Text:  "At least one alertconfig run has failed unexpectedly.\nDashboard: <https://grafana.render.com/d/VLZU83YVk?orgId=1>\nPanel: <https://grafana.render.com/d/VLZU83YVk?orgId=1&viewPanel=17>\nSource: <https://grafana.render.com/alerting/grafana/fa7b06b8-b4d8-4979-bce7-5e1c432edd81/view?orgId=1>",
						Fields: []slackField{
							{Value: "P3", Title: "Priority"},
							{Value: "alertname:Alertconfig Workflow Failed, grafana_folder:Datastores, rule_uid:a7639f7e-6950-41be-850a-b22119f74cbb", Title: "Tags"},
							{Value: "Datastores Non-Critical", Title: "Routed Teams"},
						},
						Actions: []slackAction{},
					}},
				},
			},
			message: parsedSlackMessage{
				ID:         "1712916339.000300/1712916339",
				TS:         1712916339,
				Source:     "https://renderinc.slack.com/archives/C06U1DDBBU4/p1712916339000300",
				Channel:    "C06U1DDBBU4",
				Thread:     "1712916339.000300",
				EventName:  "Alertconfig Workflow Failed",
				Event:      "11069",
				Plaintext:  "At least one alertconfig run has failed unexpectedly.\nDashboard: <https://grafana.render.com/d/VLZU83YVk?orgId=1>\nPanel: <https://grafana.render.com/d/VLZU83YVk?orgId=1&viewPanel=17>\nSource: <https://grafana.render.com/alerting/grafana/fa7b06b8-b4d8-4979-bce7-5e1c432edd81/view?orgId=1>",
				Asset:      "At least one alertconfig run has failed unexpectedly.\nDashboard: <https://grafana.render.com/d/VLZU83YVk?orgId=1>\nPanel: <https://grafana.render.com/d/VLZU83YVk?orgId=1&viewPanel=17>\nSource: <https://grafana.render.com/alerting/grafana/fa7b06b8-b4d8-4979-bce7-5e1c432edd81/view?orgId=1>",
				Resolved:   true,
				Datacenter: "alertname:Alertconfig Workflow Failed, grafana_folder:Datastores, rule_uid:a7639f7e-6950-41be-850a-b22119f74cbb",
				Author:     "Opsgenie for Alert Management",
				Team:       "Datastores Non-Critical",
			},
		},
		// A re-ingested alert has no channel in its source URL (note the
		// double slash) — only the parsed message is asserted here.
		"reingested_alert.json": {
			message: parsedSlackMessage{
				ID:     "1712892637.037639/1712892637",
				TS:     1712892637,
				Source: "https://renderinc.slack.com/archives//p1712892637037639",
				//Channel: "C06U1DDBBU4",
				Thread:     "1712892637.037639",
				EventName:  "Alertconfig Workflow Failed",
				Event:      "11061",
				Plaintext:  "At least one alertconfig run has failed unexpectedly.\nDashboard: <https://grafana.render.com/d/VLZU83YVk?orgId=1>\nPanel: <https://grafana.render.com/d/VLZU83YVk?orgId=1&viewPanel=17>\nSource: <https://grafana.render.com/alerting/grafana/fa7b06b8-b4d8-4979-bce7-5e1c432edd81/view?orgId=1>",
				Asset:      "At least one alertconfig run has failed unexpectedly.\nDashboard: <https://grafana.render.com/d/VLZU83YVk?orgId=1>\nPanel: <https://grafana.render.com/d/VLZU83YVk?orgId=1&viewPanel=17>\nSource: <https://grafana.render.com/alerting/grafana/fa7b06b8-b4d8-4979-bce7-5e1c432edd81/view?orgId=1>",
				Resolved:   true,
				Datacenter: "alertname:Alertconfig Workflow Failed, grafana_folder:Datastores, rule_uid:a7639f7e-6950-41be-850a-b22119f74cbb",
				Author:     "Opsgenie for Alert Management",
				Team:       "Datastores Non-Critical",
			},
		},
	}

	for name, d := range cases {
		want := d
		t.Run(name, func(t *testing.T) {
			b, err := os.ReadFile(path.Join("testdata", "slack_events", name))
			if err != nil {
				t.Fatal(err)
			}

			t.Run("parseSlack", func(t *testing.T) {
				got, err := parseSlack(b)
				if err != nil {
					t.Fatal(err)
				}
				if got != want.message {
					// DeepEqual reports a readable diff; the Errorf also
					// leaves the raw values in the log.
					assert.DeepEqual(t, want.message, got)
					t.Errorf("wanted \n\t%+v, got\n\t%+v", want.message, got)
				}
				// The parsed TS must round-trip through Time() as unix seconds.
				if time := got.Time(); time.Unix() != int64(got.TS) {
					t.Error("not unix time", got.TS, time)
				}
			})
		})
	}
}
|
||||
|
||||
func TestWrappedSlack(t *testing.T) {
|
||||
b, _ := os.ReadFile("testdata/slack_events/human_thread_message_from_opsgenie_alert.json")
|
||||
b2, _ := json.Marshal(ChannelWrapper{Channel: "X", V: json.RawMessage(b)})
|
||||
|
||||
if got, err := _parseSlack(b); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if got2, err := _parseSlack(b2); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if got2.Event.Channel != "X" {
|
||||
t.Error(got2.Event.Channel)
|
||||
} else if got2.Event.ParentID == "" {
|
||||
t.Error(got2.Event)
|
||||
} else if got.Event.ParentID != got2.Event.ParentID {
|
||||
t.Error(got, got2)
|
||||
}
|
||||
}
|
||||
|
||||
func TestWithPattern(t *testing.T) {
|
||||
cases := map[string]struct {
|
||||
given string
|
||||
pattern string
|
||||
want string
|
||||
}{
|
||||
"pods unavailable on node": {
|
||||
given: `pods are unavailable on node ip-12-345-67-890.xx-yyyyy-1.compute.internal.`,
|
||||
pattern: renderAssetPattern,
|
||||
want: `ip-12-345-67-890.xx-yyyyy-1.compute.internal`,
|
||||
},
|
||||
"redis err": {
|
||||
given: `Redis instance red-abc123 is emitting Some error repeatedly`,
|
||||
pattern: renderAssetPattern,
|
||||
want: `red-abc123`,
|
||||
},
|
||||
"pg err": {
|
||||
given: `db dpg-xyz123 is in a pinch`,
|
||||
pattern: renderAssetPattern,
|
||||
want: `dpg-xyz123`,
|
||||
},
|
||||
}
|
||||
|
||||
for name, d := range cases {
|
||||
c := d
|
||||
t.Run(name, func(t *testing.T) {
|
||||
got := withPattern(c.pattern, c.given)
|
||||
if got != c.want {
|
||||
t.Errorf("withPattern(%q, %q) expected %q but got %q", c.pattern, c.given, c.want, got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
149
slackscrape.go
Normal file
149
slackscrape.go
Normal file
@@ -0,0 +1,149 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"golang.org/x/time/rate"
|
||||
)
|
||||
|
||||
// SlackScrape describes one scrape job: a page of a channel's history or,
// when ThreadTS is set, the replies of a single thread.
type SlackScrape struct {
	Latest   int64  // upper time bound ("latest" query param); NOTE(review): always sent, even when 0 — confirm jobs always carry it
	Oldest   int64  // lower time bound; only sent when non-zero
	ThreadTS string // when set, scrape conversations.replies for this thread instead of channel history
	Channel  string // Slack channel ID to scrape
	Token    string // Slack API bearer token used for the request
}
|
||||
|
||||
// NewSlackScrapePipeline wires the scrape pipeline: jobs are read from the
// "slack_channels_to_scrape" queue and the pipeline's writer targets
// "new_persistence".
func NewSlackScrapePipeline(ctx context.Context, cfg Config) (Pipeline, error) {
	writer, err := NewQueue(ctx, "new_persistence", cfg.driver)
	if err != nil {
		return Pipeline{}, err
	}
	// The reader is stored on cfg before the process func is created so that
	// newSlackScrapeProcess can re-enqueue follow-up jobs (thread fanout,
	// next page) onto the same queue via its captured cfg copy.
	cfg.slackScrapePipeline.reader, err = NewQueue(ctx, "slack_channels_to_scrape", cfg.driver)
	if err != nil {
		return Pipeline{}, err
	}
	return Pipeline{
		writer:  writer,
		reader:  cfg.slackScrapePipeline.reader,
		process: newSlackScrapeProcess(cfg),
	}, nil
}
|
||||
|
||||
// newSlackScrapeProcess returns the processFunc for the scrape pipeline.
// Each job fetches one page of Slack channel history (or thread replies),
// forwards every message to the slack-to-model pipeline, and re-enqueues
// follow-up jobs for threads and for the next page when the page was full.
func newSlackScrapeProcess(cfg Config) processFunc {
	// One Slack Web API call every 2 seconds (burst 1), shared by every job
	// this process handles.
	limiter := rate.NewLimiter(0.5, 1)
	return func(ctx context.Context, jobb []byte) ([]byte, error) {
		if err := limiter.Wait(ctx); err != nil {
			return nil, err
		}

		var job SlackScrape
		if err := json.Unmarshal(jobb, &job); err != nil {
			return nil, fmt.Errorf("received non SlackScrape payload: %w", err)
		}

		// Build the Web API call: conversations.history for a channel, or
		// conversations.replies when the job targets a single thread.
		u := url.URL{
			Scheme: "https",
			Host:   "slack.com",
			Path:   "/api/conversations.history",
		}
		q := url.Values{}
		q.Set("channel", job.Channel)
		// NOTE(review): "latest" is always sent, even when job.Latest is 0;
		// latest=0 would ask Slack for messages before the epoch. Presumably
		// enqueuers always set a non-zero Latest — confirm.
		q.Set("latest", strconv.FormatInt(job.Latest, 10))
		q.Set("limit", "999")
		q.Set("inclusive", "true")
		if job.ThreadTS != "" {
			u.Path = "/api/conversations.replies"
			q.Set("ts", job.ThreadTS)
		}
		if job.Oldest != 0 {
			q.Set("oldest", strconv.FormatInt(job.Oldest, 10))
		}
		u.RawQuery = q.Encode()
		url := u.String() // shadows package url; the package is not used below

		req, err := http.NewRequest(http.MethodGet, url, nil)
		if err != nil {
			return nil, err
		}
		req.Header.Set("Authorization", "Bearer "+job.Token)
		req = req.WithContext(ctx)

		httpc := http.Client{Timeout: time.Second}
		resp, err := httpc.Do(req)
		if err != nil {
			return nil, err
		}
		// Defers run LIFO: drain any unread body first, then close, so the
		// transport can reuse the connection.
		defer resp.Body.Close()
		defer io.Copy(io.Discard, resp.Body)

		if resp.StatusCode != http.StatusOK {
			b, _ := io.ReadAll(resp.Body)
			return nil, fmt.Errorf("(%d) %s", resp.StatusCode, b)
		}

		body, err := io.ReadAll(resp.Body)
		if err != nil {
			return nil, err
		}

		// Only the raw message list is needed here; each message is decoded
		// lazily with small "peek" structs below.
		var page struct {
			Messages []json.RawMessage
		}
		if err := json.Unmarshal(body, &page); err != nil {
			return nil, err
		}
		// Track the minimum "ts" seen so a follow-up job can continue where
		// this page ended (pages run newest-to-oldest).
		newLatest := float64(job.Latest)
		for _, messageJSON := range page.Messages {
			if cfg.Debug {
				log.Printf("slackScrapePipeline %s => %s", url, messageJSON)
			}
			// Forward each message, tagged with its channel, to the
			// slack-to-model pipeline.
			b, _ := json.Marshal(ChannelWrapper{Channel: job.Channel, V: messageJSON})
			if err := cfg.slackToModelPipeline.reader.Enqueue(ctx, b); err != nil {
				return nil, err
			}
			var peekTS struct {
				TS float64 `json:"ts,string"`
			}
			if err := json.Unmarshal(messageJSON, &peekTS); err == nil && peekTS.TS > 0 && peekTS.TS < newLatest {
				newLatest = peekTS.TS
			}
			if job.ThreadTS == "" {
				// Channel-history scrapes fan out one extra job per thread so
				// replies get scraped too. Peek errors are deliberately
				// ignored: no thread_ts simply means no fanout.
				var peek struct {
					ThreadTS string `json:"thread_ts"`
				}
				json.Unmarshal(messageJSON, &peek)
				if peek.ThreadTS != "" {
					clone := job
					clone.ThreadTS = peek.ThreadTS
					clone.Oldest = 0
					b, _ := json.Marshal(clone)
					if err := cfg.slackScrapePipeline.reader.Enqueue(ctx, b); err != nil {
						return nil, err
					}
					log.Printf("fanout thread scrape for %s/%s", job.Channel, peek.ThreadTS)
				}
			}
		}
		// NOTE(review): pagination is detected by an exactly-full page (999
		// messages) rather than Slack's has_more/next_cursor — confirm this
		// is intended; a page capped below 999 by Slack would stop early.
		if len(page.Messages) == 999 {
			clone := job
			clone.Latest = int64(newLatest)
			b, _ := json.Marshal(clone)
			if err := cfg.slackScrapePipeline.reader.Enqueue(ctx, b); err != nil {
				return nil, err
			}
			log.Printf("fanout page scrape for %s up to %v", job.Channel, clone.Latest)
		}

		log.Printf("scraped %v from %s", len(page.Messages), url)
		return nil, nil
	}
}
|
||||
252
storage.go
252
storage.go
@@ -2,36 +2,256 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
)
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
var (
|
||||
ErrNotFound = errors.New("not found")
|
||||
"github.com/breel-render/spoc-bot-vr/model"
|
||||
)
|
||||
|
||||
type Storage struct {
|
||||
driver Driver
|
||||
}
|
||||
|
||||
func NewTestStorage() Storage {
|
||||
return Storage{driver: NewTestDB()}
|
||||
// NewStorage creates the schema — events, messages and threads tables, each
// keyed by a unique ID column — then adds one TEXT column per JSON field of
// the corresponding model type, and returns a Storage bound to driver.
func NewStorage(ctx context.Context, driver Driver) (Storage, error) {
	if _, err := driver.ExecContext(ctx, `
	CREATE TABLE IF NOT EXISTS events (ID TEXT UNIQUE);
	CREATE TABLE IF NOT EXISTS messages (ID TEXT UNIQUE);
	CREATE TABLE IF NOT EXISTS threads (ID TEXT UNIQUE);
	`); err != nil {
		return Storage{}, err
	}

	// Derive column names from each model's JSON encoding so the schema
	// tracks the structs without hand-written migrations.
	for table, v := range map[string]any{
		"events":   model.Event{},
		"messages": model.Message{},
		"threads":  model.Thread{},
	} {
		b, _ := json.Marshal(v)
		var m map[string]struct{}
		// NOTE(review): only the key set is wanted here; this appears to rely
		// on encoding/json still recording keys despite type errors on the
		// struct{} values (the error is deliberately ignored) — confirm.
		json.Unmarshal(b, &m)
		for k := range m {
			if k == `ID` {
				continue // ID is created with the table itself
			}
			// Error ignored: the column already existing is the normal case
			// on restart.
			driver.ExecContext(ctx, fmt.Sprintf(`ALTER TABLE %s ADD COLUMN %s TEXT`, table, k))
		}
	}

	return Storage{driver: driver}, nil
}
|
||||
|
||||
func NewStorage(driver Driver) Storage {
|
||||
return Storage{driver: driver}
|
||||
// GetEvent loads the event with the given ID from the events table.
func (s Storage) GetEvent(ctx context.Context, ID string) (model.Event, error) {
	v := model.Event{}
	err := s.selectOne(ctx, "events", &v, "ID = $1", ID)
	return v, err
}
|
||||
|
||||
func (s Storage) Upsert(ctx context.Context, m Message) error {
|
||||
return s.driver.Set(ctx, "storage", m.ID, m.Serialize())
|
||||
// UpsertEvent inserts or updates the given event, keyed by its ID.
func (s Storage) UpsertEvent(ctx context.Context, event model.Event) error {
	return s.upsert(ctx, "events", event)
}
|
||||
|
||||
func (s Storage) Get(ctx context.Context, id string) (Message, error) {
|
||||
b, err := s.driver.Get(ctx, "storage", id)
|
||||
// GetMessage loads the message with the given ID from the messages table.
func (s Storage) GetMessage(ctx context.Context, ID string) (model.Message, error) {
	v := model.Message{}
	err := s.selectOne(ctx, "messages", &v, "ID = $1", ID)
	return v, err
}
|
||||
|
||||
// UpsertMessage inserts or updates the given message, keyed by its ID.
func (s Storage) UpsertMessage(ctx context.Context, message model.Message) error {
	return s.upsert(ctx, "messages", message)
}
|
||||
|
||||
// GetThread loads the thread with the given ID from the threads table.
func (s Storage) GetThread(ctx context.Context, ID string) (model.Thread, error) {
	v := model.Thread{}
	err := s.selectOne(ctx, "threads", &v, "ID = $1", ID)
	return v, err
}
|
||||
|
||||
// GetEventThreads returns every thread whose EventID matches the given ID,
// ordered by ascending TS.
func (s Storage) GetEventThreads(ctx context.Context, ID string) ([]model.Thread, error) {
	return s.selectThreadsWhere(ctx, "EventID = $1", ID)
}
|
||||
|
||||
// GetThreadMessages returns every message whose ThreadID matches the given
// ID, ordered by ascending TS.
func (s Storage) GetThreadMessages(ctx context.Context, ID string) ([]model.Message, error) {
	return s.selectMessagesWhere(ctx, "ThreadID = $1", ID)
}
|
||||
|
||||
// UpsertThread inserts or updates the given thread, keyed by its ID.
func (s Storage) UpsertThread(ctx context.Context, thread model.Thread) error {
	return s.upsert(ctx, "threads", thread)
}
|
||||
|
||||
// selectThreadsWhere loads every thread matching the WHERE clause, ordered by
// ascending TS. Positional args are JSON-encoded before being handed to the
// driver, mirroring how upsert stores column values.
func (s Storage) selectThreadsWhere(ctx context.Context, clause string, args ...any) ([]model.Thread, error) {
	keys, _, _, _, err := keysArgsKeyargsValues(model.Thread{})
	if err != nil {
		return nil, err
	}
	args2 := make([]any, len(args))
	for i := range args {
		args2[i], _ = json.Marshal(args[i])
	}
	scanTargets := make([]any, len(keys))

	q := fmt.Sprintf(`
	SELECT %s FROM threads WHERE %s
	ORDER BY TS ASC
	`, strings.Join(keys, ", "), clause)
	rows, err := s.driver.QueryContext(ctx, q, args2...)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var result []model.Thread
	for rows.Next() {
		// Fresh *[]byte targets per row; every column is stored as raw JSON.
		for i := range scanTargets {
			scanTargets[i] = &[]byte{}
		}

		if err := rows.Scan(scanTargets...); err != nil {
			return nil, err
		}

		// Reassemble a JSON object from the scanned columns and decode it
		// into the model type.
		m := map[string]json.RawMessage{}
		for i, k := range keys {
			m[k] = *scanTargets[i].(*[]byte)
		}
		b, _ := json.Marshal(m)

		var one model.Thread
		if err := json.Unmarshal(b, &one); err != nil {
			return nil, err
		}
		result = append(result, one)
	}

	return result, rows.Err()
}
|
||||
|
||||
// selectMessagesWhere loads every message matching the WHERE clause, ordered
// by ascending TS. Positional args are JSON-encoded before being handed to
// the driver, mirroring how upsert stores column values.
func (s Storage) selectMessagesWhere(ctx context.Context, clause string, args ...any) ([]model.Message, error) {
	keys, _, _, _, err := keysArgsKeyargsValues(model.Message{})
	if err != nil {
		return nil, err
	}
	args2 := make([]any, len(args))
	for i := range args {
		args2[i], _ = json.Marshal(args[i])
	}
	scanTargets := make([]any, len(keys))

	q := fmt.Sprintf(`
	SELECT %s FROM messages WHERE %s
	ORDER BY TS ASC
	`, strings.Join(keys, ", "), clause)
	rows, err := s.driver.QueryContext(ctx, q, args2...)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var result []model.Message
	for rows.Next() {
		// Fresh *[]byte targets per row; every column is stored as raw JSON.
		for i := range scanTargets {
			scanTargets[i] = &[]byte{}
		}

		if err := rows.Scan(scanTargets...); err != nil {
			return nil, err
		}

		// Reassemble a JSON object from the scanned columns and decode it
		// into the model type.
		m := map[string]json.RawMessage{}
		for i, k := range keys {
			m[k] = *scanTargets[i].(*[]byte)
		}
		b, _ := json.Marshal(m)

		var one model.Message
		if err := json.Unmarshal(b, &one); err != nil {
			return nil, err
		}
		result = append(result, one)
	}

	return result, rows.Err()
}
|
||||
|
||||
// selectOne loads a single row from table into v (a pointer to a model
// struct), using the struct's JSON field names as the column list.
func (s Storage) selectOne(ctx context.Context, table string, v any, clause string, args ...any) error {
	// Cheap sanity check: every "$n" placeholder needs an argument.
	// NOTE(review): this counts "$" occurrences, so a clause reusing one
	// placeholder twice (e.g. "A = $1 OR B = $1") would be rejected — confirm
	// that is acceptable for all callers.
	if questions := strings.Count(clause, "$"); questions != len(args) {
		return fmt.Errorf("expected %v args for clause but found %v", questions, len(args))
	}

	keys, _, _, _, err := keysArgsKeyargsValues(v)
	if err != nil {
		return err
	}

	// JSON-encode args in place to match the stored column encoding.
	for i := range args {
		args[i], _ = json.Marshal(args[i])
	}

	q := fmt.Sprintf(`
	SELECT %s FROM %s WHERE %s
	`, strings.Join(keys, ", "), table, clause)
	row := s.driver.QueryRowContext(ctx, q, args...)
	if err := row.Err(); err != nil {
		return err
	}

	// Every column is stored as raw JSON; scan into *[]byte targets.
	scanTargets := make([]any, len(keys))
	for i := range scanTargets {
		scanTargets[i] = &[]byte{}
	}
	if err := row.Scan(scanTargets...); err != nil {
		return err
	}

	// Rebuild a JSON object from the raw columns and decode it into v.
	m := map[string]json.RawMessage{}
	for i, k := range keys {
		m[k] = *scanTargets[i].(*[]byte)
	}
	b, _ := json.Marshal(m)
	return json.Unmarshal(b, v)
}
|
||||
|
||||
func (s Storage) upsert(ctx context.Context, table string, v any) error {
|
||||
keys, args, keyArgs, values, err := keysArgsKeyargsValues(v)
|
||||
if err != nil || len(keys) == 0 {
|
||||
return err
|
||||
}
|
||||
|
||||
q := fmt.Sprintf(`
|
||||
INSERT INTO %s (%s) VALUES (%s)
|
||||
ON CONFLICT (ID) DO UPDATE SET %s
|
||||
`, table, strings.Join(keys, ", "), strings.Join(args, ", "), strings.Join(keyArgs, ", "))
|
||||
if result, err := s.driver.ExecContext(ctx, q, values...); err != nil {
|
||||
return err
|
||||
} else if n, err := result.RowsAffected(); err != nil {
|
||||
return err
|
||||
} else if n != 1 {
|
||||
return fmt.Errorf("UpsertMessage affected %v rows", n)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// keysArgsKeyargsValues flattens v via its JSON encoding into four parallel
// slices for building SQL: the column names, the positional placeholders
// ("$1".."$n"), the "col=$n" assignment pairs, and the raw JSON value bytes.
// Ordering follows map iteration and is therefore unspecified, but the four
// slices always correspond index-by-index. The JSON decode error (if any) is
// returned alongside whatever was extracted.
func keysArgsKeyargsValues(v any) ([]string, []string, []string, []any, error) {
	encoded, _ := json.Marshal(v)
	var fields map[string]json.RawMessage
	err := json.Unmarshal(encoded, &fields)

	keys := []string{}
	args := []string{}
	keyArgs := []string{}
	values := []any{}
	n := 0
	for k, raw := range fields {
		n++
		placeholder := fmt.Sprintf("$%d", n)
		keys = append(keys, k)
		args = append(args, placeholder)
		keyArgs = append(keyArgs, k+"="+placeholder)
		values = append(values, []byte(raw))
	}
	return keys, args, keyArgs, values, err
}
|
||||
|
||||
150
storage_test.go
Normal file
150
storage_test.go
Normal file
@@ -0,0 +1,150 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/breel-render/spoc-bot-vr/model"
|
||||
)
|
||||
|
||||
// TestStorage exercises Storage end to end against the test driver: create
// the schema, upsert each model type twice (insert then no-op update), read
// each back, and query the one-to-many relations (event→threads,
// thread→messages).
func TestStorage(t *testing.T) {
	ctx, can := context.WithTimeout(context.Background(), time.Minute)
	defer can()

	s, err := NewStorage(ctx, NewTestDriver(t))
	if err != nil {
		t.Fatal(err)
	}

	t.Run("upsert get event", func(t *testing.T) {
		m := model.NewEvent(
			"ID",
			"URL",
			1,
			"Name",
			"Asset",
			"Datacenter",
			"Team",
			true,
		)

		// Upserting the same value twice must succeed (second call is a
		// no-op update).
		if err := s.UpsertEvent(ctx, m); err != nil {
			t.Fatal("unexpected error on insert:", err)
		} else if err := s.UpsertEvent(ctx, m); err != nil {
			t.Fatal("unexpected error on noop update:", err)
		}

		if got, err := s.GetEvent(ctx, m.ID); err != nil {
			t.Fatal("unexpected error on get:", err)
		} else if got != m {
			t.Fatal("unexpected result from get:", got)
		}
	})

	t.Run("upsert get thread", func(t *testing.T) {
		m := model.NewThread(
			"ID",
			"URL",
			1,
			"Channel",
			"EventID",
		)

		if err := s.UpsertThread(ctx, m); err != nil {
			t.Fatal("unexpected error on insert:", err)
		} else if err := s.UpsertThread(ctx, m); err != nil {
			t.Fatal("unexpected error on noop update:", err)
		}

		if got, err := s.GetThread(ctx, m.ID); err != nil {
			t.Fatal("unexpected error on get:", err)
		} else if got != m {
			t.Fatal("unexpected result from get:", got)
		}
	})

	t.Run("upsert get message", func(t *testing.T) {
		m := model.NewMessage(
			"ID",
			1,
			"Author",
			"Plaintext",
			"ThreadID",
		)

		if err := s.UpsertMessage(ctx, m); err != nil {
			t.Fatal("unexpected error on insert:", err)
		} else if err := s.UpsertMessage(ctx, m); err != nil {
			t.Fatal("unexpected error on noop update:", err)
		}

		if got, err := s.GetMessage(ctx, m.ID); err != nil {
			t.Fatal("unexpected error on get:", err)
		} else if got != m {
			t.Fatal("unexpected result from get:", got)
		}
	})

	t.Run("get thread messages", func(t *testing.T) {
		// Random thread ID keeps this subtest isolated from rows written by
		// the earlier subtests.
		thread := fmt.Sprintf("thread-%d", rand.Int())
		m := model.NewMessage(
			"ID",
			1,
			"Author",
			"Plaintext",
			thread,
		)

		if err := s.UpsertMessage(ctx, m); err != nil {
			t.Fatal("unexpected error on insert:", err)
		} else if m2, err := s.GetMessage(ctx, m.ID); err != nil {
			t.Fatal("unexpected error on upsert-get:", err)
		} else if m2 != m {
			t.Errorf("expected %+v but got %+v", m, m2)
		}

		msgs, err := s.GetThreadMessages(ctx, thread)
		if err != nil {
			t.Fatal(err)
		} else if len(msgs) != 1 {
			t.Fatal(msgs)
		} else if msgs[0].ThreadID != m.ThreadID {
			t.Fatal(msgs[0].ThreadID)
		} else if msgs[0] != m {
			t.Fatalf("wanted msgs like %+v but got %+v", m, msgs[0])
		}
	})

	t.Run("get event threads", func(t *testing.T) {
		// Random event ID keeps this subtest isolated from rows written by
		// the earlier subtests.
		event := fmt.Sprintf("event-%d", rand.Int())
		m := model.NewThread(
			"ID",
			"URL",
			1,
			"Channel",
			event,
		)

		if err := s.UpsertThread(ctx, m); err != nil {
			t.Fatal("unexpected error on insert:", err)
		} else if m2, err := s.GetThread(ctx, m.ID); err != nil {
			t.Fatal("unexpected error on upsert-get:", err)
		} else if m2 != m {
			t.Errorf("expected %+v but got %+v", m, m2)
		}

		msgs, err := s.GetEventThreads(ctx, event)
		if err != nil {
			t.Fatal(err)
		} else if len(msgs) != 1 {
			t.Fatal(msgs)
		} else if msgs[0].EventID != m.EventID {
			t.Fatal(msgs[0].EventID)
		} else if msgs[0] != m {
			t.Fatalf("wanted msgs like %+v but got %+v", m, msgs[0])
		}
	})
}
|
||||
8
testdata/slack_events.json
vendored
8
testdata/slack_events.json
vendored
File diff suppressed because one or more lines are too long
50
testdata/slack_events/human_message.json
vendored
Normal file
50
testdata/slack_events/human_message.json
vendored
Normal file
@@ -0,0 +1,50 @@
|
||||
{
|
||||
"hint": "a message",
|
||||
"token": "redacted",
|
||||
"team_id": "T9RQLQ0KV",
|
||||
"context_team_id": "T9RQLQ0KV",
|
||||
"context_enterprise_id": null,
|
||||
"api_app_id": "A06TYH7CALB",
|
||||
"event": {
|
||||
"user": "U06868T6ADV",
|
||||
"type": "message",
|
||||
"ts": "1712878479.415559",
|
||||
"client_msg_id": "fce57841-0446-4720-aa1d-557e162c7667",
|
||||
"text": "BOTH OF YOU GET IN HERE HEEEHEEEHEEE",
|
||||
"team": "T9RQLQ0KV",
|
||||
"blocks": [
|
||||
{
|
||||
"type": "rich_text",
|
||||
"block_id": "6G5BZ",
|
||||
"elements": [
|
||||
{
|
||||
"type": "rich_text_section",
|
||||
"elements": [
|
||||
{
|
||||
"type": "text",
|
||||
"text": "BOTH OF YOU GET IN HERE HEEEHEEEHEEE"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"channel": "C06U1DDBBU4",
|
||||
"event_ts": "1712878479.415559",
|
||||
"channel_type": "channel"
|
||||
},
|
||||
"type": "event_callback",
|
||||
"event_id": "Ev06U1F01XMJ",
|
||||
"event_time": 1712878479,
|
||||
"authorizations": [
|
||||
{
|
||||
"enterprise_id": null,
|
||||
"team_id": "T9RQLQ0KV",
|
||||
"user_id": "U06TS9M7ABG",
|
||||
"is_bot": true,
|
||||
"is_enterprise_install": false
|
||||
}
|
||||
],
|
||||
"is_ext_shared_channel": false,
|
||||
"event_context": "4-eyJldCI6Im1lc3NhZ2UiLCJ0aWQiOiJUOVJRTFEwS1YiLCJhaWQiOiJBMDZUWUg3Q0FMQiIsImNpZCI6IkMwNlUxRERCQlU0In0"
|
||||
}
|
||||
52
testdata/slack_events/human_thread_message.json
vendored
Normal file
52
testdata/slack_events/human_thread_message.json
vendored
Normal file
@@ -0,0 +1,52 @@
|
||||
{
|
||||
"hint": "a thread message",
|
||||
"token": "redacted",
|
||||
"team_id": "T9RQLQ0KV",
|
||||
"context_team_id": "T9RQLQ0KV",
|
||||
"context_enterprise_id": null,
|
||||
"api_app_id": "A06TYH7CALB",
|
||||
"event": {
|
||||
"user": "U06868T6ADV",
|
||||
"type": "message",
|
||||
"ts": "1712878566.650149",
|
||||
"client_msg_id": "5d9d586b-eee0-40f8-b15e-66245c073a4c",
|
||||
"text": "in a thread",
|
||||
"team": "T9RQLQ0KV",
|
||||
"thread_ts": "1712877772.926539",
|
||||
"parent_user_id": "U06868T6ADV",
|
||||
"blocks": [
|
||||
{
|
||||
"type": "rich_text",
|
||||
"block_id": "KHSCu",
|
||||
"elements": [
|
||||
{
|
||||
"type": "rich_text_section",
|
||||
"elements": [
|
||||
{
|
||||
"type": "text",
|
||||
"text": "in a thread"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"channel": "C06U1DDBBU4",
|
||||
"event_ts": "1712878566.650149",
|
||||
"channel_type": "channel"
|
||||
},
|
||||
"type": "event_callback",
|
||||
"event_id": "Ev06TW3WE58V",
|
||||
"event_time": 1712878566,
|
||||
"authorizations": [
|
||||
{
|
||||
"enterprise_id": null,
|
||||
"team_id": "T9RQLQ0KV",
|
||||
"user_id": "U06TS9M7ABG",
|
||||
"is_bot": true,
|
||||
"is_enterprise_install": false
|
||||
}
|
||||
],
|
||||
"is_ext_shared_channel": false,
|
||||
"event_context": "4-eyJldCI6Im1lc3NhZ2UiLCJ0aWQiOiJUOVJRTFEwS1YiLCJhaWQiOiJBMDZUWUg3Q0FMQiIsImNpZCI6IkMwNlUxRERCQlU0In0"
|
||||
}
|
||||
52
testdata/slack_events/human_thread_message_from_opsgenie_alert.json
vendored
Normal file
52
testdata/slack_events/human_thread_message_from_opsgenie_alert.json
vendored
Normal file
@@ -0,0 +1,52 @@
|
||||
{
|
||||
"hint": "a thread reply to an alert",
|
||||
"token": "redacted",
|
||||
"team_id": "T9RQLQ0KV",
|
||||
"context_team_id": "T9RQLQ0KV",
|
||||
"context_enterprise_id": null,
|
||||
"api_app_id": "A06TYH7CALB",
|
||||
"event": {
|
||||
"user": "U06868T6ADV",
|
||||
"type": "message",
|
||||
"ts": "1712930706.598629",
|
||||
"client_msg_id": "880fc69f-de95-4c1e-90a7-aae701a40c21",
|
||||
"text": "I gotta do this",
|
||||
"team": "T9RQLQ0KV",
|
||||
"thread_ts": "1712927439.728409",
|
||||
"parent_user_id": "U03RUK7FBUY",
|
||||
"blocks": [
|
||||
{
|
||||
"type": "rich_text",
|
||||
"block_id": "Cp0IU",
|
||||
"elements": [
|
||||
{
|
||||
"type": "rich_text_section",
|
||||
"elements": [
|
||||
{
|
||||
"type": "text",
|
||||
"text": "I gotta do this"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"channel": "C06U1DDBBU4",
|
||||
"event_ts": "1712930706.598629",
|
||||
"channel_type": "channel"
|
||||
},
|
||||
"type": "event_callback",
|
||||
"event_id": "Ev06UF3U1LM7",
|
||||
"event_time": 1712930706,
|
||||
"authorizations": [
|
||||
{
|
||||
"enterprise_id": null,
|
||||
"team_id": "T9RQLQ0KV",
|
||||
"user_id": "U06TS9M7ABG",
|
||||
"is_bot": true,
|
||||
"is_enterprise_install": false
|
||||
}
|
||||
],
|
||||
"is_ext_shared_channel": false,
|
||||
"event_context": "4-eyJldCI6Im1lc3NhZ2UiLCJ0aWQiOiJUOVJRTFEwS1YiLCJhaWQiOiJBMDZUWUg3Q0FMQiIsImNpZCI6IkMwNlUxRERCQlU0In0"
|
||||
}
|
||||
128
testdata/slack_events/opsgenie_alert.json
vendored
Normal file
128
testdata/slack_events/opsgenie_alert.json
vendored
Normal file
@@ -0,0 +1,128 @@
|
||||
{
|
||||
"hint": "a new alert from opsgenie",
|
||||
"token": "redacted",
|
||||
"team_id": "T9RQLQ0KV",
|
||||
"context_team_id": "T9RQLQ0KV",
|
||||
"context_enterprise_id": null,
|
||||
"api_app_id": "A06TYH7CALB",
|
||||
"event": {
|
||||
"user": "U03RUK7FBUY",
|
||||
"type": "message",
|
||||
"ts": "1712927439.728409",
|
||||
"bot_id": "B03RHGBPH2M",
|
||||
"app_id": "A286WATV2",
|
||||
"text": "",
|
||||
"team": "T9RQLQ0KV",
|
||||
"bot_profile": {
|
||||
"id": "B03RHGBPH2M",
|
||||
"deleted": false,
|
||||
"name": "Opsgenie for Alert Management",
|
||||
"updated": 1658887059,
|
||||
"app_id": "A286WATV2",
|
||||
"icons": {
|
||||
"image_36": "https://avatars.slack-edge.com/2019-05-30/652285939191_7831939cc30ef7159561_36.png",
|
||||
"image_48": "https://avatars.slack-edge.com/2019-05-30/652285939191_7831939cc30ef7159561_48.png",
|
||||
"image_72": "https://avatars.slack-edge.com/2019-05-30/652285939191_7831939cc30ef7159561_72.png"
|
||||
},
|
||||
"team_id": "T9RQLQ0KV"
|
||||
},
|
||||
"attachments": [
|
||||
{
|
||||
"id": 1,
|
||||
"realcolor": "F4511E",
|
||||
"color": "2ecc71",
|
||||
"fallback": "New alert: \"[Grafana]: Firing: Alertconfig Workflow Failed\" <https://opsg.in/a/i/render/38152bc5-bc5d-411d-9feb-d285af5b6481-1712927439305|11071>\nTags: alertname:Alertconfig Workflow Failed, grafana_folder:Datastores, rule_uid:a7639f7e-6950-41be-850a-b22119f74cbb",
|
||||
"text": "At least one alertconfig run has failed unexpectedly.\nDashboard: <https://grafana.render.com/d/VLZU83YVk?orgId=1>\nPanel: <https://grafana.render.com/d/VLZU83YVk?orgId=1&viewPanel=17>\nSource: <https://grafana.render.com/alerting/grafana/fa7b06b8-b4d8-4979-bce7-5e1c432edd81/view?orgId=1>",
|
||||
"title": "#11071: [Grafana]: Firing: Alertconfig Workflow Failed",
|
||||
"title_link": "https://opsg.in/a/i/render/38152bc5-bc5d-411d-9feb-d285af5b6481-1712927439305",
|
||||
"author_name": "New Alert created via Grafana - Datastore Slack",
|
||||
"callback_id": "bbd4a269-08a9-470e-ba79-ce238ac03dc7_05fa2e9b-bec4-4a7e-842d-36043d267a13_11071",
|
||||
"fields": [
|
||||
{
|
||||
"value": "P3",
|
||||
"title": "Priority",
|
||||
"short": true
|
||||
},
|
||||
{
|
||||
"value": "alertname:Alertconfig Workflow Failed, grafana_folder:Datastores, rule_uid:a7639f7e-6950-41be-850a-b22119f74cbb",
|
||||
"title": "Tags",
|
||||
"short": true
|
||||
},
|
||||
{
|
||||
"value": "Datastores Non-Critical",
|
||||
"title": "Routed Teams",
|
||||
"short": true
|
||||
}
|
||||
],
|
||||
"mrkdwn_in": [
|
||||
"pretext",
|
||||
"text"
|
||||
],
|
||||
"actions": [
|
||||
{
|
||||
"id": "1",
|
||||
"name": "acknowledge",
|
||||
"text": "Acknowledge",
|
||||
"type": "button",
|
||||
"value": "ack",
|
||||
"style": "default"
|
||||
},
|
||||
{
|
||||
"id": "2",
|
||||
"name": "close",
|
||||
"text": "Close",
|
||||
"type": "button",
|
||||
"value": "close",
|
||||
"style": "primary"
|
||||
},
|
||||
{
|
||||
"id": "3",
|
||||
"name": "action",
|
||||
"text": "Other actions...",
|
||||
"type": "select",
|
||||
"data_source": "static",
|
||||
"options": [
|
||||
{
|
||||
"text": "Assign",
|
||||
"value": "assign"
|
||||
},
|
||||
{
|
||||
"text": "Take Ownership",
|
||||
"value": "own"
|
||||
},
|
||||
{
|
||||
"text": "Snooze",
|
||||
"value": "snooze"
|
||||
},
|
||||
{
|
||||
"text": "Add Note",
|
||||
"value": "addNote"
|
||||
},
|
||||
{
|
||||
"text": "Update Priority",
|
||||
"value": "updatePriority"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"channel": "C06U1DDBBU4",
|
||||
"event_ts": "1712927439.728409",
|
||||
"channel_type": "channel"
|
||||
},
|
||||
"type": "event_callback",
|
||||
"event_id": "Ev06UEPF5BSM",
|
||||
"event_time": 1712927439,
|
||||
"authorizations": [
|
||||
{
|
||||
"enterprise_id": null,
|
||||
"team_id": "T9RQLQ0KV",
|
||||
"user_id": "U06TS9M7ABG",
|
||||
"is_bot": true,
|
||||
"is_enterprise_install": false
|
||||
}
|
||||
],
|
||||
"is_ext_shared_channel": false,
|
||||
"event_context": "4-eyJldCI6Im1lc3NhZ2UiLCJ0aWQiOiJUOVJRTFEwS1YiLCJhaWQiOiJBMDZUWUg3Q0FMQiIsImNpZCI6IkMwNlUxRERCQlU0In0"
|
||||
}
|
||||
192
testdata/slack_events/opsgenie_alert_2.json
vendored
Normal file
192
testdata/slack_events/opsgenie_alert_2.json
vendored
Normal file
@@ -0,0 +1,192 @@
|
||||
{
|
||||
"hint": "a new alert from opsgenie",
|
||||
"token": "redacted",
|
||||
"team_id": "T9RQLQ0KV",
|
||||
"context_team_id": "T9RQLQ0KV",
|
||||
"context_enterprise_id": null,
|
||||
"api_app_id": "A06TYH7CALB",
|
||||
"event": {
|
||||
"type": "message",
|
||||
"subtype": "message_changed",
|
||||
"message": {
|
||||
"user": "U03RUK7FBUY",
|
||||
"type": "message",
|
||||
"edited": {
|
||||
"user": "B03RHGBPH2M",
|
||||
"ts": "1712925631.000000"
|
||||
},
|
||||
"bot_id": "B03RHGBPH2M",
|
||||
"app_id": "A286WATV2",
|
||||
"text": "",
|
||||
"team": "T9RQLQ0KV",
|
||||
"bot_profile": {
|
||||
"id": "B03RHGBPH2M",
|
||||
"app_id": "A286WATV2",
|
||||
"name": "Opsgenie for Alert Management",
|
||||
"icons": {
|
||||
"image_36": "https://avatars.slack-edge.com/2019-05-30/652285939191_7831939cc30ef7159561_36.png",
|
||||
"image_48": "https://avatars.slack-edge.com/2019-05-30/652285939191_7831939cc30ef7159561_48.png",
|
||||
"image_72": "https://avatars.slack-edge.com/2019-05-30/652285939191_7831939cc30ef7159561_72.png"
|
||||
},
|
||||
"deleted": false,
|
||||
"updated": 1658887059,
|
||||
"team_id": "T9RQLQ0KV"
|
||||
},
|
||||
"attachments": [
|
||||
{
|
||||
"id": 1,
|
||||
"color": "2ecc71",
|
||||
"fallback": "\"[Grafana]: Firing: Alertconfig Workflow Failed\" <https://opsg.in/a/i/render/ec6e7c4b-90a6-4c3c-94e9-901f6b7ada47-1712922031403|11070>\nTags: alertname:Alertconfig Workflow Failed, grafana_folder:Datastores, rule_uid:a7639f7e-6950-41be-850a-b22119f74cbb",
|
||||
"text": "At least one alertconfig run has failed unexpectedly.\nDashboard: <https://grafana.render.com/d/VLZU83YVk?orgId=1>\nPanel: <https://grafana.render.com/d/VLZU83YVk?orgId=1&viewPanel=17>\nSource: <https://grafana.render.com/alerting/grafana/fa7b06b8-b4d8-4979-bce7-5e1c432edd81/view?orgId=1>",
|
||||
"title": "#11070: [Grafana]: Firing: Alertconfig Workflow Failed",
|
||||
"title_link": "https://opsg.in/a/i/render/ec6e7c4b-90a6-4c3c-94e9-901f6b7ada47-1712922031403",
|
||||
"callback_id": "bbd4a269-08a9-470e-ba79-ce238ac03dc7_05fa2e9b-bec4-4a7e-842d-36043d267a13_11070",
|
||||
"fields": [
|
||||
{
|
||||
"value": "P3",
|
||||
"title": "Priority",
|
||||
"short": true
|
||||
},
|
||||
{
|
||||
"value": "alertname:Alertconfig Workflow Failed, grafana_folder:Datastores, rule_uid:a7639f7e-6950-41be-850a-b22119f74cbb",
|
||||
"title": "Tags",
|
||||
"short": true
|
||||
},
|
||||
{
|
||||
"value": "Datastores Non-Critical",
|
||||
"title": "Routed Teams",
|
||||
"short": true
|
||||
}
|
||||
],
|
||||
"mrkdwn_in": [
|
||||
"text"
|
||||
]
|
||||
}
|
||||
],
|
||||
"ts": "1712922031.821339",
|
||||
"source_team": "T9RQLQ0KV",
|
||||
"user_team": "T9RQLQ0KV"
|
||||
},
|
||||
"previous_message": {
|
||||
"user": "U03RUK7FBUY",
|
||||
"type": "message",
|
||||
"ts": "1712922031.821339",
|
||||
"bot_id": "B03RHGBPH2M",
|
||||
"app_id": "A286WATV2",
|
||||
"text": "",
|
||||
"team": "T9RQLQ0KV",
|
||||
"bot_profile": {
|
||||
"id": "B03RHGBPH2M",
|
||||
"app_id": "A286WATV2",
|
||||
"name": "Opsgenie for Alert Management",
|
||||
"icons": {
|
||||
"image_36": "https://avatars.slack-edge.com/2019-05-30/652285939191_7831939cc30ef7159561_36.png",
|
||||
"image_48": "https://avatars.slack-edge.com/2019-05-30/652285939191_7831939cc30ef7159561_48.png",
|
||||
"image_72": "https://avatars.slack-edge.com/2019-05-30/652285939191_7831939cc30ef7159561_72.png"
|
||||
},
|
||||
"deleted": false,
|
||||
"updated": 1658887059,
|
||||
"team_id": "T9RQLQ0KV"
|
||||
},
|
||||
"attachments": [
|
||||
{
|
||||
"id": 1,
|
||||
"color": "F4511E",
|
||||
"fallback": "New alert: \"[Grafana]: Firing: Alertconfig Workflow Failed\" <https://opsg.in/a/i/render/ec6e7c4b-90a6-4c3c-94e9-901f6b7ada47-1712922031403|11070>\nTags: alertname:Alertconfig Workflow Failed, grafana_folder:Datastores, rule_uid:a7639f7e-6950-41be-850a-b22119f74cbb",
|
||||
"text": "At least one alertconfig run has failed unexpectedly.\nDashboard: <https://grafana.render.com/d/VLZU83YVk?orgId=1>\nPanel: <https://grafana.render.com/d/VLZU83YVk?orgId=1&viewPanel=17>\nSource: <https://grafana.render.com/alerting/grafana/fa7b06b8-b4d8-4979-bce7-5e1c432edd81/view?orgId=1>",
|
||||
"title": "#11070: [Grafana]: Firing: Alertconfig Workflow Failed",
|
||||
"title_link": "https://opsg.in/a/i/render/ec6e7c4b-90a6-4c3c-94e9-901f6b7ada47-1712922031403",
|
||||
"author_name": "New Alert created via Grafana - Datastore Slack",
|
||||
"callback_id": "bbd4a269-08a9-470e-ba79-ce238ac03dc7_05fa2e9b-bec4-4a7e-842d-36043d267a13_11070",
|
||||
"fields": [
|
||||
{
|
||||
"value": "P3",
|
||||
"title": "Priority",
|
||||
"short": true
|
||||
},
|
||||
{
|
||||
"value": "alertname:Alertconfig Workflow Failed, grafana_folder:Datastores, rule_uid:a7639f7e-6950-41be-850a-b22119f74cbb",
|
||||
"title": "Tags",
|
||||
"short": true
|
||||
},
|
||||
{
|
||||
"value": "Datastores Non-Critical",
|
||||
"title": "Routed Teams",
|
||||
"short": true
|
||||
}
|
||||
],
|
||||
"mrkdwn_in": [
|
||||
"pretext",
|
||||
"text"
|
||||
],
|
||||
"actions": [
|
||||
{
|
||||
"id": "1",
|
||||
"name": "acknowledge",
|
||||
"text": "Acknowledge",
|
||||
"type": "button",
|
||||
"value": "ack",
|
||||
"style": "default"
|
||||
},
|
||||
{
|
||||
"id": "2",
|
||||
"name": "close",
|
||||
"text": "Close",
|
||||
"type": "button",
|
||||
"value": "close",
|
||||
"style": "primary"
|
||||
},
|
||||
{
|
||||
"id": "3",
|
||||
"name": "action",
|
||||
"text": "Other actions...",
|
||||
"type": "select",
|
||||
"data_source": "static",
|
||||
"options": [
|
||||
{
|
||||
"text": "Assign",
|
||||
"value": "assign"
|
||||
},
|
||||
{
|
||||
"text": "Take Ownership",
|
||||
"value": "own"
|
||||
},
|
||||
{
|
||||
"text": "Snooze",
|
||||
"value": "snooze"
|
||||
},
|
||||
{
|
||||
"text": "Add Note",
|
||||
"value": "addNote"
|
||||
},
|
||||
{
|
||||
"text": "Update Priority",
|
||||
"value": "updatePriority"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"channel": "C06U1DDBBU4",
|
||||
"hidden": true,
|
||||
"ts": "1712925631.000400",
|
||||
"event_ts": "1712925631.000400",
|
||||
"channel_type": "channel"
|
||||
},
|
||||
"type": "event_callback",
|
||||
"event_id": "Ev06UQNCJKNC",
|
||||
"event_time": 1712925631,
|
||||
"authorizations": [
|
||||
{
|
||||
"enterprise_id": null,
|
||||
"team_id": "T9RQLQ0KV",
|
||||
"user_id": "U06TS9M7ABG",
|
||||
"is_bot": true,
|
||||
"is_enterprise_install": false
|
||||
}
|
||||
],
|
||||
"is_ext_shared_channel": false,
|
||||
"event_context": "4-eyJldCI6Im1lc3NhZ2UiLCJ0aWQiOiJUOVJRTFEwS1YiLCJhaWQiOiJBMDZUWUg3Q0FMQiIsImNpZCI6IkMwNlUxRERCQlU0In0"
|
||||
}
|
||||
127
testdata/slack_events/opsgenie_alert_3.json
vendored
Normal file
127
testdata/slack_events/opsgenie_alert_3.json
vendored
Normal file
@@ -0,0 +1,127 @@
|
||||
{
|
||||
"hint": "an alert firing that has a render_id",
|
||||
"token": "redacted",
|
||||
"team_id": "T9RQLQ0KV",
|
||||
"context_team_id": "T9RQLQ0KV",
|
||||
"context_enterprise_id": null,
|
||||
"api_app_id": "A06TYH7CALB",
|
||||
"event": {
|
||||
"user": "U03RUK7FBUY",
|
||||
"type": "message",
|
||||
"ts": "1712911957.023359",
|
||||
"bot_id": "B03RHGBPH2M",
|
||||
"app_id": "A286WATV2",
|
||||
"text": "",
|
||||
"team": "T9RQLQ0KV",
|
||||
"bot_profile": {
|
||||
"id": "B03RHGBPH2M",
|
||||
"deleted": false,
|
||||
"name": "Opsgenie for Alert Management",
|
||||
"updated": 1658887059,
|
||||
"app_id": "A286WATV2",
|
||||
"icons": {
|
||||
"image_36": "https://avatars.slack-edge.com/2019-05-30/652285939191_7831939cc30ef7159561_36.png",
|
||||
"image_48": "https://avatars.slack-edge.com/2019-05-30/652285939191_7831939cc30ef7159561_48.png",
|
||||
"image_72": "https://avatars.slack-edge.com/2019-05-30/652285939191_7831939cc30ef7159561_72.png"
|
||||
},
|
||||
"team_id": "T9RQLQ0KV"
|
||||
},
|
||||
"attachments": [
|
||||
{
|
||||
"id": 1,
|
||||
"color": "F4511E",
|
||||
"fallback": "New alert: \"[Grafana]: Firing: [Oregon-1] Wal Receive Count Alert\" <https://opsg.in/a/i/render/c1565736-aac4-4a78-b7db-65e4d90a59f2-1712911956739|11067>\nTags: alertname:[oregon-1] WAL Receive Count alert, business_hours:true, cluster:oregon-1, contacts:\"OpsGenie - Datastore\",\"Slack - Datastore, grafana_folder:[Generated] oregon-1, template-source:ab1da08c-129f-4f08-b505-db44929eb8",
|
||||
"text": "Replica for dpg-cn7te6nsc6pc73ak5pig-b cannot start streaming replication, because the primary has already deleted the needed WAL segment.\n\nSync the replica from scratch using the runbook.\nRunbook: <https://slab.render.com/posts/runbook-postgres-replica-too-far-behind-pr084dj4>\nSource: <https://grafana.render.com/alerting/grafana/aecdfbcf-5298-4679-bd20-d9dec32ee2a0/view?orgId=1>",
|
||||
"title": "#11067: [Grafana]: Firing: [Oregon-1] Wal Receive Count Alert",
|
||||
"title_link": "https://opsg.in/a/i/render/c1565736-aac4-4a78-b7db-65e4d90a59f2-1712911956739",
|
||||
"author_name": "New Alert created via Grafana - Datastore Slack",
|
||||
"callback_id": "bbd4a269-08a9-470e-ba79-ce238ac03dc7_05fa2e9b-bec4-4a7e-842d-36043d267a13_11067",
|
||||
"fields": [
|
||||
{
|
||||
"value": "P3",
|
||||
"title": "Priority",
|
||||
"short": true
|
||||
},
|
||||
{
|
||||
"value": "alertname:[oregon-1] WAL Receive Count alert, business_hours:true, cluster:oregon-1, contacts:\"OpsGenie - Datastore\",\"Slack - Datastore, grafana_folder:[Generated] oregon-1, template-source:ab1da08c-129f-4f08-b505-db44929eb8",
|
||||
"title": "Tags",
|
||||
"short": true
|
||||
},
|
||||
{
|
||||
"value": "Datastores Non-Critical",
|
||||
"title": "Routed Teams",
|
||||
"short": true
|
||||
}
|
||||
],
|
||||
"mrkdwn_in": [
|
||||
"pretext",
|
||||
"text"
|
||||
],
|
||||
"actions": [
|
||||
{
|
||||
"id": "1",
|
||||
"name": "acknowledge",
|
||||
"text": "Acknowledge",
|
||||
"type": "button",
|
||||
"value": "ack",
|
||||
"style": "default"
|
||||
},
|
||||
{
|
||||
"id": "2",
|
||||
"name": "close",
|
||||
"text": "Close",
|
||||
"type": "button",
|
||||
"value": "close",
|
||||
"style": "primary"
|
||||
},
|
||||
{
|
||||
"id": "3",
|
||||
"name": "action",
|
||||
"text": "Other actions...",
|
||||
"type": "select",
|
||||
"data_source": "static",
|
||||
"options": [
|
||||
{
|
||||
"text": "Assign",
|
||||
"value": "assign"
|
||||
},
|
||||
{
|
||||
"text": "Take Ownership",
|
||||
"value": "own"
|
||||
},
|
||||
{
|
||||
"text": "Snooze",
|
||||
"value": "snooze"
|
||||
},
|
||||
{
|
||||
"text": "Add Note",
|
||||
"value": "addNote"
|
||||
},
|
||||
{
|
||||
"text": "Update Priority",
|
||||
"value": "updatePriority"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"channel": "C06U1DDBBU4",
|
||||
"event_ts": "1712911957.023359",
|
||||
"channel_type": "channel"
|
||||
},
|
||||
"type": "event_callback",
|
||||
"event_id": "Ev06U0TNBXV0",
|
||||
"event_time": 1712911957,
|
||||
"authorizations": [
|
||||
{
|
||||
"enterprise_id": null,
|
||||
"team_id": "T9RQLQ0KV",
|
||||
"user_id": "U06TS9M7ABG",
|
||||
"is_bot": true,
|
||||
"is_enterprise_install": false
|
||||
}
|
||||
],
|
||||
"is_ext_shared_channel": false,
|
||||
"event_context": "4-eyJldCI6Im1lc3NhZ2UiLCJ0aWQiOiJUOVJRTFEwS1YiLCJhaWQiOiJBMDZUWUg3Q0FMQiIsImNpZCI6IkMwNlUxRERCQlU0In0"
|
||||
}
|
||||
192
testdata/slack_events/opsgenie_alert_resolved.json
vendored
Normal file
192
testdata/slack_events/opsgenie_alert_resolved.json
vendored
Normal file
@@ -0,0 +1,192 @@
|
||||
{
|
||||
"hint": "an edit to resolve an alert from opsgenie",
|
||||
"token": "redacted",
|
||||
"team_id": "T9RQLQ0KV",
|
||||
"context_team_id": "T9RQLQ0KV",
|
||||
"context_enterprise_id": null,
|
||||
"api_app_id": "A06TYH7CALB",
|
||||
"event": {
|
||||
"type": "message",
|
||||
"subtype": "message_changed",
|
||||
"message": {
|
||||
"user": "U03RUK7FBUY",
|
||||
"type": "message",
|
||||
"edited": {
|
||||
"user": "B03RHGBPH2M",
|
||||
"ts": "1712916339.000000"
|
||||
},
|
||||
"bot_id": "B03RHGBPH2M",
|
||||
"app_id": "A286WATV2",
|
||||
"text": "",
|
||||
"team": "T9RQLQ0KV",
|
||||
"bot_profile": {
|
||||
"id": "B03RHGBPH2M",
|
||||
"app_id": "A286WATV2",
|
||||
"name": "Opsgenie for Alert Management",
|
||||
"icons": {
|
||||
"image_36": "https://avatars.slack-edge.com/2019-05-30/652285939191_7831939cc30ef7159561_36.png",
|
||||
"image_48": "https://avatars.slack-edge.com/2019-05-30/652285939191_7831939cc30ef7159561_48.png",
|
||||
"image_72": "https://avatars.slack-edge.com/2019-05-30/652285939191_7831939cc30ef7159561_72.png"
|
||||
},
|
||||
"deleted": false,
|
||||
"updated": 1658887059,
|
||||
"team_id": "T9RQLQ0KV"
|
||||
},
|
||||
"attachments": [
|
||||
{
|
||||
"id": 1,
|
||||
"color": "2ecc71",
|
||||
"fallback": "\"[Grafana]: Firing: Alertconfig Workflow Failed\" <https://opsg.in/a/i/render/ba6f8300-b869-4aba-9d02-c142237ae59e-1712912739431|11069>\nTags: alertname:Alertconfig Workflow Failed, grafana_folder:Datastores, rule_uid:a7639f7e-6950-41be-850a-b22119f74cbb",
|
||||
"text": "At least one alertconfig run has failed unexpectedly.\nDashboard: <https://grafana.render.com/d/VLZU83YVk?orgId=1>\nPanel: <https://grafana.render.com/d/VLZU83YVk?orgId=1&viewPanel=17>\nSource: <https://grafana.render.com/alerting/grafana/fa7b06b8-b4d8-4979-bce7-5e1c432edd81/view?orgId=1>",
|
||||
"title": "#11069: [Grafana]: Firing: Alertconfig Workflow Failed",
|
||||
"title_link": "https://opsg.in/a/i/render/ba6f8300-b869-4aba-9d02-c142237ae59e-1712912739431",
|
||||
"callback_id": "bbd4a269-08a9-470e-ba79-ce238ac03dc7_05fa2e9b-bec4-4a7e-842d-36043d267a13_11069",
|
||||
"fields": [
|
||||
{
|
||||
"value": "P3",
|
||||
"title": "Priority",
|
||||
"short": true
|
||||
},
|
||||
{
|
||||
"value": "alertname:Alertconfig Workflow Failed, grafana_folder:Datastores, rule_uid:a7639f7e-6950-41be-850a-b22119f74cbb",
|
||||
"title": "Tags",
|
||||
"short": true
|
||||
},
|
||||
{
|
||||
"value": "Datastores Non-Critical",
|
||||
"title": "Routed Teams",
|
||||
"short": true
|
||||
}
|
||||
],
|
||||
"mrkdwn_in": [
|
||||
"text"
|
||||
]
|
||||
}
|
||||
],
|
||||
"ts": "1712912739.723049",
|
||||
"source_team": "T9RQLQ0KV",
|
||||
"user_team": "T9RQLQ0KV"
|
||||
},
|
||||
"previous_message": {
|
||||
"user": "U03RUK7FBUY",
|
||||
"type": "message",
|
||||
"ts": "1712912739.723049",
|
||||
"bot_id": "B03RHGBPH2M",
|
||||
"app_id": "A286WATV2",
|
||||
"text": "",
|
||||
"team": "T9RQLQ0KV",
|
||||
"bot_profile": {
|
||||
"id": "B03RHGBPH2M",
|
||||
"app_id": "A286WATV2",
|
||||
"name": "Opsgenie for Alert Management",
|
||||
"icons": {
|
||||
"image_36": "https://avatars.slack-edge.com/2019-05-30/652285939191_7831939cc30ef7159561_36.png",
|
||||
"image_48": "https://avatars.slack-edge.com/2019-05-30/652285939191_7831939cc30ef7159561_48.png",
|
||||
"image_72": "https://avatars.slack-edge.com/2019-05-30/652285939191_7831939cc30ef7159561_72.png"
|
||||
},
|
||||
"deleted": false,
|
||||
"updated": 1658887059,
|
||||
"team_id": "T9RQLQ0KV"
|
||||
},
|
||||
"attachments": [
|
||||
{
|
||||
"id": 1,
|
||||
"color": "F4511E",
|
||||
"fallback": "New alert: \"[Grafana]: Firing: Alertconfig Workflow Failed\" <https://opsg.in/a/i/render/ba6f8300-b869-4aba-9d02-c142237ae59e-1712912739431|11069>\nTags: alertname:Alertconfig Workflow Failed, grafana_folder:Datastores, rule_uid:a7639f7e-6950-41be-850a-b22119f74cbb",
|
||||
"text": "At least one alertconfig run has failed unexpectedly.\nDashboard: <https://grafana.render.com/d/VLZU83YVk?orgId=1>\nPanel: <https://grafana.render.com/d/VLZU83YVk?orgId=1&viewPanel=17>\nSource: <https://grafana.render.com/alerting/grafana/fa7b06b8-b4d8-4979-bce7-5e1c432edd81/view?orgId=1>",
|
||||
"title": "#11069: [Grafana]: Firing: Alertconfig Workflow Failed",
|
||||
"title_link": "https://opsg.in/a/i/render/ba6f8300-b869-4aba-9d02-c142237ae59e-1712912739431",
|
||||
"author_name": "New Alert created via Grafana - Datastore Slack",
|
||||
"callback_id": "bbd4a269-08a9-470e-ba79-ce238ac03dc7_05fa2e9b-bec4-4a7e-842d-36043d267a13_11069",
|
||||
"fields": [
|
||||
{
|
||||
"value": "P3",
|
||||
"title": "Priority",
|
||||
"short": true
|
||||
},
|
||||
{
|
||||
"value": "alertname:Alertconfig Workflow Failed, grafana_folder:Datastores, rule_uid:a7639f7e-6950-41be-850a-b22119f74cbb",
|
||||
"title": "Tags",
|
||||
"short": true
|
||||
},
|
||||
{
|
||||
"value": "Datastores Non-Critical",
|
||||
"title": "Routed Teams",
|
||||
"short": true
|
||||
}
|
||||
],
|
||||
"mrkdwn_in": [
|
||||
"pretext",
|
||||
"text"
|
||||
],
|
||||
"actions": [
|
||||
{
|
||||
"id": "1",
|
||||
"name": "acknowledge",
|
||||
"text": "Acknowledge",
|
||||
"type": "button",
|
||||
"value": "ack",
|
||||
"style": "default"
|
||||
},
|
||||
{
|
||||
"id": "2",
|
||||
"name": "close",
|
||||
"text": "Close",
|
||||
"type": "button",
|
||||
"value": "close",
|
||||
"style": "primary"
|
||||
},
|
||||
{
|
||||
"id": "3",
|
||||
"name": "action",
|
||||
"text": "Other actions...",
|
||||
"type": "select",
|
||||
"data_source": "static",
|
||||
"options": [
|
||||
{
|
||||
"text": "Assign",
|
||||
"value": "assign"
|
||||
},
|
||||
{
|
||||
"text": "Take Ownership",
|
||||
"value": "own"
|
||||
},
|
||||
{
|
||||
"text": "Snooze",
|
||||
"value": "snooze"
|
||||
},
|
||||
{
|
||||
"text": "Add Note",
|
||||
"value": "addNote"
|
||||
},
|
||||
{
|
||||
"text": "Update Priority",
|
||||
"value": "updatePriority"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"channel": "C06U1DDBBU4",
|
||||
"hidden": true,
|
||||
"ts": "1712916339.000300",
|
||||
"event_ts": "1712916339.000300",
|
||||
"channel_type": "channel"
|
||||
},
|
||||
"type": "event_callback",
|
||||
"event_id": "Ev06TLMY8FJB",
|
||||
"event_time": 1712916339,
|
||||
"authorizations": [
|
||||
{
|
||||
"enterprise_id": null,
|
||||
"team_id": "T9RQLQ0KV",
|
||||
"user_id": "U06TS9M7ABG",
|
||||
"is_bot": true,
|
||||
"is_enterprise_install": false
|
||||
}
|
||||
],
|
||||
"is_ext_shared_channel": false,
|
||||
"event_context": "4-eyJldCI6Im1lc3NhZ2UiLCJ0aWQiOiJUOVJRTFEwS1YiLCJhaWQiOiJBMDZUWUg3Q0FMQiIsImNpZCI6IkMwNlUxRERCQlU0In0"
|
||||
}
|
||||
55
testdata/slack_events/opsgenie_alert_resolved_fyi.json
vendored
Normal file
55
testdata/slack_events/opsgenie_alert_resolved_fyi.json
vendored
Normal file
@@ -0,0 +1,55 @@
|
||||
{
|
||||
"hint": "a teeny closed alert message",
|
||||
"token": "redacted",
|
||||
"team_id": "T9RQLQ0KV",
|
||||
"context_team_id": "T9RQLQ0KV",
|
||||
"context_enterprise_id": null,
|
||||
"api_app_id": "A06TYH7CALB",
|
||||
"event": {
|
||||
"user": "U03RUK7FBUY",
|
||||
"type": "message",
|
||||
"ts": "1712925631.682559",
|
||||
"bot_id": "B03RHGBPH2M",
|
||||
"app_id": "A286WATV2",
|
||||
"text": "",
|
||||
"team": "T9RQLQ0KV",
|
||||
"bot_profile": {
|
||||
"id": "B03RHGBPH2M",
|
||||
"deleted": false,
|
||||
"name": "Opsgenie for Alert Management",
|
||||
"updated": 1658887059,
|
||||
"app_id": "A286WATV2",
|
||||
"icons": {
|
||||
"image_36": "https://avatars.slack-edge.com/2019-05-30/652285939191_7831939cc30ef7159561_36.png",
|
||||
"image_48": "https://avatars.slack-edge.com/2019-05-30/652285939191_7831939cc30ef7159561_48.png",
|
||||
"image_72": "https://avatars.slack-edge.com/2019-05-30/652285939191_7831939cc30ef7159561_72.png"
|
||||
},
|
||||
"team_id": "T9RQLQ0KV"
|
||||
},
|
||||
"attachments": [
|
||||
{
|
||||
"id": 1,
|
||||
"color": "2ecc71",
|
||||
"fallback": "Alert API closed alert <https://opsg.in/a/i/render/ec6e7c4b-90a6-4c3c-94e9-901f6b7ada47-1712922031403|#11070> \"[Grafana]: Firing: Alertconfig Workflow Failed\"",
|
||||
"text": "Alert API closed alert <https://opsg.in/a/i/render/ec6e7c4b-90a6-4c3c-94e9-901f6b7ada47-1712922031403|#11070> \"[Grafana]: Firing: Alertconfig Workflow Failed\""
|
||||
}
|
||||
],
|
||||
"channel": "C06U1DDBBU4",
|
||||
"event_ts": "1712925631.682559",
|
||||
"channel_type": "channel"
|
||||
},
|
||||
"type": "event_callback",
|
||||
"event_id": "Ev06U1R10TPV",
|
||||
"event_time": 1712925631,
|
||||
"authorizations": [
|
||||
{
|
||||
"enterprise_id": null,
|
||||
"team_id": "T9RQLQ0KV",
|
||||
"user_id": "U06TS9M7ABG",
|
||||
"is_bot": true,
|
||||
"is_enterprise_install": false
|
||||
}
|
||||
],
|
||||
"is_ext_shared_channel": false,
|
||||
"event_context": "4-eyJldCI6Im1lc3NhZ2UiLCJ0aWQiOiJUOVJRTFEwS1YiLCJhaWQiOiJBMDZUWUg3Q0FMQiIsImNpZCI6IkMwNlUxRERCQlU0In0"
|
||||
}
|
||||
57
testdata/slack_events/reingested_alert.json
vendored
Normal file
57
testdata/slack_events/reingested_alert.json
vendored
Normal file
@@ -0,0 +1,57 @@
|
||||
{
|
||||
"user": "U03RUK7FBUY",
|
||||
"type": "message",
|
||||
"ts": "1712892637.037639",
|
||||
"edited": {
|
||||
"user": "B03RHGBPH2M",
|
||||
"ts": "1712896236.000000"
|
||||
},
|
||||
"bot_id": "B03RHGBPH2M",
|
||||
"app_id": "A286WATV2",
|
||||
"text": "",
|
||||
"team": "T9RQLQ0KV",
|
||||
"bot_profile": {
|
||||
"id": "B03RHGBPH2M",
|
||||
"app_id": "A286WATV2",
|
||||
"name": "Opsgenie for Alert Management",
|
||||
"icons": {
|
||||
"image_36": "https://avatars.slack-edge.com/2019-05-30/652285939191_7831939cc30ef7159561_36.png",
|
||||
"image_48": "https://avatars.slack-edge.com/2019-05-30/652285939191_7831939cc30ef7159561_48.png",
|
||||
"image_72": "https://avatars.slack-edge.com/2019-05-30/652285939191_7831939cc30ef7159561_72.png"
|
||||
},
|
||||
"deleted": false,
|
||||
"updated": 1658887059,
|
||||
"team_id": "T9RQLQ0KV"
|
||||
},
|
||||
"attachments": [
|
||||
{
|
||||
"id": 1,
|
||||
"color": "2ecc71",
|
||||
"fallback": "\"[Grafana]: Firing: Alertconfig Workflow Failed\" <https://opsg.in/a/i/render/bdbbe5a6-738b-4643-9267-39d8dfcb2ead-1712892636514|11061>\nTags: alertname:Alertconfig Workflow Failed, grafana_folder:Datastores, rule_uid:a7639f7e-6950-41be-850a-b22119f74cbb",
|
||||
"text": "At least one alertconfig run has failed unexpectedly.\nDashboard: <https://grafana.render.com/d/VLZU83YVk?orgId=1>\nPanel: <https://grafana.render.com/d/VLZU83YVk?orgId=1&viewPanel=17>\nSource: <https://grafana.render.com/alerting/grafana/fa7b06b8-b4d8-4979-bce7-5e1c432edd81/view?orgId=1>",
|
||||
"title": "#11061: [Grafana]: Firing: Alertconfig Workflow Failed",
|
||||
"title_link": "https://opsg.in/a/i/render/bdbbe5a6-738b-4643-9267-39d8dfc$2ead-1712892636514",
|
||||
"callback_id": "bbd4a269-08a9-470e-ba79-ce238ac03dc7_05fa2e9b-bec4-4a7e-842d-36043d267a13_11061",
|
||||
"fields": [
|
||||
{
|
||||
"value": "P3",
|
||||
"title": "Priority",
|
||||
"short": true
|
||||
},
|
||||
{
|
||||
"value": "alertname:Alertconfig Workflow Failed, grafana_folder:Datastores, rule_uid:a7639f7e-6950-41be-850a-b22119f74cbb",
|
||||
"title": "Tags",
|
||||
"short": true
|
||||
},
|
||||
{
|
||||
"value": "Datastores Non-Critical",
|
||||
"title": "Routed Teams",
|
||||
"short": true
|
||||
}
|
||||
],
|
||||
"mrkdwn_in": [
|
||||
"text"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
Reference in New Issue
Block a user