Compare commits
69 Commits
83c0ee3f53
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
31fe916000 | ||
|
|
d31042e971 | ||
|
|
2e7d58fd13 | ||
|
|
93672e67a6 | ||
|
|
1b06f727fd | ||
|
|
3ae62390cf | ||
|
|
5cfb89bc64 | ||
|
|
b554be6282 | ||
|
|
5785ea37ae | ||
|
|
f27d416a5a | ||
|
|
81793876f8 | ||
|
|
6d81164161 | ||
|
|
20256bd6b4 | ||
|
|
e5e98e2890 | ||
|
|
4fb26ec775 | ||
|
|
782b9ec3cf | ||
|
|
9d7f69bd8a | ||
|
|
12de99da57 | ||
|
|
81fe8070ca | ||
|
|
79de56e236 | ||
|
|
f485b5ea88 | ||
|
|
894536d209 | ||
|
|
f8861a73b5 | ||
|
|
14de286415 | ||
|
|
8557ddc522 | ||
|
|
1e43c2a14e | ||
|
|
04c574ffec | ||
|
|
b2f64037e2 | ||
|
|
fbd151f9ef | ||
|
|
5f21098fdc | ||
|
|
7c2d663401 | ||
|
|
95b0394199 | ||
|
|
d9d91193dd | ||
|
|
2033cefc2a | ||
|
|
600a2e0111 | ||
|
|
4f3b8ec866 | ||
|
|
c4a7eaf04a | ||
|
|
5557c0920a | ||
|
|
c7f5cdb040 | ||
|
|
a8e8fdc451 | ||
|
|
d88a8bb23a | ||
|
|
39c0056190 | ||
|
|
d87af2fadc | ||
|
|
9bc47bfde6 | ||
|
|
098986eb07 | ||
|
|
5bc068451f | ||
|
|
5fa21d0cd9 | ||
|
|
709f2ac254 | ||
|
|
ba06796b8c | ||
|
|
f38c183fe8 | ||
|
|
8ae8f47753 | ||
|
|
e372be4288 | ||
|
|
acfd95e5af | ||
|
|
d70a0e313f | ||
|
|
0cecd5ea04 | ||
|
|
a7d5d021d6 | ||
|
|
44db0c6939 | ||
|
|
dd98aedb5d | ||
|
|
254cb1ec0a | ||
|
|
38a68de67f | ||
|
|
3c62411927 | ||
|
|
c84d80e8d3 | ||
|
|
74477fc09c | ||
|
|
1fd4b72b22 | ||
|
|
d9244e4e1c | ||
|
|
c9d3b4998b | ||
|
|
c5e1556f61 | ||
|
|
d76f8e2c15 | ||
|
|
ff280997b1 |
33
README.md
33
README.md
@@ -1,3 +1,36 @@
|
|||||||
# Spoc Bot v. Render
|
# Spoc Bot v. Render
|
||||||
|
|
||||||
Thank you, [Sean](https://www.linkedin.com/in/sean-moore-1755a619/)
|
Thank you, [Sean](https://www.linkedin.com/in/sean-moore-1755a619/)
|
||||||
|
|
||||||
|
## TODO
|
||||||
|
|
||||||
|
- what SLO/SLI can I help benoit with
|
||||||
|
- scott; like to keep state in incident.io and zendesk
|
||||||
|
- @spoc -ignore, @spoc -s summary
|
||||||
|
- limit queue retries
|
||||||
|
|
||||||
|
```
|
||||||
|
erDiagram
|
||||||
|
%% thread event eventName
|
||||||
|
EVENT ||--|{ THREAD: "spawns"
|
||||||
|
THREAD ||--|{ MESSAGE: "populated by"
|
||||||
|
|
||||||
|
MESSAGE {
|
||||||
|
ID str
|
||||||
|
URL str
|
||||||
|
TS number
|
||||||
|
Plaintext str
|
||||||
|
}
|
||||||
|
THREAD {
|
||||||
|
ID str
|
||||||
|
URL str
|
||||||
|
Channel str
|
||||||
|
}
|
||||||
|
EVENT {
|
||||||
|
ID str
|
||||||
|
Name str
|
||||||
|
Asset str
|
||||||
|
Resolved bool
|
||||||
|
Datacenter str
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|||||||
139
ai.go
139
ai.go
@@ -1,13 +1,10 @@
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"context"
|
"context"
|
||||||
"os"
|
"net/http"
|
||||||
"strings"
|
"time"
|
||||||
|
|
||||||
nn "github.com/nikolaydubina/llama2.go/exp/nnfast"
|
|
||||||
"github.com/nikolaydubina/llama2.go/llama2"
|
|
||||||
"github.com/tmc/langchaingo/llms"
|
"github.com/tmc/langchaingo/llms"
|
||||||
"github.com/tmc/langchaingo/llms/ollama"
|
"github.com/tmc/langchaingo/llms/ollama"
|
||||||
)
|
)
|
||||||
@@ -37,133 +34,23 @@ func NewAIOllama(url, model string) AIOllama {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (ai AIOllama) Do(ctx context.Context, prompt string) (string, error) {
|
func (ai AIOllama) Do(ctx context.Context, prompt string) (string, error) {
|
||||||
|
c := &http.Client{
|
||||||
|
Timeout: time.Hour,
|
||||||
|
Transport: &http.Transport{
|
||||||
|
//DisableKeepAlives: true,
|
||||||
|
IdleConnTimeout: time.Hour,
|
||||||
|
ResponseHeaderTimeout: time.Hour,
|
||||||
|
ExpectContinueTimeout: time.Hour,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
defer c.CloseIdleConnections()
|
||||||
llm, err := ollama.New(
|
llm, err := ollama.New(
|
||||||
ollama.WithModel(ai.model),
|
ollama.WithModel(ai.model),
|
||||||
ollama.WithServerURL(ai.url),
|
ollama.WithServerURL(ai.url),
|
||||||
|
ollama.WithHTTPClient(c),
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
return llms.GenerateFromSinglePrompt(ctx, llm, prompt)
|
return llms.GenerateFromSinglePrompt(ctx, llm, prompt)
|
||||||
}
|
}
|
||||||
|
|
||||||
type AILocal struct {
|
|
||||||
checkpointPath string
|
|
||||||
tokenizerPath string
|
|
||||||
temperature float64
|
|
||||||
steps int
|
|
||||||
topp float64
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewAILocal(
|
|
||||||
checkpointPath string,
|
|
||||||
tokenizerPath string,
|
|
||||||
temperature float64,
|
|
||||||
steps int,
|
|
||||||
topp float64,
|
|
||||||
) AILocal {
|
|
||||||
return AILocal{
|
|
||||||
checkpointPath: checkpointPath,
|
|
||||||
tokenizerPath: tokenizerPath,
|
|
||||||
temperature: temperature,
|
|
||||||
steps: steps,
|
|
||||||
topp: topp,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// https://github.com/nikolaydubina/llama2.go/blob/master/main.go
|
|
||||||
func (ai AILocal) Do(ctx context.Context, prompt string) (string, error) {
|
|
||||||
checkpointFile, err := os.OpenFile(ai.checkpointPath, os.O_RDONLY, 0)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
defer checkpointFile.Close()
|
|
||||||
|
|
||||||
config, err := llama2.NewConfigFromCheckpoint(checkpointFile)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
isSharedWeights := config.VocabSize > 0
|
|
||||||
if config.VocabSize < 0 {
|
|
||||||
config.VocabSize = -config.VocabSize
|
|
||||||
}
|
|
||||||
|
|
||||||
tokenizerFile, err := os.OpenFile(ai.tokenizerPath, os.O_RDONLY, 0)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
defer tokenizerFile.Close()
|
|
||||||
|
|
||||||
vocab := llama2.NewVocabFromFile(config.VocabSize, tokenizerFile)
|
|
||||||
|
|
||||||
w := llama2.NewTransformerWeightsFromCheckpoint(config, checkpointFile, isSharedWeights)
|
|
||||||
|
|
||||||
// right now we cannot run for more than config.SeqLen steps
|
|
||||||
steps := ai.steps
|
|
||||||
if steps <= 0 || steps > config.SeqLen {
|
|
||||||
steps = config.SeqLen
|
|
||||||
}
|
|
||||||
|
|
||||||
runState := llama2.NewRunState(config)
|
|
||||||
|
|
||||||
promptTokens := vocab.Encode(strings.ReplaceAll(prompt, "\n", "<0x0A>"))
|
|
||||||
|
|
||||||
out := bytes.NewBuffer(nil)
|
|
||||||
|
|
||||||
// the current position we are in
|
|
||||||
var token int = 1 // 1 = BOS token in llama-2 sentencepiece
|
|
||||||
var pos = 0
|
|
||||||
|
|
||||||
for pos < steps {
|
|
||||||
// forward the transformer to get logits for the next token
|
|
||||||
llama2.Transformer(token, pos, config, runState, w)
|
|
||||||
|
|
||||||
var next int
|
|
||||||
if pos < len(promptTokens) {
|
|
||||||
next = promptTokens[pos]
|
|
||||||
} else {
|
|
||||||
// sample the next token
|
|
||||||
if ai.temperature == 0 {
|
|
||||||
// greedy argmax sampling
|
|
||||||
next = nn.ArgMax(runState.Logits)
|
|
||||||
} else {
|
|
||||||
// apply the temperature to the logits
|
|
||||||
for q := 0; q < config.VocabSize; q++ {
|
|
||||||
runState.Logits[q] /= float32(ai.temperature)
|
|
||||||
}
|
|
||||||
// apply softmax to the logits to the probabilities for next token
|
|
||||||
nn.SoftMax(runState.Logits)
|
|
||||||
// we now want to sample from this distribution to get the next token
|
|
||||||
if ai.topp <= 0 || ai.topp >= 1 {
|
|
||||||
// simply sample from the predicted probability distribution
|
|
||||||
next = nn.Sample(runState.Logits)
|
|
||||||
} else {
|
|
||||||
// top-p (nucleus) sampling, clamping the least likely tokens to zero
|
|
||||||
next = nn.SampleTopP(runState.Logits, float32(ai.topp))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
pos++
|
|
||||||
|
|
||||||
// data-dependent terminating condition: the BOS (1) token delimits sequences
|
|
||||||
if next == 1 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
// following BOS (1) token, sentencepiece decoder strips any leading whitespace
|
|
||||||
var tokenStr string
|
|
||||||
if token == 1 && vocab.Words[next][0] == ' ' {
|
|
||||||
tokenStr = vocab.Words[next][1:]
|
|
||||||
} else {
|
|
||||||
tokenStr = vocab.Words[next]
|
|
||||||
}
|
|
||||||
out.Write([]byte(tokenStr))
|
|
||||||
|
|
||||||
// advance forward
|
|
||||||
token = next
|
|
||||||
}
|
|
||||||
out.Write([]byte("\n"))
|
|
||||||
|
|
||||||
return strings.ReplaceAll(string(out.Bytes()), "<0x0A>", "\n"), nil
|
|
||||||
}
|
|
||||||
|
|||||||
59
ai_test.go
59
ai_test.go
@@ -4,68 +4,20 @@ package main
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"os"
|
|
||||||
"path"
|
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestAINoop(t *testing.T) {
|
func TestAINoop(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
ai := NewAINoop()
|
ai := NewAINoop()
|
||||||
|
|
||||||
testAI(t, ai)
|
testAI(t, ai)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAIOllama(t *testing.T) {
|
func TestAIOllama(t *testing.T) {
|
||||||
ai := NewAIOllama("http://localhost:11434", "gemma:2b")
|
t.Parallel()
|
||||||
|
ai := NewAIOllama("http://localhost:11434", "llama3")
|
||||||
testAI(t, ai)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAILocal(t *testing.T) {
|
|
||||||
d := os.TempDir()
|
|
||||||
checkpoints := "checkpoints"
|
|
||||||
tokenizer := "tokenizer"
|
|
||||||
for u, p := range map[string]*string{
|
|
||||||
"https://huggingface.co/karpathy/tinyllamas/resolve/main/stories110M.bin": &checkpoints,
|
|
||||||
"https://github.com/karpathy/llama2.c/raw/master/tokenizer.bin": &tokenizer,
|
|
||||||
} {
|
|
||||||
func() {
|
|
||||||
*p = path.Base(u)
|
|
||||||
if _, err := os.Stat(path.Join(d, *p)); os.IsNotExist(err) {
|
|
||||||
t.Logf("downloading %s from %s", u, *p)
|
|
||||||
|
|
||||||
resp, err := http.Get(u)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
|
|
||||||
f, err := os.Create(path.Join(d, *p))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
|
|
||||||
if _, err := io.Copy(f, resp.Body); err != nil {
|
|
||||||
f.Close()
|
|
||||||
os.Remove(path.Join(d, *p))
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
ai := NewAILocal(
|
|
||||||
path.Join(d, checkpoints),
|
|
||||||
path.Join(d, tokenizer),
|
|
||||||
0.9,
|
|
||||||
256,
|
|
||||||
0.9,
|
|
||||||
)
|
|
||||||
|
|
||||||
testAI(t, ai)
|
testAI(t, ai)
|
||||||
}
|
}
|
||||||
@@ -75,7 +27,7 @@ func testAI(t *testing.T, ai AI) {
|
|||||||
defer can()
|
defer can()
|
||||||
|
|
||||||
t.Run("mvp", func(t *testing.T) {
|
t.Run("mvp", func(t *testing.T) {
|
||||||
if result, err := ai.Do(ctx, "hello world"); err != nil {
|
if result, err := ai.Do(ctx, "Tell me a fun fact."); err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
} else if len(result) < 3 {
|
} else if len(result) < 3 {
|
||||||
t.Error(result)
|
t.Error(result)
|
||||||
@@ -84,6 +36,7 @@ func testAI(t *testing.T, ai AI) {
|
|||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
|
/*
|
||||||
t.Run("simulation", func(t *testing.T) {
|
t.Run("simulation", func(t *testing.T) {
|
||||||
d := NewRAM()
|
d := NewRAM()
|
||||||
FillWithTestdata(ctx, d, renderAssetPattern, renderDatacenterPattern, renderEventNamePattern)
|
FillWithTestdata(ctx, d, renderAssetPattern, renderDatacenterPattern, renderEventNamePattern)
|
||||||
@@ -111,5 +64,5 @@ func testAI(t *testing.T, ai AI) {
|
|||||||
}
|
}
|
||||||
t.Logf("\n\t%s\n->\n\t%s", input, result)
|
t.Logf("\n\t%s\n->\n\t%s", input, result)
|
||||||
})
|
})
|
||||||
|
*/
|
||||||
}
|
}
|
||||||
|
|||||||
59
config.go
59
config.go
@@ -5,6 +5,7 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"log"
|
||||||
"os"
|
"os"
|
||||||
"regexp"
|
"regexp"
|
||||||
"strconv"
|
"strconv"
|
||||||
@@ -22,20 +23,24 @@ type Config struct {
|
|||||||
BasicAuthUser string
|
BasicAuthUser string
|
||||||
BasicAuthPassword string
|
BasicAuthPassword string
|
||||||
FillWithTestdata bool
|
FillWithTestdata bool
|
||||||
OllamaURL string
|
OllamaUrl string
|
||||||
OllamaModel string
|
OllamaModel string
|
||||||
LocalCheckpoint string
|
RecapPromptIntro string
|
||||||
LocalTokenizer string
|
RecapPrompt string
|
||||||
AssetPattern string
|
AssetPattern string
|
||||||
DatacenterPattern string
|
DatacenterPattern string
|
||||||
EventNamePattern string
|
EventNamePattern string
|
||||||
driver Driver
|
driver Driver
|
||||||
|
storage Storage
|
||||||
ai AI
|
ai AI
|
||||||
slackToMessagePipeline Pipeline
|
slackToModelPipeline Pipeline
|
||||||
|
slackScrapePipeline Pipeline
|
||||||
|
modelToPersistencePipeline Pipeline
|
||||||
|
persistenceToRecapPipeline Pipeline
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
renderAssetPattern = `(dpg|svc|red)-[a-z0-9-]*[a-z0-9]`
|
renderAssetPattern = `(dpg|svc|red)-[a-z0-9-]*[a-z0-9]|ip-[0-9]+-[0-9]+-[0-9]+-[0-9]+\.[a-z]+-[a-z]+-[0-9]+\.compute\.internal`
|
||||||
renderDatacenterPattern = `[a-z]{4}[a-z]*-[0-9]`
|
renderDatacenterPattern = `[a-z]{4}[a-z]*-[0-9]`
|
||||||
renderEventNamePattern = `(\[[^\]]*\] *)?(?P<result>.*)`
|
renderEventNamePattern = `(\[[^\]]*\] *)?(?P<result>.*)`
|
||||||
)
|
)
|
||||||
@@ -47,10 +52,12 @@ func newConfig(ctx context.Context) (Config, error) {
|
|||||||
func newConfigFromEnv(ctx context.Context, getEnv func(string) string) (Config, error) {
|
func newConfigFromEnv(ctx context.Context, getEnv func(string) string) (Config, error) {
|
||||||
def := Config{
|
def := Config{
|
||||||
Port: 38080,
|
Port: 38080,
|
||||||
OllamaModel: "gemma:2b",
|
OllamaModel: "llama3",
|
||||||
AssetPattern: renderAssetPattern,
|
AssetPattern: renderAssetPattern,
|
||||||
DatacenterPattern: renderDatacenterPattern,
|
DatacenterPattern: renderDatacenterPattern,
|
||||||
EventNamePattern: renderEventNamePattern,
|
EventNamePattern: renderEventNamePattern,
|
||||||
|
RecapPromptIntro: "A Slack thread began with the following original post.",
|
||||||
|
RecapPrompt: "What is the summary of the responses to the Slack thread consisting of the following messages? Limit the summary to one sentence. Do not include any leading text. Be as brief as possible. No context is needed.",
|
||||||
}
|
}
|
||||||
|
|
||||||
var m map[string]any
|
var m map[string]any
|
||||||
@@ -104,8 +111,9 @@ func newConfigFromEnv(ctx context.Context, getEnv func(string) string) (Config,
|
|||||||
return Config{}, err
|
return Config{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx, can := context.WithTimeout(ctx, time.Second*10)
|
ctx, can := context.WithTimeout(ctx, time.Minute)
|
||||||
defer can()
|
defer can()
|
||||||
|
|
||||||
driver, err := NewDriver(ctx, result.DriverConn)
|
driver, err := NewDriver(ctx, result.DriverConn)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return Config{}, err
|
return Config{}, err
|
||||||
@@ -116,20 +124,45 @@ func newConfigFromEnv(ctx context.Context, getEnv func(string) string) (Config,
|
|||||||
} else {
|
} else {
|
||||||
return Config{}, errors.New("not impl")
|
return Config{}, errors.New("not impl")
|
||||||
}
|
}
|
||||||
|
if result.Debug {
|
||||||
|
log.Printf("connected to driver at %s (%s @%s)", result.DriverConn, result.driver.engine, result.driver.conn)
|
||||||
|
}
|
||||||
|
|
||||||
if result.OllamaURL != "" {
|
storage, err := NewStorage(ctx, result.driver)
|
||||||
result.ai = NewAIOllama(result.OllamaURL, result.OllamaModel)
|
if err != nil {
|
||||||
} else if result.LocalCheckpoint != "" && result.LocalTokenizer != "" {
|
return Config{}, err
|
||||||
result.ai = NewAILocal(result.LocalCheckpoint, result.LocalTokenizer, 0.9, 128, 0.9)
|
}
|
||||||
|
result.storage = storage
|
||||||
|
|
||||||
|
if result.OllamaUrl != "" {
|
||||||
|
result.ai = NewAIOllama(result.OllamaUrl, result.OllamaModel)
|
||||||
} else {
|
} else {
|
||||||
result.ai = NewAINoop()
|
result.ai = NewAINoop()
|
||||||
}
|
}
|
||||||
|
|
||||||
slackToMessagePipeline, err := NewSlackToMessagePipeline(ctx, result)
|
slackToModelPipeline, err := NewSlackToModelPipeline(ctx, result)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return Config{}, err
|
return Config{}, err
|
||||||
}
|
}
|
||||||
result.slackToMessagePipeline = slackToMessagePipeline
|
result.slackToModelPipeline = slackToModelPipeline
|
||||||
|
|
||||||
|
modelToPersistencePipeline, err := NewModelToPersistencePipeline(ctx, result)
|
||||||
|
if err != nil {
|
||||||
|
return Config{}, err
|
||||||
|
}
|
||||||
|
result.modelToPersistencePipeline = modelToPersistencePipeline
|
||||||
|
|
||||||
|
slackScrapePipeline, err := NewSlackScrapePipeline(ctx, result)
|
||||||
|
if err != nil {
|
||||||
|
return Config{}, err
|
||||||
|
}
|
||||||
|
result.slackScrapePipeline = slackScrapePipeline
|
||||||
|
|
||||||
|
persistenceToRecapPipeline, err := NewPersistenceToRecapPipeline(ctx, result)
|
||||||
|
if err != nil {
|
||||||
|
return Config{}, err
|
||||||
|
}
|
||||||
|
result.persistenceToRecapPipeline = persistenceToRecapPipeline
|
||||||
|
|
||||||
return result, nil
|
return result, nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -6,6 +6,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestNewConfig(t *testing.T) {
|
func TestNewConfig(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
if got, err := newConfigFromEnv(context.Background(), func(k string) string {
|
if got, err := newConfigFromEnv(context.Background(), func(k string) string {
|
||||||
t.Logf("getenv(%s)", k)
|
t.Logf("getenv(%s)", k)
|
||||||
switch k {
|
switch k {
|
||||||
|
|||||||
27
driver.go
27
driver.go
@@ -6,19 +6,42 @@ import (
|
|||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/url"
|
"net/url"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"testing"
|
||||||
|
|
||||||
_ "github.com/glebarez/go-sqlite"
|
_ "github.com/glebarez/go-sqlite"
|
||||||
_ "github.com/lib/pq"
|
_ "github.com/lib/pq"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Driver struct {
|
type Driver struct {
|
||||||
|
engine string
|
||||||
|
conn string
|
||||||
*sql.DB
|
*sql.DB
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func NewTestDriver(t *testing.T, optionalP ...string) Driver {
|
||||||
|
p := path.Join(t.TempDir(), "db")
|
||||||
|
if len(optionalP) > 0 {
|
||||||
|
p = optionalP[0]
|
||||||
|
}
|
||||||
|
driver, err := NewDriver(context.Background(), p)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
t.Cleanup(func() { driver.Close() })
|
||||||
|
return driver
|
||||||
|
}
|
||||||
|
|
||||||
func NewDriver(ctx context.Context, conn string) (Driver, error) {
|
func NewDriver(ctx context.Context, conn string) (Driver, error) {
|
||||||
engine := "sqlite"
|
engine := "sqlite"
|
||||||
if conn == "" {
|
if conn == "" {
|
||||||
conn = ":memory:"
|
f, err := os.CreateTemp(os.TempDir(), "spoc-bot-vr-undef-*.db")
|
||||||
|
if err != nil {
|
||||||
|
return Driver{}, err
|
||||||
|
}
|
||||||
|
f.Close()
|
||||||
|
conn = f.Name()
|
||||||
} else {
|
} else {
|
||||||
if u, err := url.Parse(conn); err != nil {
|
if u, err := url.Parse(conn); err != nil {
|
||||||
return Driver{}, err
|
return Driver{}, err
|
||||||
@@ -32,7 +55,7 @@ func NewDriver(ctx context.Context, conn string) (Driver, error) {
|
|||||||
return Driver{}, err
|
return Driver{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
driver := Driver{DB: db}
|
driver := Driver{DB: db, conn: conn, engine: engine}
|
||||||
if err := driver.setup(ctx); err != nil {
|
if err := driver.setup(ctx); err != nil {
|
||||||
driver.Close()
|
driver.Close()
|
||||||
return Driver{}, fmt.Errorf("failed setup: %w", err)
|
return Driver{}, fmt.Errorf("failed setup: %w", err)
|
||||||
|
|||||||
53
driver_integration_test.go
Normal file
53
driver_integration_test.go
Normal file
@@ -0,0 +1,53 @@
|
|||||||
|
//go:build integration
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"os"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/breel-render/spoc-bot-vr/model"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestDriverIntegration(t *testing.T) {
|
||||||
|
ctx, can := context.WithTimeout(context.Background(), time.Second*30)
|
||||||
|
defer can()
|
||||||
|
|
||||||
|
driver, err := NewDriver(ctx, os.Getenv("DRIVER_CONN"))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
defer driver.Close()
|
||||||
|
|
||||||
|
q, err := NewQueue(ctx, t.Name(), driver)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
qV := []byte("hello")
|
||||||
|
if err := q.Enqueue(ctx, qV); err != nil {
|
||||||
|
t.Error("q cannot enqueue:", err)
|
||||||
|
} else if reservation, v, err := q.Syn(ctx); err != nil {
|
||||||
|
t.Error("q cannot syn:", err)
|
||||||
|
} else if string(v) != string(qV) {
|
||||||
|
t.Error("q enqueued wrong:", string(v))
|
||||||
|
} else if len(reservation) == 0 {
|
||||||
|
t.Error("q didnt have reservation")
|
||||||
|
} else if err := q.Ack(ctx, reservation); err != nil {
|
||||||
|
t.Error("q cannot ack:", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
s, err := NewStorage(ctx, driver)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
evt := model.Event{ID: "x", Name: "y"}
|
||||||
|
if err := s.UpsertEvent(ctx, evt); err != nil {
|
||||||
|
t.Error("s cannot upsert:", err)
|
||||||
|
} else if e, err := s.GetEvent(ctx, evt.ID); err != nil {
|
||||||
|
t.Error("s cannot get:", err)
|
||||||
|
} else if e != evt {
|
||||||
|
t.Error("s upserted wrong:", e)
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -6,7 +6,13 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func TestNewTestDriver(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
NewTestDriver(t)
|
||||||
|
}
|
||||||
|
|
||||||
func TestDriver(t *testing.T) {
|
func TestDriver(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
ctx, can := context.WithTimeout(context.Background(), time.Second*15)
|
ctx, can := context.WithTimeout(context.Background(), time.Second*15)
|
||||||
defer can()
|
defer can()
|
||||||
|
|
||||||
|
|||||||
5
go.mod
5
go.mod
@@ -6,9 +6,9 @@ require (
|
|||||||
github.com/glebarez/go-sqlite v1.21.2
|
github.com/glebarez/go-sqlite v1.21.2
|
||||||
github.com/google/uuid v1.6.0
|
github.com/google/uuid v1.6.0
|
||||||
github.com/lib/pq v1.10.9
|
github.com/lib/pq v1.10.9
|
||||||
github.com/nikolaydubina/llama2.go v0.7.1
|
|
||||||
github.com/tmc/langchaingo v0.1.8
|
github.com/tmc/langchaingo v0.1.8
|
||||||
gotest.tools/v3 v3.5.1
|
golang.org/x/time v0.5.0
|
||||||
|
gotest.tools v2.2.0+incompatible
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
@@ -16,6 +16,7 @@ require (
|
|||||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||||
github.com/google/go-cmp v0.6.0 // indirect
|
github.com/google/go-cmp v0.6.0 // indirect
|
||||||
github.com/mattn/go-isatty v0.0.19 // indirect
|
github.com/mattn/go-isatty v0.0.19 // indirect
|
||||||
|
github.com/pkg/errors v0.9.1 // indirect
|
||||||
github.com/pkoukk/tiktoken-go v0.1.6 // indirect
|
github.com/pkoukk/tiktoken-go v0.1.6 // indirect
|
||||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
|
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
|
||||||
golang.org/x/sys v0.16.0 // indirect
|
golang.org/x/sys v0.16.0 // indirect
|
||||||
|
|||||||
10
go.sum
10
go.sum
@@ -16,8 +16,8 @@ github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
|
|||||||
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||||
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
|
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
|
||||||
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||||
github.com/nikolaydubina/llama2.go v0.7.1 h1:ORmH1XbwFYGIOPHprkjtUPOEovlVXhnmnMjbMckaSyE=
|
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||||
github.com/nikolaydubina/llama2.go v0.7.1/go.mod h1:ggXhXOaDnEAgSSkcYsomqx/RLjInxe5ZAbcJ+/Y2mTM=
|
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
github.com/pkoukk/tiktoken-go v0.1.6 h1:JF0TlJzhTbrI30wCvFuiw6FzP2+/bR+FIxUdgEAcUsw=
|
github.com/pkoukk/tiktoken-go v0.1.6 h1:JF0TlJzhTbrI30wCvFuiw6FzP2+/bR+FIxUdgEAcUsw=
|
||||||
github.com/pkoukk/tiktoken-go v0.1.6/go.mod h1:9NiV+i9mJKGj1rYOT+njbv+ZwA/zJxYdewGl6qVatpg=
|
github.com/pkoukk/tiktoken-go v0.1.6/go.mod h1:9NiV+i9mJKGj1rYOT+njbv+ZwA/zJxYdewGl6qVatpg=
|
||||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||||
@@ -32,10 +32,12 @@ github.com/tmc/langchaingo v0.1.8/go.mod h1:iNBfS9e6jxBKsJSPWnlqNhoVWgdA3D1g5cdF
|
|||||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU=
|
golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU=
|
||||||
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||||
|
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
|
||||||
|
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
|
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
|
||||||
gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
|
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||||
modernc.org/libc v1.22.5 h1:91BNch/e5B0uPbJFgqbxXuOnxBQjlS//icfQEGmvyjE=
|
modernc.org/libc v1.22.5 h1:91BNch/e5B0uPbJFgqbxXuOnxBQjlS//icfQEGmvyjE=
|
||||||
modernc.org/libc v1.22.5/go.mod h1:jj+Z7dTNX8fBScMVNRAYZ/jF91K8fdT2hYMThc3YjBY=
|
modernc.org/libc v1.22.5/go.mod h1:jj+Z7dTNX8fBScMVNRAYZ/jF91K8fdT2hYMThc3YjBY=
|
||||||
modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ=
|
modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ=
|
||||||
|
|||||||
186
main.go
186
main.go
@@ -36,22 +36,32 @@ func run(ctx context.Context, cfg Config) error {
|
|||||||
select {
|
select {
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
return ctx.Err()
|
return ctx.Err()
|
||||||
case err := <-processSlackToMessagePipeline(ctx, cfg):
|
case err := <-processPipelines(ctx,
|
||||||
|
cfg.slackToModelPipeline,
|
||||||
|
cfg.modelToPersistencePipeline,
|
||||||
|
cfg.slackScrapePipeline,
|
||||||
|
cfg.persistenceToRecapPipeline,
|
||||||
|
):
|
||||||
return err
|
return err
|
||||||
case err := <-listenAndServe(ctx, cfg):
|
case err := <-listenAndServe(ctx, cfg):
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func processSlackToMessagePipeline(ctx context.Context, cfg Config) chan error {
|
func processPipelines(ctx context.Context, first Pipeline, pipelines ...Pipeline) chan error {
|
||||||
|
ctx, can := context.WithCancel(ctx)
|
||||||
|
|
||||||
|
pipelines = append(pipelines, first)
|
||||||
errs := make(chan error)
|
errs := make(chan error)
|
||||||
go func() {
|
for i := range pipelines {
|
||||||
defer close(errs)
|
go func(i int) {
|
||||||
|
defer can()
|
||||||
select {
|
select {
|
||||||
case errs <- cfg.slackToMessagePipeline.Process(ctx):
|
case errs <- pipelines[i].Process(ctx):
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
}
|
}
|
||||||
}()
|
}(i)
|
||||||
|
}
|
||||||
return errs
|
return errs
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -77,13 +87,10 @@ func listenAndServe(ctx context.Context, cfg Config) chan error {
|
|||||||
func newHandler(cfg Config) http.HandlerFunc {
|
func newHandler(cfg Config) http.HandlerFunc {
|
||||||
mux := http.NewServeMux()
|
mux := http.NewServeMux()
|
||||||
|
|
||||||
mux.Handle("GET /api/v1/eventnames", http.HandlerFunc(newHandlerGetAPIV1EventNames(cfg)))
|
mux.Handle("GET /api/v1/version", http.HandlerFunc(newHandlerGetAPIV1Version))
|
||||||
mux.Handle("GET /api/v1/events", http.HandlerFunc(newHandlerGetAPIV1Events(cfg)))
|
|
||||||
mux.Handle("GET /api/v1/messages", http.HandlerFunc(newHandlerGetAPIV1Messages(cfg)))
|
|
||||||
mux.Handle("GET /api/v1/threads", http.HandlerFunc(newHandlerGetAPIV1Threads(cfg)))
|
|
||||||
mux.Handle("GET /api/v1/threads/{thread}", http.HandlerFunc(newHandlerGetAPIV1ThreadsThread(cfg)))
|
|
||||||
mux.Handle("POST /api/v1/events/slack", http.HandlerFunc(newHandlerPostAPIV1EventsSlack(cfg)))
|
mux.Handle("POST /api/v1/events/slack", http.HandlerFunc(newHandlerPostAPIV1EventsSlack(cfg)))
|
||||||
mux.Handle("PUT /api/v1/rpc/scrapeslack", http.HandlerFunc(newHandlerPutAPIV1RPCScrapeSlack(cfg)))
|
mux.Handle("PUT /api/v1/rpc/scrapeslack", http.HandlerFunc(newHandlerPutAPIV1RPCScrapeSlack(cfg)))
|
||||||
|
mux.Handle("GET /api/v1/rpc/recapevent", http.HandlerFunc(newHandlerGetAPIV1RPCRecapEvent(cfg)))
|
||||||
|
|
||||||
return func(w http.ResponseWriter, r *http.Request) {
|
return func(w http.ResponseWriter, r *http.Request) {
|
||||||
if cfg.Debug {
|
if cfg.Debug {
|
||||||
@@ -96,105 +103,51 @@ func newHandler(cfg Config) http.HandlerFunc {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var Version = "undef"
|
||||||
|
|
||||||
|
func newHandlerGetAPIV1Version(w http.ResponseWriter, _ *http.Request) {
|
||||||
|
json.NewEncoder(w).Encode(map[string]any{"version": Version})
|
||||||
|
}
|
||||||
|
|
||||||
|
func newHandlerGetAPIV1RPCRecapEvent(cfg Config) http.HandlerFunc {
|
||||||
|
return func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
if !basicAuth(cfg, w, r) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
event := r.URL.Query().Get("id")
|
||||||
|
b, _ := json.Marshal(ModelIDs{Event: event})
|
||||||
|
if err := cfg.persistenceToRecapPipeline.reader.Enqueue(r.Context(), b); err != nil {
|
||||||
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
json.NewEncoder(w).Encode(map[string]any{"event": event})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func newHandlerPutAPIV1RPCScrapeSlack(cfg Config) http.HandlerFunc {
|
func newHandlerPutAPIV1RPCScrapeSlack(cfg Config) http.HandlerFunc {
|
||||||
return func(w http.ResponseWriter, r *http.Request) {
|
return func(w http.ResponseWriter, r *http.Request) {
|
||||||
if !basicAuth(cfg, w, r) {
|
if !basicAuth(cfg, w, r) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
channel := r.Header.Get("slack-channel")
|
|
||||||
token := r.Header.Get("slack-oauth-token")
|
|
||||||
|
|
||||||
req, err := http.NewRequest(http.MethodGet, "https://slack.com/api/conversations.history?channel="+channel, nil)
|
since, err := parseSince(r.URL.Query().Get("since"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
job, _ := json.Marshal(SlackScrape{
|
||||||
|
Latest: time.Now().Unix(),
|
||||||
|
Oldest: since.Unix(),
|
||||||
|
ThreadTS: "",
|
||||||
|
Channel: r.Header.Get("slack-channel"),
|
||||||
|
Token: r.Header.Get("slack-oauth-token"),
|
||||||
|
})
|
||||||
|
if err := cfg.slackScrapePipeline.reader.Enqueue(r.Context(), job); err != nil {
|
||||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
req.Header.Set("Authorization", "Bearer "+token)
|
|
||||||
|
|
||||||
resp, err := http.DefaultClient.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
http.Error(w, err.Error(), http.StatusBadGateway)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
defer io.Copy(io.Discard, resp.Body)
|
|
||||||
|
|
||||||
var page struct {
|
|
||||||
OK bool
|
|
||||||
Messages []json.RawMessage
|
|
||||||
}
|
|
||||||
if err := json.NewDecoder(resp.Body).Decode(&page); err != nil {
|
|
||||||
http.Error(w, err.Error(), http.StatusBadGateway)
|
|
||||||
return
|
|
||||||
} else if !page.OK {
|
|
||||||
http.Error(w, "slack page was !.ok", http.StatusBadGateway)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
errs := []error{}
|
|
||||||
for _, messageJSON := range page.Messages {
|
|
||||||
if err := cfg.slackToMessagePipeline.reader.Enqueue(r.Context(), messageJSON); err != nil {
|
|
||||||
errs = append(errs, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(errs) > 0 {
|
|
||||||
http.Error(w, fmt.Sprint(errs), http.StatusInternalServerError)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
json.NewEncoder(w).Encode(map[string]any{"scraped": len(page.Messages)})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func newHandlerGetAPIV1EventNames(cfg Config) http.HandlerFunc {
|
|
||||||
return func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
if !basicAuth(cfg, w, r) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
http.Error(w, "not impl", http.StatusNotImplemented)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func newHandlerGetAPIV1Events(cfg Config) http.HandlerFunc {
|
|
||||||
return func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
if !basicAuth(cfg, w, r) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
http.Error(w, "not impl", http.StatusNotImplemented)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func newHandlerGetAPIV1Messages(cfg Config) http.HandlerFunc {
|
|
||||||
return func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
if !basicAuth(cfg, w, r) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
http.Error(w, "not impl", http.StatusNotImplemented)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func newHandlerGetAPIV1Threads(cfg Config) http.HandlerFunc {
|
|
||||||
return func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
if !basicAuth(cfg, w, r) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
http.Error(w, "not impl", http.StatusNotImplemented)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func newHandlerGetAPIV1ThreadsThread(cfg Config) http.HandlerFunc {
|
|
||||||
return func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
if !basicAuth(cfg, w, r) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
thread := strings.Split(strings.Split(r.URL.Path, "/threads/")[1], "/")[0]
|
|
||||||
_ = thread
|
|
||||||
|
|
||||||
http.Error(w, "not impl", http.StatusNotImplemented)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -208,12 +161,13 @@ func basicAuth(cfg Config, w http.ResponseWriter, r *http.Request) bool {
|
|||||||
|
|
||||||
func newHandlerPostAPIV1EventsSlack(cfg Config) http.HandlerFunc {
|
func newHandlerPostAPIV1EventsSlack(cfg Config) http.HandlerFunc {
|
||||||
if cfg.InitializeSlack {
|
if cfg.InitializeSlack {
|
||||||
return handlerPostAPIV1EventsSlackInitialize
|
return handlerPostAPIV1EventsSlackInitialize(cfg)
|
||||||
}
|
}
|
||||||
return _newHandlerPostAPIV1EventsSlack(cfg)
|
return _newHandlerPostAPIV1EventsSlack(cfg)
|
||||||
}
|
}
|
||||||
|
|
||||||
func handlerPostAPIV1EventsSlackInitialize(w http.ResponseWriter, r *http.Request) {
|
func handlerPostAPIV1EventsSlackInitialize(cfg Config) http.HandlerFunc {
|
||||||
|
return func(w http.ResponseWriter, r *http.Request) {
|
||||||
b, _ := io.ReadAll(r.Body)
|
b, _ := io.ReadAll(r.Body)
|
||||||
var challenge struct {
|
var challenge struct {
|
||||||
Token string
|
Token string
|
||||||
@@ -224,14 +178,32 @@ func handlerPostAPIV1EventsSlackInitialize(w http.ResponseWriter, r *http.Reques
|
|||||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
cfg.driver.ExecContext(r.Context(), `
|
||||||
|
CREATE TABLE
|
||||||
|
IF NOT EXISTS
|
||||||
|
initialization (
|
||||||
|
label TEXT,
|
||||||
|
token TEXT,
|
||||||
|
updated TIMESTAMP
|
||||||
|
)
|
||||||
|
`)
|
||||||
|
if _, err := cfg.driver.ExecContext(r.Context(), `
|
||||||
|
INSERT
|
||||||
|
INTO initialization (label, token, updated)
|
||||||
|
VALUES ('slack_events_webhook_token', $1, $2)
|
||||||
|
`, challenge.Token, time.Now().UTC()); err != nil {
|
||||||
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
log.Println("stashed new slack initialization token", challenge.Token)
|
||||||
encodeResponse(w, r, map[string]any{"challenge": challenge.Challenge})
|
encodeResponse(w, r, map[string]any{"challenge": challenge.Challenge})
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func _newHandlerPostAPIV1EventsSlack(cfg Config) http.HandlerFunc {
|
func _newHandlerPostAPIV1EventsSlack(cfg Config) http.HandlerFunc {
|
||||||
return func(w http.ResponseWriter, r *http.Request) {
|
return func(w http.ResponseWriter, r *http.Request) {
|
||||||
b, _ := io.ReadAll(r.Body)
|
body, _ := io.ReadAll(r.Body)
|
||||||
r.Body = io.NopCloser(bytes.NewReader(b))
|
r.Body = io.NopCloser(bytes.NewReader(body))
|
||||||
|
|
||||||
var allowList struct {
|
var allowList struct {
|
||||||
Token string
|
Token string
|
||||||
@@ -239,7 +211,7 @@ func _newHandlerPostAPIV1EventsSlack(cfg Config) http.HandlerFunc {
|
|||||||
Channel string
|
Channel string
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if err := json.Unmarshal(b, &allowList); err != nil {
|
if err := json.Unmarshal(body, &allowList); err != nil {
|
||||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||||
return
|
return
|
||||||
} else if allowList.Token != cfg.SlackToken {
|
} else if allowList.Token != cfg.SlackToken {
|
||||||
@@ -256,7 +228,7 @@ func _newHandlerPostAPIV1EventsSlack(cfg Config) http.HandlerFunc {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := cfg.slackToMessagePipeline.reader.Enqueue(r.Context(), b); err != nil {
|
if err := cfg.slackToModelPipeline.reader.Enqueue(r.Context(), body); err != nil {
|
||||||
log.Printf("failed to ingest: %v", err)
|
log.Printf("failed to ingest: %v", err)
|
||||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||||
return
|
return
|
||||||
|
|||||||
178
main_test.go
178
main_test.go
@@ -3,8 +3,6 @@ package main
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"encoding/csv"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
@@ -19,6 +17,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestRun(t *testing.T) {
|
func TestRun(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
ctx, can := context.WithTimeout(context.Background(), time.Second*10)
|
ctx, can := context.WithTimeout(context.Background(), time.Second*10)
|
||||||
defer can()
|
defer can()
|
||||||
|
|
||||||
@@ -37,16 +36,25 @@ func TestRun(t *testing.T) {
|
|||||||
return int(port)
|
return int(port)
|
||||||
}()
|
}()
|
||||||
u := fmt.Sprintf("http://localhost:%d", port)
|
u := fmt.Sprintf("http://localhost:%d", port)
|
||||||
|
var err error
|
||||||
|
|
||||||
cfg := Config{}
|
cfg := Config{}
|
||||||
cfg.DatacenterPattern = renderDatacenterPattern
|
cfg.DatacenterPattern = renderDatacenterPattern
|
||||||
cfg.AssetPattern = renderAssetPattern
|
cfg.AssetPattern = renderAssetPattern
|
||||||
cfg.EventNamePattern = renderEventNamePattern
|
cfg.EventNamePattern = renderEventNamePattern
|
||||||
cfg.Port = port
|
cfg.Port = port
|
||||||
cfg.driver, _ = NewDriver(ctx, "")
|
cfg.driver = NewTestDriver(t)
|
||||||
cfg.slackToMessagePipeline, _ = NewSlackToMessagePipeline(ctx, cfg)
|
cfg.storage, _ = NewStorage(ctx, cfg.driver)
|
||||||
|
cfg.ai = NewAINoop()
|
||||||
cfg.SlackToken = "redacted"
|
cfg.SlackToken = "redacted"
|
||||||
cfg.SlackChannels = []string{"C06U1DDBBU4"}
|
cfg.SlackChannels = []string{"C06U1DDBBU4"}
|
||||||
|
cfg.slackToModelPipeline, _ = NewSlackToModelPipeline(ctx, cfg)
|
||||||
|
cfg.slackScrapePipeline, _ = NewSlackScrapePipeline(ctx, cfg)
|
||||||
|
cfg.modelToPersistencePipeline, _ = NewModelToPersistencePipeline(ctx, cfg)
|
||||||
|
cfg.persistenceToRecapPipeline, err = NewPersistenceToRecapPipeline(ctx, cfg)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
if err := run(ctx, cfg); err != nil && ctx.Err() == nil {
|
if err := run(ctx, cfg); err != nil && ctx.Err() == nil {
|
||||||
@@ -83,164 +91,40 @@ func TestRun(t *testing.T) {
|
|||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("GET /api/v1/messages", func(t *testing.T) {
|
t.Run("GET /api/v1/rpc/recapevent", func(t *testing.T) {
|
||||||
resp, err := http.Get(fmt.Sprintf("%s/api/v1/messages", u))
|
b, err := os.ReadFile(path.Join("testdata", "slack_events", "human_thread_message_from_opsgenie_alert.json"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
defer resp.Body.Close()
|
if err := cfg.slackToModelPipeline.reader.Enqueue(ctx, b); err != nil {
|
||||||
|
|
||||||
if resp.StatusCode != http.StatusOK {
|
|
||||||
b, _ := io.ReadAll(resp.Body)
|
|
||||||
t.Fatalf("(%d) %s", resp.StatusCode, b)
|
|
||||||
}
|
|
||||||
var result struct {
|
|
||||||
Messages []Message
|
|
||||||
}
|
|
||||||
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
|
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
} else if len(result.Messages) != 1 {
|
|
||||||
t.Fatal(result.Messages)
|
|
||||||
} else {
|
|
||||||
t.Logf("%+v", result)
|
|
||||||
}
|
}
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("GET /api/v1/eventnames", func(t *testing.T) {
|
b, err = os.ReadFile(path.Join("testdata", "slack_events", "opsgenie_alert.json"))
|
||||||
resp, err := http.Get(fmt.Sprintf("%s/api/v1/eventnames", u))
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
defer resp.Body.Close()
|
if err := cfg.slackToModelPipeline.reader.Enqueue(ctx, b); err != nil {
|
||||||
|
|
||||||
if resp.StatusCode != http.StatusOK {
|
|
||||||
b, _ := io.ReadAll(resp.Body)
|
|
||||||
t.Fatalf("(%d) %s", resp.StatusCode, b)
|
|
||||||
}
|
|
||||||
var result struct {
|
|
||||||
EventNames []string
|
|
||||||
}
|
|
||||||
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
} else if result.EventNames[0] != "Wal Receive Count Alert" {
|
|
||||||
t.Fatal(result.EventNames)
|
|
||||||
} else {
|
|
||||||
t.Logf("%+v", result)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("GET /api/v1/events", func(t *testing.T) {
|
|
||||||
resp, err := http.Get(fmt.Sprintf("%s/api/v1/events", u))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
defer resp.Body.Close()
|
|
||||||
|
|
||||||
if resp.StatusCode != http.StatusOK {
|
for ctx.Err() == nil {
|
||||||
b, _ := io.ReadAll(resp.Body)
|
if thread, _ := cfg.storage.GetThread(ctx, "1712927439.728409"); thread.Recap != "" {
|
||||||
t.Fatalf("(%d) %s", resp.StatusCode, b)
|
|
||||||
}
|
|
||||||
var result struct {
|
|
||||||
Events []string
|
|
||||||
}
|
|
||||||
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
} else if result.Events[0] != "11067" {
|
|
||||||
t.Fatal(result.Events)
|
|
||||||
} else {
|
|
||||||
t.Logf("%+v", result)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("GET /api/v1/threads", func(t *testing.T) {
|
|
||||||
resp, err := http.Get(fmt.Sprintf("%s/api/v1/threads", u))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
|
|
||||||
if resp.StatusCode != http.StatusOK {
|
|
||||||
b, _ := io.ReadAll(resp.Body)
|
|
||||||
t.Fatalf("(%d) %s", resp.StatusCode, b)
|
|
||||||
}
|
|
||||||
var result struct {
|
|
||||||
Threads []string
|
|
||||||
}
|
|
||||||
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
} else if result.Threads[0] != "1712911957.023359" {
|
|
||||||
t.Fatal(result.Threads)
|
|
||||||
} else {
|
|
||||||
t.Logf("%+v", result)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("GET /api/v1/threads/1712911957.023359", func(t *testing.T) {
|
|
||||||
resp, err := http.Get(fmt.Sprintf("%s/api/v1/threads/1712911957.023359", u))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
|
|
||||||
if resp.StatusCode != http.StatusOK {
|
|
||||||
b, _ := io.ReadAll(resp.Body)
|
|
||||||
t.Fatalf("(%d) %s", resp.StatusCode, b)
|
|
||||||
}
|
|
||||||
|
|
||||||
var result struct {
|
|
||||||
Thread []Message
|
|
||||||
}
|
|
||||||
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
} else if len(result.Thread) != 1 {
|
|
||||||
t.Fatal(result.Thread)
|
|
||||||
} else {
|
|
||||||
t.Logf("%+v", result)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("CSV GET /api/v1/threads/1712911957.023359", func(t *testing.T) {
|
|
||||||
req, err := http.NewRequest(http.MethodGet, fmt.Sprintf("%s/api/v1/threads/1712911957.023359", u), nil)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
req.Header.Set("Accept", "text/csv")
|
|
||||||
|
|
||||||
resp, err := http.DefaultClient.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
|
|
||||||
if resp.StatusCode != http.StatusOK {
|
|
||||||
b, _ := io.ReadAll(resp.Body)
|
|
||||||
t.Fatalf("(%d) %s", resp.StatusCode, b)
|
|
||||||
}
|
|
||||||
|
|
||||||
b, _ := io.ReadAll(resp.Body)
|
|
||||||
t.Logf("whole csv: \n%s", b)
|
|
||||||
|
|
||||||
dec := csv.NewReader(bytes.NewReader(b))
|
|
||||||
var lastLine []string
|
|
||||||
for {
|
|
||||||
line, err := dec.Read()
|
|
||||||
if err == io.EOF {
|
|
||||||
break
|
break
|
||||||
} else if err != nil {
|
}
|
||||||
t.Error("unexpected error while reading csv line:", err)
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
case <-time.After(time.Millisecond * 100):
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := ctx.Err(); err != nil {
|
||||||
|
t.Fatal("timed out waiting for recap")
|
||||||
}
|
}
|
||||||
|
|
||||||
if lastLine == nil {
|
thread, _ := cfg.storage.GetThread(ctx, "1712927439.728409")
|
||||||
} else if len(lastLine) != len(line) {
|
if thread.Recap == "" {
|
||||||
t.Errorf("last line had %v elements but this line has %v", len(lastLine), len(line))
|
t.Error(thread.Recap)
|
||||||
}
|
}
|
||||||
|
t.Log(thread.Recap)
|
||||||
t.Logf("CSV line: %+v", line)
|
|
||||||
lastLine = line
|
|
||||||
}
|
|
||||||
if lastLine == nil {
|
|
||||||
t.Error("no lines found")
|
|
||||||
}
|
|
||||||
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|||||||
156
message_test.go
156
message_test.go
@@ -1,156 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"path"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestParseSlackTestdata(t *testing.T) {
|
|
||||||
cases := map[string]struct {
|
|
||||||
inCh string
|
|
||||||
slackMessage slackMessage
|
|
||||||
message Message
|
|
||||||
}{
|
|
||||||
"human_thread_message_from_opsgenie_alert.json": {
|
|
||||||
slackMessage: slackMessage{
|
|
||||||
TS: 1712930706,
|
|
||||||
Event: slackEvent{
|
|
||||||
ID: "1712930706.598629",
|
|
||||||
Channel: "C06U1DDBBU4",
|
|
||||||
ParentID: "1712927439.728409",
|
|
||||||
Text: "I gotta do this",
|
|
||||||
Blocks: []slackBlock{{
|
|
||||||
Elements: []slackElement{{
|
|
||||||
Elements: []slackElement{{
|
|
||||||
RichText: "I gotta do this",
|
|
||||||
}},
|
|
||||||
}},
|
|
||||||
}},
|
|
||||||
Bot: slackBot{
|
|
||||||
Name: "",
|
|
||||||
},
|
|
||||||
Attachments: []slackAttachment{},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
message: Message{
|
|
||||||
ID: "1712927439.728409/1712930706",
|
|
||||||
TS: 1712930706,
|
|
||||||
Source: "https://renderinc.slack.com/archives/C06U1DDBBU4/p1712927439728409",
|
|
||||||
Channel: "C06U1DDBBU4",
|
|
||||||
Thread: "1712927439.728409",
|
|
||||||
EventName: "",
|
|
||||||
Event: "",
|
|
||||||
Plaintext: "I gotta do this",
|
|
||||||
Asset: "",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
"opsgenie_alert.json": {
|
|
||||||
slackMessage: slackMessage{
|
|
||||||
TS: 1712927439,
|
|
||||||
Event: slackEvent{
|
|
||||||
ID: "1712927439.728409",
|
|
||||||
Channel: "C06U1DDBBU4",
|
|
||||||
Bot: slackBot{
|
|
||||||
Name: "Opsgenie for Alert Management",
|
|
||||||
},
|
|
||||||
Attachments: []slackAttachment{{
|
|
||||||
Color: "F4511E",
|
|
||||||
Title: "#11071: [Grafana]: Firing: Alertconfig Workflow Failed",
|
|
||||||
Text: "At least one alertconfig run has failed unexpectedly.\nDashboard: <https://grafana.render.com/d/VLZU83YVk?orgId=1>\nPanel: <https://grafana.render.com/d/VLZU83YVk?orgId=1&viewPanel=17>\nSource: <https://grafana.render.com/alerting/grafana/fa7b06b8-b4d8-4979-bce7-5e1c432edd81/view?orgId=1>",
|
|
||||||
Fields: []slackField{
|
|
||||||
{Value: "P3", Title: "Priority"},
|
|
||||||
{Value: "alertname:Alertconfig Workflow Failed, grafana_folder:Datastores, rule_uid:a7639f7e-6950-41be-850a-b22119f74cbb", Title: "Tags"},
|
|
||||||
{Value: "Datastores Non-Critical", Title: "Routed Teams"},
|
|
||||||
},
|
|
||||||
Actions: []slackAction{{}, {}, {}},
|
|
||||||
}},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
message: Message{
|
|
||||||
ID: "1712927439.728409/1712927439",
|
|
||||||
TS: 1712927439,
|
|
||||||
Source: "https://renderinc.slack.com/archives/C06U1DDBBU4/p1712927439728409",
|
|
||||||
Channel: "C06U1DDBBU4",
|
|
||||||
Thread: "1712927439.728409",
|
|
||||||
EventName: "Alertconfig Workflow Failed",
|
|
||||||
Event: "11071",
|
|
||||||
Plaintext: "At least one alertconfig run has failed unexpectedly.\nDashboard: <https://grafana.render.com/d/VLZU83YVk?orgId=1>\nPanel: <https://grafana.render.com/d/VLZU83YVk?orgId=1&viewPanel=17>\nSource: <https://grafana.render.com/alerting/grafana/fa7b06b8-b4d8-4979-bce7-5e1c432edd81/view?orgId=1>",
|
|
||||||
Asset: "",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
"opsgenie_alert_resolved.json": {
|
|
||||||
slackMessage: slackMessage{
|
|
||||||
TS: 1712916339,
|
|
||||||
Event: slackEvent{
|
|
||||||
ID: "1712916339.000300",
|
|
||||||
Channel: "C06U1DDBBU4",
|
|
||||||
Bot: slackBot{
|
|
||||||
Name: "Opsgenie for Alert Management",
|
|
||||||
},
|
|
||||||
Attachments: []slackAttachment{{
|
|
||||||
Color: "2ecc71",
|
|
||||||
Title: "#11069: [Grafana]: Firing: Alertconfig Workflow Failed",
|
|
||||||
Text: "At least one alertconfig run has failed unexpectedly.\nDashboard: <https://grafana.render.com/d/VLZU83YVk?orgId=1>\nPanel: <https://grafana.render.com/d/VLZU83YVk?orgId=1&viewPanel=17>\nSource: <https://grafana.render.com/alerting/grafana/fa7b06b8-b4d8-4979-bce7-5e1c432edd81/view?orgId=1>",
|
|
||||||
Fields: []slackField{
|
|
||||||
{Value: "P3", Title: "Priority"},
|
|
||||||
{Value: "alertname:Alertconfig Workflow Failed, grafana_folder:Datastores, rule_uid:a7639f7e-6950-41be-850a-b22119f74cbb", Title: "Tags"},
|
|
||||||
{Value: "Datastores Non-Critical", Title: "Routed Teams"},
|
|
||||||
},
|
|
||||||
Actions: []slackAction{},
|
|
||||||
}},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
message: Message{
|
|
||||||
ID: "1712916339.000300/1712916339",
|
|
||||||
TS: 1712916339,
|
|
||||||
Source: "https://renderinc.slack.com/archives/C06U1DDBBU4/p1712916339000300",
|
|
||||||
Channel: "C06U1DDBBU4",
|
|
||||||
Thread: "1712916339.000300",
|
|
||||||
EventName: "Alertconfig Workflow Failed",
|
|
||||||
Event: "11069",
|
|
||||||
Plaintext: "At least one alertconfig run has failed unexpectedly.\nDashboard: <https://grafana.render.com/d/VLZU83YVk?orgId=1>\nPanel: <https://grafana.render.com/d/VLZU83YVk?orgId=1&viewPanel=17>\nSource: <https://grafana.render.com/alerting/grafana/fa7b06b8-b4d8-4979-bce7-5e1c432edd81/view?orgId=1>",
|
|
||||||
Asset: "",
|
|
||||||
Resolved: true,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
"reingested_alert.json": {
|
|
||||||
inCh: "C06U1DDBBU4",
|
|
||||||
message: Message{
|
|
||||||
ID: "1712892637.037639/1712892637",
|
|
||||||
TS: 1712892637,
|
|
||||||
Source: "https://renderinc.slack.com/archives/C06U1DDBBU4/p1712892637037639",
|
|
||||||
Channel: "C06U1DDBBU4",
|
|
||||||
Thread: "1712892637.037639",
|
|
||||||
EventName: "Alertconfig Workflow Failed",
|
|
||||||
Event: "11061",
|
|
||||||
Plaintext: "At least one alertconfig run has failed unexpectedly.\nDashboard: <https://grafana.render.com/d/VLZU83YVk?orgId=1>\nPanel: <https://grafana.render.com/d/VLZU83YVk?orgId=1&viewPanel=17>\nSource: <https://grafana.render.com/alerting/grafana/fa7b06b8-b4d8-4979-bce7-5e1c432edd81/view?orgId=1>",
|
|
||||||
Asset: "",
|
|
||||||
Resolved: true,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for name, d := range cases {
|
|
||||||
want := d
|
|
||||||
t.Run(name, func(t *testing.T) {
|
|
||||||
b, err := os.ReadFile(path.Join("testdata", "slack_events", name))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
t.Run("ParseSlackFromChannel "+want.inCh, func(t *testing.T) {
|
|
||||||
got, err := ParseSlackFromChannel(b, renderAssetPattern, renderDatacenterPattern, renderEventNamePattern, want.inCh)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if got != want.message {
|
|
||||||
t.Errorf("wanted \n\t%+v, got\n\t%+v", want.message, got)
|
|
||||||
}
|
|
||||||
if time := got.Time(); time.Unix() != int64(got.TS) {
|
|
||||||
t.Error("not unix time", got.TS, time)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
37
model/event.go
Normal file
37
model/event.go
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
package model
|
||||||
|
|
||||||
|
import "time"
|
||||||
|
|
||||||
|
type Event struct {
|
||||||
|
Updated uint64
|
||||||
|
ID string
|
||||||
|
URL string
|
||||||
|
TS uint64
|
||||||
|
Name string
|
||||||
|
Asset string
|
||||||
|
Datacenter string
|
||||||
|
Team string
|
||||||
|
Resolved bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewEvent(ID, URL string, TS uint64, Name, Asset, Datacenter, Team string, Resolved bool) Event {
|
||||||
|
return Event{
|
||||||
|
Updated: updated(),
|
||||||
|
ID: ID,
|
||||||
|
URL: URL,
|
||||||
|
TS: TS,
|
||||||
|
Name: Name,
|
||||||
|
Asset: Asset,
|
||||||
|
Datacenter: Datacenter,
|
||||||
|
Team: Team,
|
||||||
|
Resolved: Resolved,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e Event) Empty() bool {
|
||||||
|
return e == (Event{})
|
||||||
|
}
|
||||||
|
|
||||||
|
func updated() uint64 {
|
||||||
|
return uint64(time.Now().UnixNano() / int64(time.Millisecond))
|
||||||
|
}
|
||||||
26
model/message.go
Normal file
26
model/message.go
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
package model
|
||||||
|
|
||||||
|
// THREAD ||--|{ MESSAGE: "populated by"
|
||||||
|
type Message struct {
|
||||||
|
Updated uint64
|
||||||
|
ID string
|
||||||
|
TS uint64
|
||||||
|
Author string
|
||||||
|
Plaintext string
|
||||||
|
ThreadID string
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewMessage(ID string, TS uint64, Author, Plaintext string, ThreadID string) Message {
|
||||||
|
return Message{
|
||||||
|
Updated: updated(),
|
||||||
|
ID: ID,
|
||||||
|
TS: TS,
|
||||||
|
Author: Author,
|
||||||
|
Plaintext: Plaintext,
|
||||||
|
ThreadID: ThreadID,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m Message) Empty() bool {
|
||||||
|
return m == (Message{})
|
||||||
|
}
|
||||||
30
model/model.go
Normal file
30
model/model.go
Normal file
@@ -0,0 +1,30 @@
|
|||||||
|
package model
|
||||||
|
|
||||||
|
var _ = `
|
||||||
|
erDiagram
|
||||||
|
%% thread event eventName
|
||||||
|
EVENT ||--|{ THREAD: "spawns"
|
||||||
|
THREAD ||--|{ MESSAGE: "populated by"
|
||||||
|
|
||||||
|
MESSAGE {
|
||||||
|
|
||||||
|
ID str
|
||||||
|
URL str
|
||||||
|
TS number
|
||||||
|
Plaintext str
|
||||||
|
Author str
|
||||||
|
}
|
||||||
|
THREAD {
|
||||||
|
ID str
|
||||||
|
URL str
|
||||||
|
Channel str
|
||||||
|
}
|
||||||
|
EVENT {
|
||||||
|
ID str
|
||||||
|
Name str
|
||||||
|
Asset str
|
||||||
|
Resolved bool
|
||||||
|
Datacenter str
|
||||||
|
Team str
|
||||||
|
}
|
||||||
|
`
|
||||||
27
model/thread.go
Normal file
27
model/thread.go
Normal file
@@ -0,0 +1,27 @@
|
|||||||
|
package model
|
||||||
|
|
||||||
|
// EVENT ||--|{ THREAD: "spawns"
|
||||||
|
type Thread struct {
|
||||||
|
Updated uint64
|
||||||
|
ID string
|
||||||
|
URL string
|
||||||
|
TS uint64
|
||||||
|
Channel string
|
||||||
|
EventID string
|
||||||
|
Recap string
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewThread(ID, URL string, TS uint64, Channel string, EventID string) Thread {
|
||||||
|
return Thread{
|
||||||
|
Updated: updated(),
|
||||||
|
ID: ID,
|
||||||
|
URL: URL,
|
||||||
|
TS: TS,
|
||||||
|
Channel: Channel,
|
||||||
|
EventID: EventID,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t Thread) Empty() bool {
|
||||||
|
return t == (Thread{})
|
||||||
|
}
|
||||||
67
persistence.go
Normal file
67
persistence.go
Normal file
@@ -0,0 +1,67 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
)
|
||||||
|
|
||||||
|
type ModelToPersistence struct {
|
||||||
|
pipeline Pipeline
|
||||||
|
}
|
||||||
|
|
||||||
|
type ModelIDs struct {
|
||||||
|
Event string
|
||||||
|
Message string
|
||||||
|
Thread string
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewModelToPersistencePipeline(ctx context.Context, cfg Config) (Pipeline, error) {
|
||||||
|
reader, err := NewQueue(ctx, "new_models", cfg.driver)
|
||||||
|
if err != nil {
|
||||||
|
return Pipeline{}, err
|
||||||
|
}
|
||||||
|
writer, err := NewQueue(ctx, "new_persistence", cfg.driver)
|
||||||
|
if err != nil {
|
||||||
|
return Pipeline{}, err
|
||||||
|
}
|
||||||
|
return Pipeline{
|
||||||
|
writer: writer,
|
||||||
|
reader: reader,
|
||||||
|
process: newModelToPersistenceProcess(cfg, cfg.storage),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func newModelToPersistenceProcess(cfg Config, storage Storage) processFunc {
|
||||||
|
return func(ctx context.Context, models []byte) ([]byte, error) {
|
||||||
|
var m Models
|
||||||
|
if err := json.Unmarshal(models, &m); err != nil {
|
||||||
|
return nil, fmt.Errorf("received non models payload: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if m.Event.Empty() {
|
||||||
|
} else if err := storage.UpsertEvent(ctx, m.Event); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to persist event: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if m.Thread.Empty() {
|
||||||
|
} else if err := storage.UpsertThread(ctx, m.Thread); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to persist thread: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if m.Message.Empty() {
|
||||||
|
} else if err := storage.UpsertMessage(ctx, m.Message); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to persist message: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if cfg.Debug {
|
||||||
|
log.Printf("persisted models")
|
||||||
|
}
|
||||||
|
return json.Marshal(ModelIDs{
|
||||||
|
Event: m.Event.ID,
|
||||||
|
Thread: m.Thread.ID,
|
||||||
|
Message: m.Message.ID,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
63
persistence_test.go
Normal file
63
persistence_test.go
Normal file
@@ -0,0 +1,63 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/breel-render/spoc-bot-vr/model"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestModelToPersistenceProcessor(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
ctx, can := context.WithTimeout(context.Background(), time.Second*10)
|
||||||
|
defer can()
|
||||||
|
|
||||||
|
d := NewTestDriver(t)
|
||||||
|
s, _ := NewStorage(ctx, d)
|
||||||
|
process := newModelToPersistenceProcess(Config{}, s)
|
||||||
|
|
||||||
|
_, _ = ctx, process
|
||||||
|
|
||||||
|
inputModels := Models{
|
||||||
|
Event: model.Event{ID: "event", Asset: "event-asset"},
|
||||||
|
//Thread: {ID: "thread", Channel: "thread-channel"},
|
||||||
|
Message: model.Message{ID: "message", Plaintext: "message-plaintext"},
|
||||||
|
}
|
||||||
|
input, _ := json.Marshal(inputModels)
|
||||||
|
|
||||||
|
var outputModelIDs ModelIDs
|
||||||
|
var n int
|
||||||
|
if output, err := process(ctx, input); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if err := json.Unmarshal(output, &outputModelIDs); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if outputModelIDs != (ModelIDs{Event: "event", Message: "message"}) {
|
||||||
|
t.Error(outputModelIDs)
|
||||||
|
}
|
||||||
|
|
||||||
|
if row := d.QueryRowContext(ctx, `SELECT COUNT(*) FROM events`); row.Err() != nil {
|
||||||
|
t.Error("cant count events:", row.Err())
|
||||||
|
} else if err := row.Scan(&n); err != nil {
|
||||||
|
t.Error("cant count events:", err)
|
||||||
|
} else if n != 1 {
|
||||||
|
t.Error("bad event count:", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
if row := d.QueryRowContext(ctx, `SELECT COUNT(*) FROM threads`); row.Err() != nil {
|
||||||
|
t.Error("cant count threads:", row.Err())
|
||||||
|
} else if err := row.Scan(&n); err != nil {
|
||||||
|
t.Error("cant count threads:", err)
|
||||||
|
} else if n != 0 {
|
||||||
|
t.Error("bad thread count:", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
if row := d.QueryRowContext(ctx, `SELECT COUNT(*) FROM messages`); row.Err() != nil {
|
||||||
|
t.Error("cant count messages:", row.Err())
|
||||||
|
} else if err := row.Scan(&n); err != nil {
|
||||||
|
t.Error("cant count messages:", err)
|
||||||
|
} else if n != 1 {
|
||||||
|
t.Error("bad message count:", n)
|
||||||
|
}
|
||||||
|
}
|
||||||
15
pipeline.go
15
pipeline.go
@@ -1,6 +1,9 @@
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import "context"
|
import (
|
||||||
|
"context"
|
||||||
|
"log"
|
||||||
|
)
|
||||||
|
|
||||||
type (
|
type (
|
||||||
Pipeline struct {
|
Pipeline struct {
|
||||||
@@ -22,7 +25,14 @@ func NewPipeline(writer, reader Queue, process processFunc) Pipeline {
|
|||||||
func (p Pipeline) Process(ctx context.Context) error {
|
func (p Pipeline) Process(ctx context.Context) error {
|
||||||
ctx, can := context.WithCancel(ctx)
|
ctx, can := context.WithCancel(ctx)
|
||||||
defer can()
|
defer can()
|
||||||
|
err := p.processUntilErr(ctx)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("pipeline failed to process: %v", err)
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p Pipeline) processUntilErr(ctx context.Context) error {
|
||||||
for ctx.Err() == nil {
|
for ctx.Err() == nil {
|
||||||
reservation, read, err := p.reader.Syn(ctx)
|
reservation, read, err := p.reader.Syn(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -32,7 +42,8 @@ func (p Pipeline) Process(ctx context.Context) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := p.writer.Enqueue(ctx, processed); err != nil {
|
if processed == nil {
|
||||||
|
} else if err := p.writer.Enqueue(ctx, processed); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := p.reader.Ack(ctx, reservation); err != nil {
|
if err := p.reader.Ack(ctx, reservation); err != nil {
|
||||||
|
|||||||
@@ -6,17 +6,57 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestPipeline(t *testing.T) {
|
func TestPipelineDoesntPushEmptyMessage(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
ctx, can := context.WithTimeout(context.Background(), time.Second*10)
|
ctx, can := context.WithTimeout(context.Background(), time.Second*10)
|
||||||
defer can()
|
defer can()
|
||||||
|
|
||||||
driverOutput, _ := NewDriver(ctx, "")
|
output, _ := NewQueue(ctx, "output", NewTestDriver(t))
|
||||||
output, err := NewQueue(ctx, "output", driverOutput)
|
input, _ := NewQueue(ctx, "input", NewTestDriver(t))
|
||||||
|
|
||||||
|
calls := 0
|
||||||
|
process := func(_ context.Context, v []byte) ([]byte, error) {
|
||||||
|
calls += 1
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := input.Enqueue(ctx, []byte("hello")); err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ing := NewPipeline(output, input, process)
|
||||||
|
go func() {
|
||||||
|
defer can()
|
||||||
|
if err := ing.Process(ctx); err != nil && ctx.Err() == nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
for ctx.Err() == nil {
|
||||||
|
if calls != 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
case <-time.After(time.Millisecond * 100):
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if r, _, _ := output.syn(ctx); len(r) != 0 {
|
||||||
|
t.Error("something was pushed to out queue even though processor didnt emit content")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPipeline(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
ctx, can := context.WithTimeout(context.Background(), time.Second*10)
|
||||||
|
defer can()
|
||||||
|
|
||||||
|
output, err := NewQueue(ctx, "output", NewTestDriver(t))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
driverInput, _ := NewDriver(ctx, "")
|
input, err := NewQueue(ctx, "input", NewTestDriver(t))
|
||||||
input, err := NewQueue(ctx, "input", driverInput)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|||||||
42
queue.go
42
queue.go
@@ -3,6 +3,7 @@ package main
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/google/uuid"
|
"github.com/google/uuid"
|
||||||
@@ -20,7 +21,7 @@ func NewNoopQueue() Queue {
|
|||||||
func NewQueue(ctx context.Context, topic string, driver Driver) (Queue, error) {
|
func NewQueue(ctx context.Context, topic string, driver Driver) (Queue, error) {
|
||||||
if _, err := driver.ExecContext(ctx, `
|
if _, err := driver.ExecContext(ctx, `
|
||||||
CREATE TABLE IF NOT EXISTS queue (
|
CREATE TABLE IF NOT EXISTS queue (
|
||||||
id INTEGER PRIMARY KEY,
|
id TEXT PRIMARY KEY,
|
||||||
topic TEXT NOT NULL,
|
topic TEXT NOT NULL,
|
||||||
updated INTEGER NOT NULL,
|
updated INTEGER NOT NULL,
|
||||||
reservation TEXT,
|
reservation TEXT,
|
||||||
@@ -36,14 +37,23 @@ func (q Queue) Enqueue(ctx context.Context, b []byte) error {
|
|||||||
if q.driver.DB == nil {
|
if q.driver.DB == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
_, err := q.driver.ExecContext(ctx, `
|
result, err := q.driver.ExecContext(ctx, `
|
||||||
INSERT INTO queue (topic, updated, payload) VALUES (?, ?, ?)
|
INSERT INTO queue (id, topic, updated, payload) VALUES ($1, $2, $3, $4)
|
||||||
`,
|
`,
|
||||||
|
uuid.New().String(),
|
||||||
q.topic,
|
q.topic,
|
||||||
time.Now().Unix(),
|
time.Now().Unix(),
|
||||||
b,
|
b,
|
||||||
)
|
)
|
||||||
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
}
|
||||||
|
if n, err := result.RowsAffected(); err != nil {
|
||||||
|
return err
|
||||||
|
} else if n != 1 {
|
||||||
|
return fmt.Errorf("insert into queue %s affected %v rows", b, n)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q Queue) Syn(ctx context.Context) (string, []byte, error) {
|
func (q Queue) Syn(ctx context.Context) (string, []byte, error) {
|
||||||
@@ -59,7 +69,7 @@ func (q Queue) Syn(ctx context.Context) (string, []byte, error) {
|
|||||||
select {
|
select {
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
return "", nil, ctx.Err()
|
return "", nil, ctx.Err()
|
||||||
case <-time.After(time.Second):
|
case <-time.After(time.Millisecond * 500):
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -71,16 +81,16 @@ func (q Queue) syn(ctx context.Context) ([]byte, []byte, error) {
|
|||||||
if result, err := q.driver.ExecContext(ctx, `
|
if result, err := q.driver.ExecContext(ctx, `
|
||||||
UPDATE queue
|
UPDATE queue
|
||||||
SET
|
SET
|
||||||
updated = ?, reservation = ?
|
updated = $1, reservation = $2
|
||||||
WHERE
|
WHERE
|
||||||
id IN (
|
id IN (
|
||||||
SELECT id
|
SELECT id
|
||||||
FROM queue
|
FROM queue
|
||||||
WHERE
|
WHERE
|
||||||
topic == ?
|
topic = $3
|
||||||
AND (
|
AND (
|
||||||
reservation IS NULL
|
reservation IS NULL
|
||||||
OR ? - updated > 60
|
OR $4 - updated > 600
|
||||||
)
|
)
|
||||||
LIMIT 1
|
LIMIT 1
|
||||||
)
|
)
|
||||||
@@ -95,12 +105,12 @@ func (q Queue) syn(ctx context.Context) ([]byte, []byte, error) {
|
|||||||
row := q.driver.QueryRowContext(ctx, `
|
row := q.driver.QueryRowContext(ctx, `
|
||||||
SELECT payload
|
SELECT payload
|
||||||
FROM queue
|
FROM queue
|
||||||
WHERE reservation==?
|
WHERE reservation=$1
|
||||||
LIMIT 1
|
LIMIT 1
|
||||||
`, reservation)
|
`, reservation)
|
||||||
if err := row.Err(); err != nil {
|
if err := row.Err(); err != nil {
|
||||||
return nil, nil, fmt.Errorf("failed to query reservation: %w", err)
|
return nil, nil, fmt.Errorf("failed to query reservation: %w", err)
|
||||||
} else if err := row.Scan(&payload); err != nil {
|
} else if err := row.Scan(&payload); err != nil && !strings.Contains(err.Error(), "no rows in result") {
|
||||||
return nil, nil, fmt.Errorf("failed to parse reservation: %w", err)
|
return nil, nil, fmt.Errorf("failed to parse reservation: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -108,12 +118,22 @@ func (q Queue) syn(ctx context.Context) ([]byte, []byte, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (q Queue) Ack(ctx context.Context, reservation string) error {
|
func (q Queue) Ack(ctx context.Context, reservation string) error {
|
||||||
|
return q.ack(ctx, []byte(reservation))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q Queue) ack(ctx context.Context, reservation []byte) error {
|
||||||
if q.driver.DB == nil {
|
if q.driver.DB == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
_, err := q.driver.ExecContext(ctx, `
|
result, err := q.driver.ExecContext(ctx, `
|
||||||
DELETE FROM queue
|
DELETE FROM queue
|
||||||
WHERE reservation==?
|
WHERE reservation=$1
|
||||||
`, reservation)
|
`, reservation)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if n, _ := result.RowsAffected(); n != 1 {
|
||||||
|
return fmt.Errorf("failed to ack %s: %v rows affected", reservation, n)
|
||||||
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -8,15 +8,15 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestQueue(t *testing.T) {
|
func TestQueue(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
ctx, can := context.WithTimeout(context.Background(), time.Second*10)
|
ctx, can := context.WithTimeout(context.Background(), time.Second*10)
|
||||||
defer can()
|
defer can()
|
||||||
|
|
||||||
driver, _ := NewDriver(ctx, "")
|
q, err := NewQueue(ctx, "", NewTestDriver(t))
|
||||||
q, err := NewQueue(ctx, "", driver)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
qOther, _ := NewQueue(ctx, "other", driver)
|
qOther, _ := NewQueue(ctx, "other", q.driver)
|
||||||
|
|
||||||
if reservation, _, err := q.syn(ctx); reservation != nil {
|
if reservation, _, err := q.syn(ctx); reservation != nil {
|
||||||
t.Errorf("able to syn before any enqueues created: %v", err)
|
t.Errorf("able to syn before any enqueues created: %v", err)
|
||||||
|
|||||||
87
recap.go
Normal file
87
recap.go
Normal file
@@ -0,0 +1,87 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
type PersistenceToRecap struct {
|
||||||
|
pipeline Pipeline
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewPersistenceToRecapPipeline(ctx context.Context, cfg Config) (Pipeline, error) {
|
||||||
|
reader, err := NewQueue(ctx, "new_persistence", cfg.driver)
|
||||||
|
if err != nil {
|
||||||
|
return Pipeline{}, err
|
||||||
|
}
|
||||||
|
writer := NewNoopQueue()
|
||||||
|
return Pipeline{
|
||||||
|
writer: writer,
|
||||||
|
reader: reader,
|
||||||
|
process: newPersistenceToRecapProcess(cfg),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func newPersistenceToRecapProcess(cfg Config) processFunc {
|
||||||
|
return func(ctx context.Context, modelIDs []byte) ([]byte, error) {
|
||||||
|
var m ModelIDs
|
||||||
|
if err := json.Unmarshal(modelIDs, &m); err != nil {
|
||||||
|
return nil, fmt.Errorf("received non model ids payload: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if m.Event == "" {
|
||||||
|
} else if event, err := cfg.storage.GetEvent(ctx, m.Event); err != nil {
|
||||||
|
return nil, err
|
||||||
|
} else if !event.Resolved {
|
||||||
|
} else if err := func() error {
|
||||||
|
threads, err := cfg.storage.GetEventThreads(ctx, event.ID)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, thread := range threads {
|
||||||
|
messages, err := cfg.storage.GetThreadMessages(ctx, thread.ID)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
} else if len(messages) < 2 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
prompt := []string{
|
||||||
|
cfg.RecapPromptIntro,
|
||||||
|
"---",
|
||||||
|
messages[0].Plaintext,
|
||||||
|
"---",
|
||||||
|
cfg.RecapPrompt,
|
||||||
|
"---",
|
||||||
|
}
|
||||||
|
for _, message := range messages[1:] {
|
||||||
|
prompt = append(prompt, fmt.Sprintf("%s\n%s", message.Author, message.Plaintext))
|
||||||
|
}
|
||||||
|
|
||||||
|
recap, err := cfg.ai.Do(ctx, strings.Join(prompt, "\n\n"))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
thread.Recap = recap
|
||||||
|
if err := cfg.storage.UpsertThread(ctx, thread); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
log.Println("recapped", thread.ID)
|
||||||
|
if cfg.Debug {
|
||||||
|
log.Printf("Recapped %q as %q from %q/%q and %+v", thread.ID, thread.Recap, cfg.RecapPromptIntro, cfg.RecapPrompt, messages)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if cfg.Debug {
|
||||||
|
log.Printf("persisted recap")
|
||||||
|
}
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
50
recap_test.go
Normal file
50
recap_test.go
Normal file
@@ -0,0 +1,50 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/breel-render/spoc-bot-vr/model"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestNewPersistenceToRecapProcess(t *testing.T) {
|
||||||
|
ctx, can := context.WithTimeout(context.Background(), time.Second*10)
|
||||||
|
defer can()
|
||||||
|
|
||||||
|
d := NewTestDriver(t)
|
||||||
|
s, _ := NewStorage(ctx, d)
|
||||||
|
|
||||||
|
cfg := Config{
|
||||||
|
driver: d,
|
||||||
|
storage: s,
|
||||||
|
ai: NewAINoop(),
|
||||||
|
Debug: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
proc := newPersistenceToRecapProcess(cfg)
|
||||||
|
|
||||||
|
if err := s.UpsertEvent(ctx, model.NewEvent("Event", "", 0, "", "", "", "", true)); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if err := s.UpsertThread(ctx, model.NewThread("Thread", "", 0, "", "Event")); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if err := s.UpsertMessage(ctx, model.NewMessage("Root", 0, "bot", "an alert has fired", "Thread")); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if err := s.UpsertMessage(ctx, model.NewMessage("Message", 0, "me", "hello world", "Thread")); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
b, _ := json.Marshal(ModelIDs{Event: "Event"})
|
||||||
|
if _, err := proc(ctx, b); err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if thread, err := s.GetThread(ctx, "Thread"); err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
} else if thread.Recap == "" {
|
||||||
|
t.Error("no recap:", thread.Recap)
|
||||||
|
} else {
|
||||||
|
t.Logf("%+v", thread)
|
||||||
|
}
|
||||||
|
}
|
||||||
262
slack.go
262
slack.go
@@ -2,35 +2,277 @@ package main
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"regexp"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/breel-render/spoc-bot-vr/model"
|
||||||
)
|
)
|
||||||
|
|
||||||
type SlackToMessage struct {
|
var (
|
||||||
|
ErrIrrelevantMessage = errors.New("message isnt relevant to spoc bot vr")
|
||||||
|
)
|
||||||
|
|
||||||
|
type SlackToModel struct {
|
||||||
pipeline Pipeline
|
pipeline Pipeline
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewSlackToMessagePipeline(ctx context.Context, cfg Config) (Pipeline, error) {
|
type Models struct {
|
||||||
reader, err := NewQueue(ctx, "fromSlack", cfg.driver)
|
Event model.Event
|
||||||
|
Message model.Message
|
||||||
|
Thread model.Thread
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewSlackToModelPipeline(ctx context.Context, cfg Config) (Pipeline, error) {
|
||||||
|
reader, err := NewQueue(ctx, "slack_event", cfg.driver)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return Pipeline{}, err
|
return Pipeline{}, err
|
||||||
}
|
}
|
||||||
writer, err := NewQueue(ctx, "fromMessage", cfg.driver)
|
writer, err := NewQueue(ctx, "new_models", cfg.driver)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return Pipeline{}, err
|
return Pipeline{}, err
|
||||||
}
|
}
|
||||||
return Pipeline{
|
return Pipeline{
|
||||||
writer: writer,
|
writer: writer,
|
||||||
reader: reader,
|
reader: reader,
|
||||||
process: newSlackToMessageProcess(cfg),
|
process: newSlackToModelProcess(cfg),
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func newSlackToMessageProcess(cfg Config) processFunc {
|
func newSlackToModelProcess(cfg Config) processFunc {
|
||||||
return func(ctx context.Context, slack []byte) ([]byte, error) {
|
return func(ctx context.Context, slack []byte) ([]byte, error) {
|
||||||
m, err := ParseSlack(slack, cfg.AssetPattern, cfg.DatacenterPattern, cfg.EventNamePattern)
|
s, err := parseSlack(slack)
|
||||||
if err != nil {
|
if cfg.Debug {
|
||||||
return nil, fmt.Errorf("failed to deserialize slack %w: %s", err, slack)
|
log.Printf("%v: %s => %+v", err, slack, s)
|
||||||
}
|
}
|
||||||
return m.Serialize(), nil
|
if errors.Is(err, ErrIrrelevantMessage) {
|
||||||
|
return nil, nil
|
||||||
|
} else if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to deserialize slack %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for pattern, ptr := range map[string]*string{
|
||||||
|
cfg.AssetPattern: &s.Asset,
|
||||||
|
cfg.DatacenterPattern: &s.Datacenter,
|
||||||
|
cfg.EventNamePattern: &s.EventName,
|
||||||
|
} {
|
||||||
|
*ptr = withPattern(pattern, *ptr)
|
||||||
|
}
|
||||||
|
|
||||||
|
event := model.Event{}
|
||||||
|
if s.Event != "" && s.Source != "" && s.TS > 0 && s.EventName != "" {
|
||||||
|
event = model.NewEvent(s.Event, s.Source, s.TS, s.EventName, s.Asset, s.Datacenter, s.Team, s.Resolved)
|
||||||
|
}
|
||||||
|
message := model.Message{}
|
||||||
|
if s.ID != "" && s.Source != "" && s.TS > 0 && s.Thread != "" {
|
||||||
|
message = model.NewMessage(s.ID, s.TS, s.Author, s.Plaintext, s.Thread)
|
||||||
|
}
|
||||||
|
thread := model.Thread{}
|
||||||
|
if s.Thread != "" && s.Source != "" && s.TS > 0 && s.Event != "" {
|
||||||
|
thread = model.NewThread(s.Thread, s.Source, s.TS, s.Channel, s.Event)
|
||||||
|
}
|
||||||
|
|
||||||
|
if cfg.Debug {
|
||||||
|
log.Printf("parsed slack message into models")
|
||||||
|
}
|
||||||
|
return json.Marshal(Models{
|
||||||
|
Event: event,
|
||||||
|
Message: message,
|
||||||
|
Thread: thread,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func withPattern(pattern string, given string) string {
|
||||||
|
r := regexp.MustCompile(pattern)
|
||||||
|
parsed := r.FindString(given)
|
||||||
|
for i, name := range r.SubexpNames() {
|
||||||
|
if i > 0 && name != "" {
|
||||||
|
parsed = r.FindStringSubmatch(given)[i]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return parsed
|
||||||
|
}
|
||||||
|
|
||||||
|
type (
|
||||||
|
parsedSlackMessage struct {
|
||||||
|
ID string
|
||||||
|
TS uint64
|
||||||
|
Source string
|
||||||
|
Channel string
|
||||||
|
Thread string
|
||||||
|
EventName string
|
||||||
|
Event string
|
||||||
|
Plaintext string
|
||||||
|
Asset string
|
||||||
|
Resolved bool
|
||||||
|
Datacenter string
|
||||||
|
Author string
|
||||||
|
Team string
|
||||||
|
}
|
||||||
|
|
||||||
|
slackMessage struct {
|
||||||
|
slackEvent
|
||||||
|
Type string
|
||||||
|
TS uint64 `json:"event_time"`
|
||||||
|
Event slackEvent
|
||||||
|
MessageTS string `json:"ts"`
|
||||||
|
}
|
||||||
|
|
||||||
|
slackEvent struct {
|
||||||
|
ID string `json:"event_ts"`
|
||||||
|
Channel string
|
||||||
|
// rewrites
|
||||||
|
Nested *slackEvent `json:"message"`
|
||||||
|
PreviousMessage *slackEvent `json:"previous_message"`
|
||||||
|
// human
|
||||||
|
ParentID string `json:"thread_ts"`
|
||||||
|
Text string
|
||||||
|
Blocks []slackBlock
|
||||||
|
User string
|
||||||
|
// bot
|
||||||
|
Bot slackBot `json:"bot_profile"`
|
||||||
|
Attachments []slackAttachment
|
||||||
|
}
|
||||||
|
|
||||||
|
slackBlock struct {
|
||||||
|
Elements []slackElement
|
||||||
|
}
|
||||||
|
|
||||||
|
slackElement struct {
|
||||||
|
Elements []slackElement
|
||||||
|
RichText string `json:"text"`
|
||||||
|
}
|
||||||
|
|
||||||
|
slackBot struct {
|
||||||
|
Name string
|
||||||
|
}
|
||||||
|
|
||||||
|
slackAttachment struct {
|
||||||
|
Color string
|
||||||
|
Title string
|
||||||
|
Text string
|
||||||
|
Fields []slackField
|
||||||
|
Actions []slackAction
|
||||||
|
}
|
||||||
|
|
||||||
|
slackField struct {
|
||||||
|
Value string
|
||||||
|
Title string
|
||||||
|
}
|
||||||
|
|
||||||
|
slackAction struct{}
|
||||||
|
)
|
||||||
|
|
||||||
|
func parseSlack(b []byte) (parsedSlackMessage, error) {
|
||||||
|
s, err := _parseSlack(b)
|
||||||
|
if err != nil {
|
||||||
|
return parsedSlackMessage{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
if ch != "" {
|
||||||
|
s.Event.Channel = ch
|
||||||
|
}
|
||||||
|
*/
|
||||||
|
|
||||||
|
if s.Event.Bot.Name != "" {
|
||||||
|
if len(s.Event.Attachments) == 0 {
|
||||||
|
return parsedSlackMessage{}, ErrIrrelevantMessage
|
||||||
|
} else if !strings.Contains(s.Event.Attachments[0].Title, ": Firing: ") {
|
||||||
|
return parsedSlackMessage{}, ErrIrrelevantMessage
|
||||||
|
}
|
||||||
|
var tagsField string
|
||||||
|
var teamField string
|
||||||
|
for _, field := range s.Event.Attachments[0].Fields {
|
||||||
|
switch field.Title {
|
||||||
|
case "Tags":
|
||||||
|
tagsField = field.Value
|
||||||
|
case "Routed Teams":
|
||||||
|
teamField = field.Value
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return parsedSlackMessage{
|
||||||
|
ID: fmt.Sprintf("%s/%v", s.Event.ID, s.TS),
|
||||||
|
TS: s.TS,
|
||||||
|
Source: fmt.Sprintf(`https://renderinc.slack.com/archives/%s/p%s`, s.Event.Channel, strings.ReplaceAll(s.Event.ID, ".", "")),
|
||||||
|
Channel: s.Event.Channel,
|
||||||
|
Thread: s.Event.ID,
|
||||||
|
EventName: strings.Split(s.Event.Attachments[0].Title, ": Firing: ")[1],
|
||||||
|
Event: strings.TrimPrefix(strings.Split(s.Event.Attachments[0].Title, ":")[0], "#"),
|
||||||
|
Plaintext: s.Event.Attachments[0].Text,
|
||||||
|
Asset: s.Event.Attachments[0].Text,
|
||||||
|
Resolved: !strings.HasPrefix(s.Event.Attachments[0].Color, "F"),
|
||||||
|
Datacenter: tagsField,
|
||||||
|
Author: s.Event.Bot.Name,
|
||||||
|
Team: teamField,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if s.Event.ParentID == "" {
|
||||||
|
return parsedSlackMessage{}, ErrIrrelevantMessage
|
||||||
|
}
|
||||||
|
return parsedSlackMessage{
|
||||||
|
ID: fmt.Sprintf("%s/%v", s.Event.ParentID, s.TS),
|
||||||
|
TS: s.TS,
|
||||||
|
Source: fmt.Sprintf(`https://renderinc.slack.com/archives/%s/p%s`, s.Event.Channel, strings.ReplaceAll(s.Event.ParentID, ".", "")),
|
||||||
|
Channel: s.Event.Channel,
|
||||||
|
Thread: s.Event.ParentID,
|
||||||
|
EventName: "",
|
||||||
|
Event: "",
|
||||||
|
Plaintext: s.Event.Text,
|
||||||
|
Asset: "",
|
||||||
|
Datacenter: "",
|
||||||
|
Author: s.Event.User,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func _parseSlack(b []byte) (slackMessage, error) {
|
||||||
|
var wrapper ChannelWrapper
|
||||||
|
if err := json.Unmarshal(b, &wrapper); err == nil && len(wrapper.V) > 0 {
|
||||||
|
b = wrapper.V
|
||||||
|
}
|
||||||
|
|
||||||
|
var result slackMessage
|
||||||
|
err := json.Unmarshal(b, &result)
|
||||||
|
switch result.Type {
|
||||||
|
case "message":
|
||||||
|
result.Event = result.slackEvent
|
||||||
|
result.TS, _ = strconv.ParseUint(strings.Split(result.MessageTS, ".")[0], 10, 64)
|
||||||
|
result.Event.ID = result.MessageTS
|
||||||
|
}
|
||||||
|
if result.Event.Nested != nil && !result.Event.Nested.Empty() {
|
||||||
|
result.Event.Blocks = result.Event.Nested.Blocks
|
||||||
|
result.Event.Bot = result.Event.Nested.Bot
|
||||||
|
result.Event.Attachments = result.Event.Nested.Attachments
|
||||||
|
result.Event.Nested = nil
|
||||||
|
}
|
||||||
|
if result.Event.PreviousMessage != nil {
|
||||||
|
if result.Event.PreviousMessage.ID != "" {
|
||||||
|
result.Event.ID = result.Event.PreviousMessage.ID
|
||||||
|
}
|
||||||
|
result.Event.PreviousMessage = nil
|
||||||
|
}
|
||||||
|
if wrapper.Channel != "" {
|
||||||
|
result.Event.Channel = wrapper.Channel
|
||||||
|
}
|
||||||
|
return result, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this slackEvent) Empty() bool {
|
||||||
|
return fmt.Sprintf("%+v", this) == fmt.Sprintf("%+v", slackEvent{})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this parsedSlackMessage) Time() time.Time {
|
||||||
|
return time.Unix(int64(this.TS), 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
type ChannelWrapper struct {
|
||||||
|
Channel string
|
||||||
|
V json.RawMessage
|
||||||
|
}
|
||||||
|
|||||||
267
slack_test.go
267
slack_test.go
@@ -2,19 +2,27 @@ package main
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"encoding/json"
|
||||||
"os"
|
"os"
|
||||||
|
"path"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"gotest.tools/v3/assert"
|
"github.com/breel-render/spoc-bot-vr/model"
|
||||||
|
"gotest.tools/assert"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestSlackToMessagePipeline(t *testing.T) {
|
func TestSlackToModelPipeline(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
ctx, can := context.WithTimeout(context.Background(), time.Second*5)
|
ctx, can := context.WithTimeout(context.Background(), time.Second*5)
|
||||||
defer can()
|
defer can()
|
||||||
|
|
||||||
driver, _ := NewDriver(ctx, "/tmp/f")
|
pipeline, err := NewSlackToModelPipeline(ctx, Config{
|
||||||
pipeline, err := NewSlackToMessagePipeline(ctx, Config{driver: driver})
|
driver: NewTestDriver(t),
|
||||||
|
AssetPattern: renderAssetPattern,
|
||||||
|
DatacenterPattern: renderDatacenterPattern,
|
||||||
|
EventNamePattern: renderEventNamePattern,
|
||||||
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -24,7 +32,32 @@ func TestSlackToMessagePipeline(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
want := Message{
|
want := Models{
|
||||||
|
Event: model.NewEvent(
|
||||||
|
"11071",
|
||||||
|
"https://renderinc.slack.com/archives/C06U1DDBBU4/p1712927439728409",
|
||||||
|
1712927439,
|
||||||
|
"Alertconfig Workflow Failed",
|
||||||
|
"",
|
||||||
|
"",
|
||||||
|
"Datastores Non-Critical",
|
||||||
|
true,
|
||||||
|
),
|
||||||
|
Message: model.NewMessage(
|
||||||
|
"1712927439.728409/1712927439",
|
||||||
|
1712927439,
|
||||||
|
"Opsgenie for Alert Management",
|
||||||
|
"At least one alertconfig run has failed unexpectedly.\nDashboard: <https://grafana.render.com/d/VLZU83YVk?orgId=1>\nPanel: <https://grafana.render.com/d/VLZU83YVk?orgId=1&viewPanel=17>\nSource: <https://grafana.render.com/alerting/grafana/fa7b06b8-b4d8-4979-bce7-5e1c432edd81/view?orgId=1>",
|
||||||
|
"1712927439.728409",
|
||||||
|
),
|
||||||
|
Thread: model.NewThread(
|
||||||
|
"1712927439.728409",
|
||||||
|
"https://renderinc.slack.com/archives/C06U1DDBBU4/p1712927439728409",
|
||||||
|
1712927439,
|
||||||
|
"C06U1DDBBU4",
|
||||||
|
"11071",
|
||||||
|
),
|
||||||
|
/*
|
||||||
ID: "1712927439.728409/1712927439",
|
ID: "1712927439.728409/1712927439",
|
||||||
TS: 1712927439,
|
TS: 1712927439,
|
||||||
Source: "https://renderinc.slack.com/archives/C06U1DDBBU4/p1712927439728409",
|
Source: "https://renderinc.slack.com/archives/C06U1DDBBU4/p1712927439728409",
|
||||||
@@ -35,16 +68,236 @@ func TestSlackToMessagePipeline(t *testing.T) {
|
|||||||
Plaintext: "At least one alertconfig run has failed unexpectedly.\nDashboard: <https://grafana.render.com/d/VLZU83YVk?orgId=1>\nPanel: <https://grafana.render.com/d/VLZU83YVk?orgId=1&viewPanel=17>\nSource: <https://grafana.render.com/alerting/grafana/fa7b06b8-b4d8-4979-bce7-5e1c432edd81/view?orgId=1>",
|
Plaintext: "At least one alertconfig run has failed unexpectedly.\nDashboard: <https://grafana.render.com/d/VLZU83YVk?orgId=1>\nPanel: <https://grafana.render.com/d/VLZU83YVk?orgId=1&viewPanel=17>\nSource: <https://grafana.render.com/alerting/grafana/fa7b06b8-b4d8-4979-bce7-5e1c432edd81/view?orgId=1>",
|
||||||
Asset: "At least one alertconfig run has failed unexpectedly.\nDashboard: <https://grafana.render.com/d/VLZU83YVk?orgId=1>\nPanel: <https://grafana.render.com/d/VLZU83YVk?orgId=1&viewPanel=17>\nSource: <https://grafana.render.com/alerting/grafana/fa7b06b8-b4d8-4979-bce7-5e1c432edd81/view?orgId=1>",
|
Asset: "At least one alertconfig run has failed unexpectedly.\nDashboard: <https://grafana.render.com/d/VLZU83YVk?orgId=1>\nPanel: <https://grafana.render.com/d/VLZU83YVk?orgId=1&viewPanel=17>\nSource: <https://grafana.render.com/alerting/grafana/fa7b06b8-b4d8-4979-bce7-5e1c432edd81/view?orgId=1>",
|
||||||
Datacenter: "alertname:Alertconfig Workflow Failed, grafana_folder:Datastores, rule_uid:a7639f7e-6950-41be-850a-b22119f74cbb",
|
Datacenter: "alertname:Alertconfig Workflow Failed, grafana_folder:Datastores, rule_uid:a7639f7e-6950-41be-850a-b22119f74cbb",
|
||||||
|
*/
|
||||||
}
|
}
|
||||||
|
|
||||||
b, _ := os.ReadFile("testdata/slack_events/opsgenie_alert.json")
|
b, _ := os.ReadFile("testdata/slack_events/opsgenie_alert.json")
|
||||||
if err := pipeline.reader.Enqueue(ctx, b); err != nil {
|
if err := pipeline.reader.Enqueue(ctx, b); err != nil {
|
||||||
t.Fatal("failed to enqueue", err)
|
t.Fatal("failed to enqueue", err)
|
||||||
}
|
}
|
||||||
|
var got Models
|
||||||
if _, b2, err := pipeline.writer.Syn(ctx); err != nil {
|
if _, b2, err := pipeline.writer.Syn(ctx); err != nil {
|
||||||
t.Fatal("failed to syn", err)
|
t.Fatal("failed to syn", err)
|
||||||
} else if m := MustDeserialize(b2); false {
|
} else if err := json.Unmarshal(b2, &got); err != nil {
|
||||||
|
t.Fatal("failed to parse outqueue:", err)
|
||||||
} else {
|
} else {
|
||||||
assert.DeepEqual(t, want, m)
|
want.Event.Updated = 0
|
||||||
|
want.Message.Updated = 0
|
||||||
|
want.Thread.Updated = 0
|
||||||
|
got.Event.Updated = 0
|
||||||
|
got.Message.Updated = 0
|
||||||
|
got.Thread.Updated = 0
|
||||||
|
assert.DeepEqual(t, want, got)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestParseSlackTestdata(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
cases := map[string]struct {
|
||||||
|
slackMessage slackMessage
|
||||||
|
message parsedSlackMessage
|
||||||
|
}{
|
||||||
|
"human_thread_message_from_opsgenie_alert.json": {
|
||||||
|
slackMessage: slackMessage{
|
||||||
|
TS: 1712930706,
|
||||||
|
Event: slackEvent{
|
||||||
|
ID: "1712930706.598629",
|
||||||
|
Channel: "C06U1DDBBU4",
|
||||||
|
ParentID: "1712927439.728409",
|
||||||
|
Text: "I gotta do this",
|
||||||
|
Blocks: []slackBlock{{
|
||||||
|
Elements: []slackElement{{
|
||||||
|
Elements: []slackElement{{
|
||||||
|
RichText: "I gotta do this",
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
Bot: slackBot{
|
||||||
|
Name: "",
|
||||||
|
},
|
||||||
|
Attachments: []slackAttachment{},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
message: parsedSlackMessage{
|
||||||
|
ID: "1712927439.728409/1712930706",
|
||||||
|
TS: 1712930706,
|
||||||
|
Source: "https://renderinc.slack.com/archives/C06U1DDBBU4/p1712927439728409",
|
||||||
|
Channel: "C06U1DDBBU4",
|
||||||
|
Thread: "1712927439.728409",
|
||||||
|
EventName: "",
|
||||||
|
Event: "",
|
||||||
|
Plaintext: "I gotta do this",
|
||||||
|
Asset: "",
|
||||||
|
Author: "U06868T6ADV",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
"opsgenie_alert.json": {
|
||||||
|
slackMessage: slackMessage{
|
||||||
|
TS: 1712927439,
|
||||||
|
Event: slackEvent{
|
||||||
|
ID: "1712927439.728409",
|
||||||
|
Channel: "C06U1DDBBU4",
|
||||||
|
Bot: slackBot{
|
||||||
|
Name: "Opsgenie for Alert Management",
|
||||||
|
},
|
||||||
|
Attachments: []slackAttachment{{
|
||||||
|
Color: "2ecc71",
|
||||||
|
Title: "#11071: [Grafana]: Firing: Alertconfig Workflow Failed",
|
||||||
|
Text: "At least one alertconfig run has failed unexpectedly.\nDashboard: <https://grafana.render.com/d/VLZU83YVk?orgId=1>\nPanel: <https://grafana.render.com/d/VLZU83YVk?orgId=1&viewPanel=17>\nSource: <https://grafana.render.com/alerting/grafana/fa7b06b8-b4d8-4979-bce7-5e1c432edd81/view?orgId=1>",
|
||||||
|
Fields: []slackField{
|
||||||
|
{Value: "P3", Title: "Priority"},
|
||||||
|
{Value: "alertname:Alertconfig Workflow Failed, grafana_folder:Datastores, rule_uid:a7639f7e-6950-41be-850a-b22119f74cbb", Title: "Tags"},
|
||||||
|
{Value: "Datastores Non-Critical", Title: "Routed Teams"},
|
||||||
|
},
|
||||||
|
Actions: []slackAction{{}, {}, {}},
|
||||||
|
}},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
message: parsedSlackMessage{
|
||||||
|
ID: "1712927439.728409/1712927439",
|
||||||
|
TS: 1712927439,
|
||||||
|
Source: "https://renderinc.slack.com/archives/C06U1DDBBU4/p1712927439728409",
|
||||||
|
Channel: "C06U1DDBBU4",
|
||||||
|
Thread: "1712927439.728409",
|
||||||
|
EventName: "Alertconfig Workflow Failed",
|
||||||
|
Event: "11071",
|
||||||
|
Plaintext: "At least one alertconfig run has failed unexpectedly.\nDashboard: <https://grafana.render.com/d/VLZU83YVk?orgId=1>\nPanel: <https://grafana.render.com/d/VLZU83YVk?orgId=1&viewPanel=17>\nSource: <https://grafana.render.com/alerting/grafana/fa7b06b8-b4d8-4979-bce7-5e1c432edd81/view?orgId=1>",
|
||||||
|
Asset: "At least one alertconfig run has failed unexpectedly.\nDashboard: <https://grafana.render.com/d/VLZU83YVk?orgId=1>\nPanel: <https://grafana.render.com/d/VLZU83YVk?orgId=1&viewPanel=17>\nSource: <https://grafana.render.com/alerting/grafana/fa7b06b8-b4d8-4979-bce7-5e1c432edd81/view?orgId=1>",
|
||||||
|
Datacenter: "alertname:Alertconfig Workflow Failed, grafana_folder:Datastores, rule_uid:a7639f7e-6950-41be-850a-b22119f74cbb",
|
||||||
|
Author: "Opsgenie for Alert Management",
|
||||||
|
Team: "Datastores Non-Critical",
|
||||||
|
Resolved: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
"opsgenie_alert_resolved.json": {
|
||||||
|
slackMessage: slackMessage{
|
||||||
|
TS: 1712916339,
|
||||||
|
Event: slackEvent{
|
||||||
|
ID: "1712916339.000300",
|
||||||
|
Channel: "C06U1DDBBU4",
|
||||||
|
Bot: slackBot{
|
||||||
|
Name: "Opsgenie for Alert Management",
|
||||||
|
},
|
||||||
|
Attachments: []slackAttachment{{
|
||||||
|
Color: "2ecc71",
|
||||||
|
Title: "#11069: [Grafana]: Firing: Alertconfig Workflow Failed",
|
||||||
|
Text: "At least one alertconfig run has failed unexpectedly.\nDashboard: <https://grafana.render.com/d/VLZU83YVk?orgId=1>\nPanel: <https://grafana.render.com/d/VLZU83YVk?orgId=1&viewPanel=17>\nSource: <https://grafana.render.com/alerting/grafana/fa7b06b8-b4d8-4979-bce7-5e1c432edd81/view?orgId=1>",
|
||||||
|
Fields: []slackField{
|
||||||
|
{Value: "P3", Title: "Priority"},
|
||||||
|
{Value: "alertname:Alertconfig Workflow Failed, grafana_folder:Datastores, rule_uid:a7639f7e-6950-41be-850a-b22119f74cbb", Title: "Tags"},
|
||||||
|
{Value: "Datastores Non-Critical", Title: "Routed Teams"},
|
||||||
|
},
|
||||||
|
Actions: []slackAction{},
|
||||||
|
}},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
message: parsedSlackMessage{
|
||||||
|
ID: "1712916339.000300/1712916339",
|
||||||
|
TS: 1712916339,
|
||||||
|
Source: "https://renderinc.slack.com/archives/C06U1DDBBU4/p1712916339000300",
|
||||||
|
Channel: "C06U1DDBBU4",
|
||||||
|
Thread: "1712916339.000300",
|
||||||
|
EventName: "Alertconfig Workflow Failed",
|
||||||
|
Event: "11069",
|
||||||
|
Plaintext: "At least one alertconfig run has failed unexpectedly.\nDashboard: <https://grafana.render.com/d/VLZU83YVk?orgId=1>\nPanel: <https://grafana.render.com/d/VLZU83YVk?orgId=1&viewPanel=17>\nSource: <https://grafana.render.com/alerting/grafana/fa7b06b8-b4d8-4979-bce7-5e1c432edd81/view?orgId=1>",
|
||||||
|
Asset: "At least one alertconfig run has failed unexpectedly.\nDashboard: <https://grafana.render.com/d/VLZU83YVk?orgId=1>\nPanel: <https://grafana.render.com/d/VLZU83YVk?orgId=1&viewPanel=17>\nSource: <https://grafana.render.com/alerting/grafana/fa7b06b8-b4d8-4979-bce7-5e1c432edd81/view?orgId=1>",
|
||||||
|
Resolved: true,
|
||||||
|
Datacenter: "alertname:Alertconfig Workflow Failed, grafana_folder:Datastores, rule_uid:a7639f7e-6950-41be-850a-b22119f74cbb",
|
||||||
|
Author: "Opsgenie for Alert Management",
|
||||||
|
Team: "Datastores Non-Critical",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
"reingested_alert.json": {
|
||||||
|
message: parsedSlackMessage{
|
||||||
|
ID: "1712892637.037639/1712892637",
|
||||||
|
TS: 1712892637,
|
||||||
|
Source: "https://renderinc.slack.com/archives//p1712892637037639",
|
||||||
|
//Channel: "C06U1DDBBU4",
|
||||||
|
Thread: "1712892637.037639",
|
||||||
|
EventName: "Alertconfig Workflow Failed",
|
||||||
|
Event: "11061",
|
||||||
|
Plaintext: "At least one alertconfig run has failed unexpectedly.\nDashboard: <https://grafana.render.com/d/VLZU83YVk?orgId=1>\nPanel: <https://grafana.render.com/d/VLZU83YVk?orgId=1&viewPanel=17>\nSource: <https://grafana.render.com/alerting/grafana/fa7b06b8-b4d8-4979-bce7-5e1c432edd81/view?orgId=1>",
|
||||||
|
Asset: "At least one alertconfig run has failed unexpectedly.\nDashboard: <https://grafana.render.com/d/VLZU83YVk?orgId=1>\nPanel: <https://grafana.render.com/d/VLZU83YVk?orgId=1&viewPanel=17>\nSource: <https://grafana.render.com/alerting/grafana/fa7b06b8-b4d8-4979-bce7-5e1c432edd81/view?orgId=1>",
|
||||||
|
Resolved: true,
|
||||||
|
Datacenter: "alertname:Alertconfig Workflow Failed, grafana_folder:Datastores, rule_uid:a7639f7e-6950-41be-850a-b22119f74cbb",
|
||||||
|
Author: "Opsgenie for Alert Management",
|
||||||
|
Team: "Datastores Non-Critical",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for name, d := range cases {
|
||||||
|
want := d
|
||||||
|
t.Run(name, func(t *testing.T) {
|
||||||
|
b, err := os.ReadFile(path.Join("testdata", "slack_events", name))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("parseSlack", func(t *testing.T) {
|
||||||
|
got, err := parseSlack(b)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if got != want.message {
|
||||||
|
assert.DeepEqual(t, want.message, got)
|
||||||
|
t.Errorf("wanted \n\t%+v, got\n\t%+v", want.message, got)
|
||||||
|
}
|
||||||
|
if time := got.Time(); time.Unix() != int64(got.TS) {
|
||||||
|
t.Error("not unix time", got.TS, time)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWrappedSlack(t *testing.T) {
|
||||||
|
b, _ := os.ReadFile("testdata/slack_events/human_thread_message_from_opsgenie_alert.json")
|
||||||
|
b2, _ := json.Marshal(ChannelWrapper{Channel: "X", V: json.RawMessage(b)})
|
||||||
|
|
||||||
|
if got, err := _parseSlack(b); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if got2, err := _parseSlack(b2); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if got2.Event.Channel != "X" {
|
||||||
|
t.Error(got2.Event.Channel)
|
||||||
|
} else if got2.Event.ParentID == "" {
|
||||||
|
t.Error(got2.Event)
|
||||||
|
} else if got.Event.ParentID != got2.Event.ParentID {
|
||||||
|
t.Error(got, got2)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWithPattern(t *testing.T) {
|
||||||
|
cases := map[string]struct {
|
||||||
|
given string
|
||||||
|
pattern string
|
||||||
|
want string
|
||||||
|
}{
|
||||||
|
"pods unavailable on node": {
|
||||||
|
given: `pods are unavailable on node ip-12-345-67-890.xx-yyyyy-1.compute.internal.`,
|
||||||
|
pattern: renderAssetPattern,
|
||||||
|
want: `ip-12-345-67-890.xx-yyyyy-1.compute.internal`,
|
||||||
|
},
|
||||||
|
"redis err": {
|
||||||
|
given: `Redis instance red-abc123 is emitting Some error repeatedly`,
|
||||||
|
pattern: renderAssetPattern,
|
||||||
|
want: `red-abc123`,
|
||||||
|
},
|
||||||
|
"pg err": {
|
||||||
|
given: `db dpg-xyz123 is in a pinch`,
|
||||||
|
pattern: renderAssetPattern,
|
||||||
|
want: `dpg-xyz123`,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for name, d := range cases {
|
||||||
|
c := d
|
||||||
|
t.Run(name, func(t *testing.T) {
|
||||||
|
got := withPattern(c.pattern, c.given)
|
||||||
|
if got != c.want {
|
||||||
|
t.Errorf("withPattern(%q, %q) expected %q but got %q", c.pattern, c.given, c.want, got)
|
||||||
|
}
|
||||||
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
149
slackscrape.go
Normal file
149
slackscrape.go
Normal file
@@ -0,0 +1,149 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"log"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strconv"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/time/rate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SlackScrape describes one unit of Slack history to fetch: a window of a
// channel's history, or — when ThreadTS is set — the replies of one thread.
type SlackScrape struct {
	Latest   int64  // newest unix-seconds bound for the page; sent as "latest"
	Oldest   int64  // oldest bound; omitted from the query when zero
	ThreadTS string // when non-empty, scrape this thread's replies instead of channel history
	Channel  string // Slack channel ID to scrape
	Token    string // Slack API bearer token used for the request
}
|
||||||
|
|
||||||
|
// NewSlackScrapePipeline builds the scrape pipeline: it reads SlackScrape jobs
// from the "slack_channels_to_scrape" queue and writes to "new_persistence".
func NewSlackScrapePipeline(ctx context.Context, cfg Config) (Pipeline, error) {
	writer, err := NewQueue(ctx, "new_persistence", cfg.driver)
	if err != nil {
		return Pipeline{}, err
	}
	// NOTE: the reader is assigned onto cfg BEFORE cfg is captured by
	// newSlackScrapeProcess below — the process re-enqueues follow-up jobs
	// through cfg.slackScrapePipeline.reader, so this ordering matters.
	cfg.slackScrapePipeline.reader, err = NewQueue(ctx, "slack_channels_to_scrape", cfg.driver)
	if err != nil {
		return Pipeline{}, err
	}
	return Pipeline{
		writer:  writer,
		reader:  cfg.slackScrapePipeline.reader,
		process: newSlackScrapeProcess(cfg),
	}, nil
}
|
||||||
|
|
||||||
|
func newSlackScrapeProcess(cfg Config) processFunc {
|
||||||
|
limiter := rate.NewLimiter(0.5, 1)
|
||||||
|
return func(ctx context.Context, jobb []byte) ([]byte, error) {
|
||||||
|
if err := limiter.Wait(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var job SlackScrape
|
||||||
|
if err := json.Unmarshal(jobb, &job); err != nil {
|
||||||
|
return nil, fmt.Errorf("received non SlackScrape payload: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
u := url.URL{
|
||||||
|
Scheme: "https",
|
||||||
|
Host: "slack.com",
|
||||||
|
Path: "/api/conversations.history",
|
||||||
|
}
|
||||||
|
q := url.Values{}
|
||||||
|
q.Set("channel", job.Channel)
|
||||||
|
q.Set("latest", strconv.FormatInt(job.Latest, 10))
|
||||||
|
q.Set("limit", "999")
|
||||||
|
q.Set("inclusive", "true")
|
||||||
|
if job.ThreadTS != "" {
|
||||||
|
u.Path = "/api/conversations.replies"
|
||||||
|
q.Set("ts", job.ThreadTS)
|
||||||
|
}
|
||||||
|
if job.Oldest != 0 {
|
||||||
|
q.Set("oldest", strconv.FormatInt(job.Oldest, 10))
|
||||||
|
}
|
||||||
|
u.RawQuery = q.Encode()
|
||||||
|
url := u.String()
|
||||||
|
|
||||||
|
req, err := http.NewRequest(http.MethodGet, url, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
req.Header.Set("Authorization", "Bearer "+job.Token)
|
||||||
|
req = req.WithContext(ctx)
|
||||||
|
|
||||||
|
httpc := http.Client{Timeout: time.Second}
|
||||||
|
resp, err := httpc.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
defer io.Copy(io.Discard, resp.Body)
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
b, _ := io.ReadAll(resp.Body)
|
||||||
|
return nil, fmt.Errorf("(%d) %s", resp.StatusCode, b)
|
||||||
|
}
|
||||||
|
|
||||||
|
body, err := io.ReadAll(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var page struct {
|
||||||
|
Messages []json.RawMessage
|
||||||
|
}
|
||||||
|
if err := json.Unmarshal(body, &page); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
newLatest := float64(job.Latest)
|
||||||
|
for _, messageJSON := range page.Messages {
|
||||||
|
if cfg.Debug {
|
||||||
|
log.Printf("slackScrapePipeline %s => %s", url, messageJSON)
|
||||||
|
}
|
||||||
|
b, _ := json.Marshal(ChannelWrapper{Channel: job.Channel, V: messageJSON})
|
||||||
|
if err := cfg.slackToModelPipeline.reader.Enqueue(ctx, b); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var peekTS struct {
|
||||||
|
TS float64 `json:"ts,string"`
|
||||||
|
}
|
||||||
|
if err := json.Unmarshal(messageJSON, &peekTS); err == nil && peekTS.TS > 0 && peekTS.TS < newLatest {
|
||||||
|
newLatest = peekTS.TS
|
||||||
|
}
|
||||||
|
if job.ThreadTS == "" {
|
||||||
|
var peek struct {
|
||||||
|
ThreadTS string `json:"thread_ts"`
|
||||||
|
}
|
||||||
|
json.Unmarshal(messageJSON, &peek)
|
||||||
|
if peek.ThreadTS != "" {
|
||||||
|
clone := job
|
||||||
|
clone.ThreadTS = peek.ThreadTS
|
||||||
|
clone.Oldest = 0
|
||||||
|
b, _ := json.Marshal(clone)
|
||||||
|
if err := cfg.slackScrapePipeline.reader.Enqueue(ctx, b); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
log.Printf("fanout thread scrape for %s/%s", job.Channel, peek.ThreadTS)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(page.Messages) == 999 {
|
||||||
|
clone := job
|
||||||
|
clone.Latest = int64(newLatest)
|
||||||
|
b, _ := json.Marshal(clone)
|
||||||
|
if err := cfg.slackScrapePipeline.reader.Enqueue(ctx, b); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
log.Printf("fanout page scrape for %s up to %v", job.Channel, clone.Latest)
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("scraped %v from %s", len(page.Messages), url)
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
257
storage.go
Normal file
257
storage.go
Normal file
@@ -0,0 +1,257 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
	"context"
	"encoding/json"
	"fmt"
	"sort"
	"strings"

	"github.com/breel-render/spoc-bot-vr/model"
)
|
||||||
|
|
||||||
|
// Storage persists model Events, Messages and Threads through a SQL Driver.
// Every column holds the JSON encoding of the corresponding struct field.
type Storage struct {
	driver Driver // database access; schema is managed by NewStorage
}
|
||||||
|
|
||||||
|
// NewStorage creates the events/messages/threads tables if needed, then
// migrates each table by adding one TEXT column per JSON field of the
// corresponding model type (field names discovered via a marshal/unmarshal
// round trip).
func NewStorage(ctx context.Context, driver Driver) (Storage, error) {
	if _, err := driver.ExecContext(ctx, `
	CREATE TABLE IF NOT EXISTS events (ID TEXT UNIQUE);
	CREATE TABLE IF NOT EXISTS messages (ID TEXT UNIQUE);
	CREATE TABLE IF NOT EXISTS threads (ID TEXT UNIQUE);
	`); err != nil {
		return Storage{}, err
	}

	for table, v := range map[string]any{
		"events":   model.Event{},
		"messages": model.Message{},
		"threads":  model.Thread{},
	} {
		// Marshal/unmarshal round trip yields the model's JSON field names.
		// Errors deliberately ignored: model zero values marshal cleanly.
		b, _ := json.Marshal(v)
		var m map[string]struct{}
		json.Unmarshal(b, &m)
		for k := range m {
			if k == `ID` {
				// ID already exists as the UNIQUE key column in CREATE TABLE.
				continue
			}
			// Best-effort migration: the ALTER error is ignored, presumably
			// because it fails when the column already exists — TODO confirm
			// other failure modes are acceptable to swallow here.
			driver.ExecContext(ctx, fmt.Sprintf(`ALTER TABLE %s ADD COLUMN %s TEXT`, table, k))
		}
	}

	return Storage{driver: driver}, nil
}
|
||||||
|
|
||||||
|
// GetEvent loads the event with the given ID from the events table.
func (s Storage) GetEvent(ctx context.Context, ID string) (model.Event, error) {
	v := model.Event{}
	err := s.selectOne(ctx, "events", &v, "ID = $1", ID)
	return v, err
}
|
||||||
|
|
||||||
|
// UpsertEvent inserts the event or, on an ID conflict, updates the row in place.
func (s Storage) UpsertEvent(ctx context.Context, event model.Event) error {
	return s.upsert(ctx, "events", event)
}
|
||||||
|
|
||||||
|
// GetMessage loads the message with the given ID from the messages table.
func (s Storage) GetMessage(ctx context.Context, ID string) (model.Message, error) {
	v := model.Message{}
	err := s.selectOne(ctx, "messages", &v, "ID = $1", ID)
	return v, err
}
|
||||||
|
|
||||||
|
// UpsertMessage inserts the message or, on an ID conflict, updates the row in place.
func (s Storage) UpsertMessage(ctx context.Context, message model.Message) error {
	return s.upsert(ctx, "messages", message)
}
|
||||||
|
|
||||||
|
// GetThread loads the thread with the given ID from the threads table.
func (s Storage) GetThread(ctx context.Context, ID string) (model.Thread, error) {
	v := model.Thread{}
	err := s.selectOne(ctx, "threads", &v, "ID = $1", ID)
	return v, err
}
|
||||||
|
|
||||||
|
// GetEventThreads returns all threads whose EventID matches, ordered by TS.
func (s Storage) GetEventThreads(ctx context.Context, ID string) ([]model.Thread, error) {
	return s.selectThreadsWhere(ctx, "EventID = $1", ID)
}
|
||||||
|
|
||||||
|
// GetThreadMessages returns all messages whose ThreadID matches, ordered by TS.
func (s Storage) GetThreadMessages(ctx context.Context, ID string) ([]model.Message, error) {
	return s.selectMessagesWhere(ctx, "ThreadID = $1", ID)
}
|
||||||
|
|
||||||
|
// UpsertThread inserts the thread or, on an ID conflict, updates the row in place.
func (s Storage) UpsertThread(ctx context.Context, thread model.Thread) error {
	return s.upsert(ctx, "threads", thread)
}
|
||||||
|
|
||||||
|
func (s Storage) selectThreadsWhere(ctx context.Context, clause string, args ...any) ([]model.Thread, error) {
|
||||||
|
keys, _, _, _, err := keysArgsKeyargsValues(model.Thread{})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
args2 := make([]any, len(args))
|
||||||
|
for i := range args {
|
||||||
|
args2[i], _ = json.Marshal(args[i])
|
||||||
|
}
|
||||||
|
scanTargets := make([]any, len(keys))
|
||||||
|
|
||||||
|
q := fmt.Sprintf(`
|
||||||
|
SELECT %s FROM threads WHERE %s
|
||||||
|
ORDER BY TS ASC
|
||||||
|
`, strings.Join(keys, ", "), clause)
|
||||||
|
rows, err := s.driver.QueryContext(ctx, q, args2...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
|
||||||
|
var result []model.Thread
|
||||||
|
for rows.Next() {
|
||||||
|
for i := range scanTargets {
|
||||||
|
scanTargets[i] = &[]byte{}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := rows.Scan(scanTargets...); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
m := map[string]json.RawMessage{}
|
||||||
|
for i, k := range keys {
|
||||||
|
m[k] = *scanTargets[i].(*[]byte)
|
||||||
|
}
|
||||||
|
b, _ := json.Marshal(m)
|
||||||
|
|
||||||
|
var one model.Thread
|
||||||
|
if err := json.Unmarshal(b, &one); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
result = append(result, one)
|
||||||
|
}
|
||||||
|
|
||||||
|
return result, rows.Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s Storage) selectMessagesWhere(ctx context.Context, clause string, args ...any) ([]model.Message, error) {
|
||||||
|
keys, _, _, _, err := keysArgsKeyargsValues(model.Message{})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
args2 := make([]any, len(args))
|
||||||
|
for i := range args {
|
||||||
|
args2[i], _ = json.Marshal(args[i])
|
||||||
|
}
|
||||||
|
scanTargets := make([]any, len(keys))
|
||||||
|
|
||||||
|
q := fmt.Sprintf(`
|
||||||
|
SELECT %s FROM messages WHERE %s
|
||||||
|
ORDER BY TS ASC
|
||||||
|
`, strings.Join(keys, ", "), clause)
|
||||||
|
rows, err := s.driver.QueryContext(ctx, q, args2...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
|
||||||
|
var result []model.Message
|
||||||
|
for rows.Next() {
|
||||||
|
for i := range scanTargets {
|
||||||
|
scanTargets[i] = &[]byte{}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := rows.Scan(scanTargets...); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
m := map[string]json.RawMessage{}
|
||||||
|
for i, k := range keys {
|
||||||
|
m[k] = *scanTargets[i].(*[]byte)
|
||||||
|
}
|
||||||
|
b, _ := json.Marshal(m)
|
||||||
|
|
||||||
|
var one model.Message
|
||||||
|
if err := json.Unmarshal(b, &one); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
result = append(result, one)
|
||||||
|
}
|
||||||
|
|
||||||
|
return result, rows.Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
// selectOne loads a single row from table into v (a pointer to a model
// struct). Columns are derived from v's JSON fields; each scanned column's raw
// JSON is reassembled into one object and unmarshaled into v. When no row
// matches, the driver's scan error is returned — presumably sql.ErrNoRows;
// confirm against the Driver implementation.
func (s Storage) selectOne(ctx context.Context, table string, v any, clause string, args ...any) error {
	// Rough arity check: counting "$" occurrences miscounts repeated
	// placeholders (e.g. "$1 OR $1") — acceptable for the simple clauses used here.
	if questions := strings.Count(clause, "$"); questions != len(args) {
		return fmt.Errorf("expected %v args for clause but found %v", questions, len(args))
	}

	keys, _, _, _, err := keysArgsKeyargsValues(v)
	if err != nil {
		return err
	}

	// NOTE: mutates the caller's variadic slice in place, replacing each
	// argument with its JSON encoding to match the stored column format.
	for i := range args {
		args[i], _ = json.Marshal(args[i])
	}

	q := fmt.Sprintf(`
	SELECT %s FROM %s WHERE %s
	`, strings.Join(keys, ", "), table, clause)
	row := s.driver.QueryRowContext(ctx, q, args...)
	if err := row.Err(); err != nil {
		return err
	}

	// Scan every column as raw bytes; types are recovered via JSON below.
	scanTargets := make([]any, len(keys))
	for i := range scanTargets {
		scanTargets[i] = &[]byte{}
	}
	if err := row.Scan(scanTargets...); err != nil {
		return err
	}

	// Rebuild one JSON object from the scanned columns and decode into v.
	m := map[string]json.RawMessage{}
	for i, k := range keys {
		m[k] = *scanTargets[i].(*[]byte)
	}
	b, _ := json.Marshal(m)
	return json.Unmarshal(b, v)
}
|
||||||
|
|
||||||
|
func (s Storage) upsert(ctx context.Context, table string, v any) error {
|
||||||
|
keys, args, keyArgs, values, err := keysArgsKeyargsValues(v)
|
||||||
|
if err != nil || len(keys) == 0 {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
q := fmt.Sprintf(`
|
||||||
|
INSERT INTO %s (%s) VALUES (%s)
|
||||||
|
ON CONFLICT (ID) DO UPDATE SET %s
|
||||||
|
`, table, strings.Join(keys, ", "), strings.Join(args, ", "), strings.Join(keyArgs, ", "))
|
||||||
|
if result, err := s.driver.ExecContext(ctx, q, values...); err != nil {
|
||||||
|
return err
|
||||||
|
} else if n, err := result.RowsAffected(); err != nil {
|
||||||
|
return err
|
||||||
|
} else if n != 1 {
|
||||||
|
return fmt.Errorf("UpsertMessage affected %v rows", n)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// keysArgsKeyargsValues derives, from v's JSON encoding, the parallel slices
// needed to build SQL statements: column names (sorted so generated SQL is
// deterministic), "$N" placeholders, "key=$N" assignment pairs, and the raw
// JSON value bytes, all in matching order. v must marshal to a JSON object.
func keysArgsKeyargsValues(v any) ([]string, []string, []string, []any, error) {
	b, err := json.Marshal(v)
	if err != nil {
		return nil, nil, nil, nil, err
	}
	var m map[string]json.RawMessage
	if err := json.Unmarshal(b, &m); err != nil {
		return nil, nil, nil, nil, err
	}

	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	// Map iteration order is random; sort so the same model type always
	// produces the same statement text (stable logs, cacheable statements).
	sort.Strings(keys)

	args := make([]string, len(keys))
	keyArgs := make([]string, len(keys))
	values := make([]any, len(keys))
	for i, k := range keys {
		args[i] = fmt.Sprintf("$%d", i+1)
		keyArgs[i] = fmt.Sprintf("%s=$%d", k, i+1)
		values[i] = []byte(m[k])
	}

	return keys, args, keyArgs, values, nil
}
|
||||||
150
storage_test.go
Normal file
150
storage_test.go
Normal file
@@ -0,0 +1,150 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"math/rand"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/breel-render/spoc-bot-vr/model"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestStorage exercises Storage end to end against a test driver: table
// creation/migration via NewStorage, then upsert/get round trips for each
// model type, plus the two WHERE-clause list queries.
func TestStorage(t *testing.T) {
	ctx, can := context.WithTimeout(context.Background(), time.Minute)
	defer can()

	s, err := NewStorage(ctx, NewTestDriver(t))
	if err != nil {
		t.Fatal(err)
	}

	t.Run("upsert get event", func(t *testing.T) {
		m := model.NewEvent(
			"ID",
			"URL",
			1,
			"Name",
			"Asset",
			"Datacenter",
			"Team",
			true,
		)

		// Upserting the same value twice verifies that a no-op update succeeds.
		if err := s.UpsertEvent(ctx, m); err != nil {
			t.Fatal("unexpected error on insert:", err)
		} else if err := s.UpsertEvent(ctx, m); err != nil {
			t.Fatal("unexpected error on noop update:", err)
		}

		if got, err := s.GetEvent(ctx, m.ID); err != nil {
			t.Fatal("unexpected error on get:", err)
		} else if got != m {
			t.Fatal("unexpected result from get:", got)
		}
	})

	t.Run("upsert get thread", func(t *testing.T) {
		m := model.NewThread(
			"ID",
			"URL",
			1,
			"Channel",
			"EventID",
		)

		// Upserting the same value twice verifies that a no-op update succeeds.
		if err := s.UpsertThread(ctx, m); err != nil {
			t.Fatal("unexpected error on insert:", err)
		} else if err := s.UpsertThread(ctx, m); err != nil {
			t.Fatal("unexpected error on noop update:", err)
		}

		if got, err := s.GetThread(ctx, m.ID); err != nil {
			t.Fatal("unexpected error on get:", err)
		} else if got != m {
			t.Fatal("unexpected result from get:", got)
		}
	})

	t.Run("upsert get message", func(t *testing.T) {
		m := model.NewMessage(
			"ID",
			1,
			"Author",
			"Plaintext",
			"ThreadID",
		)

		// Upserting the same value twice verifies that a no-op update succeeds.
		if err := s.UpsertMessage(ctx, m); err != nil {
			t.Fatal("unexpected error on insert:", err)
		} else if err := s.UpsertMessage(ctx, m); err != nil {
			t.Fatal("unexpected error on noop update:", err)
		}

		if got, err := s.GetMessage(ctx, m.ID); err != nil {
			t.Fatal("unexpected error on get:", err)
		} else if got != m {
			t.Fatal("unexpected result from get:", got)
		}
	})

	t.Run("get thread messages", func(t *testing.T) {
		// Random thread ID isolates this subtest's rows from earlier upserts.
		thread := fmt.Sprintf("thread-%d", rand.Int())
		m := model.NewMessage(
			"ID",
			1,
			"Author",
			"Plaintext",
			thread,
		)

		if err := s.UpsertMessage(ctx, m); err != nil {
			t.Fatal("unexpected error on insert:", err)
		} else if m2, err := s.GetMessage(ctx, m.ID); err != nil {
			t.Fatal("unexpected error on upsert-get:", err)
		} else if m2 != m {
			t.Errorf("expected %+v but got %+v", m, m2)
		}

		msgs, err := s.GetThreadMessages(ctx, thread)
		if err != nil {
			t.Fatal(err)
		} else if len(msgs) != 1 {
			t.Fatal(msgs)
		} else if msgs[0].ThreadID != m.ThreadID {
			t.Fatal(msgs[0].ThreadID)
		} else if msgs[0] != m {
			t.Fatalf("wanted msgs like %+v but got %+v", m, msgs[0])
		}
	})

	t.Run("get event threads", func(t *testing.T) {
		// Random event ID isolates this subtest's rows from earlier upserts.
		event := fmt.Sprintf("event-%d", rand.Int())
		m := model.NewThread(
			"ID",
			"URL",
			1,
			"Channel",
			event,
		)

		if err := s.UpsertThread(ctx, m); err != nil {
			t.Fatal("unexpected error on insert:", err)
		} else if m2, err := s.GetThread(ctx, m.ID); err != nil {
			t.Fatal("unexpected error on upsert-get:", err)
		} else if m2 != m {
			t.Errorf("expected %+v but got %+v", m, m2)
		}

		msgs, err := s.GetEventThreads(ctx, event)
		if err != nil {
			t.Fatal(err)
		} else if len(msgs) != 1 {
			t.Fatal(msgs)
		} else if msgs[0].EventID != m.EventID {
			t.Fatal(msgs[0].EventID)
		} else if msgs[0] != m {
			t.Fatalf("wanted msgs like %+v but got %+v", m, msgs[0])
		}
	})
}
|
||||||
3
testdata/slack_events/opsgenie_alert.json
vendored
3
testdata/slack_events/opsgenie_alert.json
vendored
@@ -29,7 +29,8 @@
|
|||||||
"attachments": [
|
"attachments": [
|
||||||
{
|
{
|
||||||
"id": 1,
|
"id": 1,
|
||||||
"color": "F4511E",
|
"realcolor": "F4511E",
|
||||||
|
"color": "2ecc71",
|
||||||
"fallback": "New alert: \"[Grafana]: Firing: Alertconfig Workflow Failed\" <https://opsg.in/a/i/render/38152bc5-bc5d-411d-9feb-d285af5b6481-1712927439305|11071>\nTags: alertname:Alertconfig Workflow Failed, grafana_folder:Datastores, rule_uid:a7639f7e-6950-41be-850a-b22119f74cbb",
|
"fallback": "New alert: \"[Grafana]: Firing: Alertconfig Workflow Failed\" <https://opsg.in/a/i/render/38152bc5-bc5d-411d-9feb-d285af5b6481-1712927439305|11071>\nTags: alertname:Alertconfig Workflow Failed, grafana_folder:Datastores, rule_uid:a7639f7e-6950-41be-850a-b22119f74cbb",
|
||||||
"text": "At least one alertconfig run has failed unexpectedly.\nDashboard: <https://grafana.render.com/d/VLZU83YVk?orgId=1>\nPanel: <https://grafana.render.com/d/VLZU83YVk?orgId=1&viewPanel=17>\nSource: <https://grafana.render.com/alerting/grafana/fa7b06b8-b4d8-4979-bce7-5e1c432edd81/view?orgId=1>",
|
"text": "At least one alertconfig run has failed unexpectedly.\nDashboard: <https://grafana.render.com/d/VLZU83YVk?orgId=1>\nPanel: <https://grafana.render.com/d/VLZU83YVk?orgId=1&viewPanel=17>\nSource: <https://grafana.render.com/alerting/grafana/fa7b06b8-b4d8-4979-bce7-5e1c432edd81/view?orgId=1>",
|
||||||
"title": "#11071: [Grafana]: Firing: Alertconfig Workflow Failed",
|
"title": "#11071: [Grafana]: Firing: Alertconfig Workflow Failed",
|
||||||
|
|||||||
Reference in New Issue
Block a user