// Package qp provides the QProducer for the qmp-client.
package qp

import (
	"context"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/Shopify/sarama"
	"gitlab-app.eng.qops.net/golang/metrics"
	"gitlab-app.eng.qops.net/golang/qmp/internal/config"
	"gitlab-app.eng.qops.net/golang/qmp/internal/types"
	qsl "gitlab-app.eng.qops.net/golang/qmp/qsl"
	"golang.org/x/time/rate"
)

// Producer implements the qpcl.Producer interface.
type Producer struct {
	static     producerStatic
	reporter   metrics.Reporter
	updateLock *sync.Mutex
	stopOnce   sync.Once
	startOnce  sync.Once
	closed     bool
}

type producerStatic struct {
	clientID string
	brokers  []string
	topic    string
	limiter  *rate.Limiter
	qpcType  types.QPCType
	updates  chan saramaProducer
	sends    chan *msgWHandler
	stop     chan bool
}

type msgWHandler struct {
	handler func(error, qsl.Message)
	PMsg    *sarama.ProducerMessage
	QMsg    qsl.Message
	sent    time.Time
}

type saramaProducer interface {
	AsyncClose()
	Close() error
	Input() chan<- *sarama.ProducerMessage
	Successes() <-chan *sarama.ProducerMessage
	Errors() <-chan *sarama.ProducerError
}

type resultRelayer struct {
	errorsLeft    uint32
	successesLeft uint32
	errors        chan *sarama.ProducerError
	successes     chan *sarama.ProducerMessage
}

type successErrorChannels struct {
	s    chan (<-chan *sarama.ProducerMessage)
	e    chan (<-chan *sarama.ProducerError)
	stop chan bool
}

type inputChannels struct {
	requests chan *msgWHandler
	puts     chan chan<- *sarama.ProducerMessage
	stop     chan bool
}

type processorChannels struct {
	stop     chan bool
	updates  chan saramaProducer
	reporter metrics.Reporter
	topic    string
	typeStr  string
}

// NewProducer creates a Producer that is ready to publish a message to the
// given topic, using the specified configuration, on each Put call.
func NewProducer(producerType types.QPCType, topic string, configuration map[string]string, reporter metrics.Reporter) (*Producer, error) {
	kafkaProducer, static, err := makeProducer(configuration, topic)
	if err != nil {
		return nil, err
	}
	static.qpcType = producerType
	static.updates = make(chan saramaProducer, 1)
	static.stop = make(chan bool)
	static.sends = make(chan *msgWHandler, 100)
	static.updates <- kafkaProducer

	p := &Producer{
		static:     *static,
		reporter:   reporter,
		updateLock: &sync.Mutex{},
		stopOnce:   sync.Once{},
		startOnce:  sync.Once{},
		closed:     false,
	}
	return p, p.process()
}

func parseConfiguration(configuration map[string]string, topic string) (*producerStatic, *sarama.Config, error) {
	params, err := config.GetMandatoryParams(configuration, []string{topic}, "producer")
	if err != nil {
		return nil, nil, err
	}

	// Get the rate limit if one is configured.
	limit := rate.Inf
	limitStr, ok := configuration[config.PutsPerSecond]
	if ok {
		limitFloat, err := strconv.ParseFloat(limitStr, 64)
		if err != nil {
			return nil, nil, err
		}
		limit = rate.Limit(limitFloat)
	}

	// Build the sarama configuration from the remaining settings.
	saramaConf, err := config.ToSaramaConfig(configuration)
	if err != nil {
		return nil, nil, err
	}
	saramaConf.Producer.Return.Successes = true
	saramaConf.Producer.Return.Errors = true

	return &producerStatic{
		clientID: params.ClientID,
		brokers:  params.Brokers,
		topic:    params.Topic[0],
		limiter:  rate.NewLimiter(limit, 1),
	}, saramaConf, nil
}

// Close stops the Producer from sending any more messages to the
// Kafka broker and closes the inner sarama.AsyncProducer.
func (qp *Producer) Close() error {
	qp.updateLock.Lock()
	defer qp.updateLock.Unlock()
	err := ErrStopOnce
	qp.stopOnce.Do(func() {
		if qp.IsClosed() {
			return
		}
		qp.closed = true
		qp.static.stop <- true
		<-qp.static.stop
		err = nil
	})
	return err
}

// GetType returns the QPCType of the Producer.
func (qp *Producer) GetType() types.QPCType {
	return qp.static.qpcType
}

// GetTopic returns the Kafka topic the Producer publishes to.
func (qp *Producer) GetTopic() string {
	return qp.static.topic
}

// Put sends a QMessage from the Producer to the Kafka broker.
func (qp *Producer) Put(QMsg qsl.Message, handler func(error, qsl.Message)) error {
	if qp.IsClosed() {
		return ErrPutOnStopped
	}

	var err error
	var encodedAvroValue []byte
	if QMsg.UsingOriginalBytes() {
		encodedAvroValue, err = QMsg.GetOriginalBytes()
	} else {
		encodedAvroValue, err = QMsg.GetSerializedBytes()
	}
	if err != nil {
		return err
	}

	PMsg := &sarama.ProducerMessage{
		Topic: qp.GetTopic(),
		Value: sarama.ByteEncoder(encodedAvroValue),
	}
	qp.static.sends <- &msgWHandler{PMsg: PMsg, QMsg: QMsg, handler: handler}
	return nil
}

// process starts the goroutines that feed put requests to the sarama
// producer and relay its successes and errors back to the callers' handlers.
func (qp *Producer) process() error {
	typeString, err := qp.GetType().String()
	if err != nil {
		return err
	}
	typeString = strings.ToLower(typeString)

	err = ErrStartOnce
	qp.startOnce.Do(func() {
		err = nil
		inputCh := inputChannels{
			requests: qp.static.sends,
			puts:     make(chan chan<- *sarama.ProducerMessage),
			stop:     make(chan bool),
		}
		succErrCh := successErrorChannels{
			s:    make(chan (<-chan *sarama.ProducerMessage)),
			e:    make(chan (<-chan *sarama.ProducerError)),
			stop: make(chan bool),
		}
		resultRelay := &resultRelayer{
			errors:        make(chan *sarama.ProducerError, 100),
			successes:     make(chan *sarama.ProducerMessage, 100),
			errorsLeft:    uint32(0),
			successesLeft: uint32(0),
		}
		mainCh := &processorChannels{
			stop:     qp.static.stop,
			updates:  qp.static.updates,
			reporter: qp.reporter,
			topic:    qp.GetTopic(),
			typeStr:  typeString,
		}
		go receivePutRequests(inputCh, qp.static.limiter)
		go receivePutResponses(succErrCh, resultRelay)
		go receiveResponsesUpdatesStops(resultRelay, mainCh, succErrCh, inputCh)
	})
	return err
}

// receivePutRequests forwards queued put requests to the current sarama
// input channel, applying the configured rate limit.
func receivePutRequests(inputCh inputChannels, limiter *rate.Limiter) {
	input := <-inputCh.puts
	defer func() { inputCh.stop <- true }()
	for {
		select {
		case msgWithHandle := <-inputCh.requests:
			msgWithHandle.sent = time.Now()
			msgWithHandle.PMsg.Metadata = msgWithHandle
			limiter.Wait(context.Background())
			input <- msgWithHandle.PMsg
		case newInput := <-inputCh.puts:
			input = newInput
		case <-inputCh.stop:
			return
		}
	}
}

// receivePutResponses collects successes and errors from the current sarama
// producer and relays them into the result channels.
func receivePutResponses(succErrCh successErrorChannels, resultRelay *resultRelayer) {
	succC := <-succErrCh.s
	errC := <-succErrCh.e
	defer func() { succErrCh.stop <- true }()
	for {
		select {
		case PErr := <-errC:
			appendProducerError(PErr, resultRelay)
		case PMsg := <-succC:
			appendProducerSuccess(PMsg, resultRelay)
		case newErr := <-succErrCh.e:
			errC = newErr
		case newSucc := <-succErrCh.s:
			succC = newSucc
		case <-succErrCh.stop:
			return
		}
	}
}

// receiveResponsesUpdatesStops dispatches relayed results to their handlers,
// swaps in updated producers, and drives the shutdown sequence.
func receiveResponsesUpdatesStops(resultRelay *resultRelayer, mainCh *processorChannels, succErrCh successErrorChannels, inputCh inputChannels) {
	var kafkaProducer saramaProducer
	var stop chan bool
	oldProducersDoneClosing := make(chan bool)
	oldProducersClosing := uint(0)
	closing := false
	defer func() { stop <- true }()
	for {
		if closing && oldProducersClosing == 0 && resultRelay.errorsLeft == 0 && resultRelay.successesLeft == 0 {
			succErrCh.stop <- true
			<-succErrCh.stop
			return
		}
		select {
		case newKafkaProducer := <-mainCh.updates:
			oldProducer := kafkaProducer
			kafkaProducer = newKafkaProducer
			inputCh.puts <- kafkaProducer.Input()
			succErrCh.s <- kafkaProducer.Successes()
			succErrCh.e <- kafkaProducer.Errors()
			oldProducersClosing++
			go closeOldProducer(oldProducer, resultRelay, oldProducersDoneClosing)
			stop = mainCh.stop
		case PMsg := <-resultRelay.successes:
			if msgWHandle := callProducerSuccessHandler(PMsg, resultRelay); msgWHandle != nil {
				reportPutSuccess(mainCh.reporter, msgWHandle, mainCh.typeStr, mainCh.topic)
			}
		case PErr := <-resultRelay.errors:
			if msgWHandle := callProducerErrorHandler(PErr, resultRelay); msgWHandle != nil {
				reportPutError(mainCh.reporter, mainCh.typeStr, mainCh.topic)
			}
		case <-stop:
			inputCh.stop <- true
			<-inputCh.stop
			oldProducersClosing++
			go closeOldProducer(kafkaProducer, resultRelay, oldProducersDoneClosing)
			closing = true
		case <-oldProducersDoneClosing:
			oldProducersClosing--
		}
	}
}

func callProducerSuccessHandler(pMsg *sarama.ProducerMessage, resultRelay *resultRelayer) *msgWHandler {
	msgWithHandle, ok := pMsg.Metadata.(*msgWHandler)
	if ok {
		if msgWithHandle.handler != nil {
			msgWithHandle.handler(nil, msgWithHandle.QMsg)
			// Adding ^uint32(0) decrements the outstanding-success counter
			// (the standard atomic.AddUint32 decrement idiom).
			atomic.AddUint32(&resultRelay.successesLeft, ^uint32(0))
			return msgWithHandle
		}
	}
	return nil
}

func callProducerErrorHandler(pErr *sarama.ProducerError, resultRelay *resultRelayer) *msgWHandler {
	msgWithHandle, ok := pErr.Msg.Metadata.(*msgWHandler)
	if ok {
		if msgWithHandle.handler != nil {
			msgWithHandle.handler(pErr.Err, msgWithHandle.QMsg)
			// Adding ^uint32(0) decrements the outstanding-error counter.
			atomic.AddUint32(&resultRelay.errorsLeft, ^uint32(0))
			return msgWithHandle
		}
	}
	return nil
}

func appendProducerError(pErr *sarama.ProducerError, resultRelay *resultRelayer) {
	if pErr != nil {
		resultRelay.errors <- pErr
		atomic.AddUint32(&resultRelay.errorsLeft, uint32(1))
	}
}

func appendProducerSuccess(pMsg *sarama.ProducerMessage, resultRelay *resultRelayer) {
	if pMsg != nil {
		resultRelay.successes <- pMsg
		atomic.AddUint32(&resultRelay.successesLeft, uint32(1))
	}
}

// closeOldProducer closes a replaced sarama producer and drains any remaining
// successes and errors into the result relay.
func closeOldProducer(p saramaProducer, resultRelay *resultRelayer, complete chan bool) {
	defer func() { complete <- true }()
	if p == nil {
		return
	}
	p.AsyncClose()
	for m := range p.Successes() {
		appendProducerSuccess(m, resultRelay)
	}
	for m := range p.Errors() {
		appendProducerError(m, resultRelay)
	}
}

func reportPutError(reporter metrics.Reporter, typed string, topic string) {
	reporter.IncCounter("execution.error.count",
		metrics.Tag("measurement", "callback"),
		metrics.Tag("topic", topic),
		metrics.Tag("type", typed),
	)
}

func reportPutSuccess(reporter metrics.Reporter, msgWHandle *msgWHandler, typed string, topic string) error {
	PMsg := msgWHandle.PMsg
	putValue, err := PMsg.Value.Encode()
	if err != nil {
		return err
	}
	reporter.UpdateGauge("execution.put.bytes", int64(len(putValue)),
		metrics.Tag("measurement", "callback"),
		metrics.Tag("topic", topic),
		metrics.Tag("type", typed),
	)
	reporter.RecordTiming("execution.put.time", time.Since(msgWHandle.sent),
		metrics.Tag("measurement", "callback"),
		metrics.Tag("topic", topic),
		metrics.Tag("type", typed),
	)
	return nil
}

// Update changes the configuration of the Producer.
func (qp *Producer) Update(conf map[string]string) error {
	conf[config.ClientID] = qp.static.clientID
	qp.updateLock.Lock()
	defer qp.updateLock.Unlock()
	if qp.IsClosed() {
		return ErrUpdateClosed
	}
	newProducer, _, err := makeProducer(conf, qp.GetTopic())
	if err != nil {
		return err
	}
	select {
	case qp.static.updates <- newProducer:
	case <-time.After(time.Second * 5):
		return ErrUpdateNoResponse
	}
	return nil
}

// makeProducer builds the static producer state and a sarama.AsyncProducer
// from the given configuration.
func makeProducer(conf map[string]string, topic string) (saramaProducer, *producerStatic, error) {
	static, kafkaConf, err := parseConfiguration(conf, topic)
	if err != nil {
		return nil, nil, err
	}
	prod, err := sarama.NewAsyncProducer(static.brokers, kafkaConf)
	return prod, static, err
}

// IsClosed returns whether the Producer has been Close()d.
func (qp *Producer) IsClosed() bool {
	return qp.closed
}

// GetClientID returns the clientID of the producer.
func (qp *Producer) GetClientID() string {
	return qp.static.clientID
}
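
// Minimal usage sketch (not part of the package API): the caller is assumed
// to already hold a types.QPCType value, a metrics.Reporter, a qsl.Message,
// and a configuration map understood by the internal config package; the
// helper name putOnce below is hypothetical.
//
//	func putOnce(producerType types.QPCType, topic string, conf map[string]string,
//		reporter metrics.Reporter, msg qsl.Message) error {
//		p, err := NewProducer(producerType, topic, conf, reporter)
//		if err != nil {
//			return err
//		}
//		done := make(chan error, 1)
//		// The handler runs asynchronously once the broker acknowledges
//		// or rejects the message.
//		if err := p.Put(msg, func(putErr error, _ qsl.Message) { done <- putErr }); err != nil {
//			return err
//		}
//		putErr := <-done
//		if closeErr := p.Close(); closeErr != nil {
//			return closeErr
//		}
//		return putErr
//	}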