531 lines
14 KiB
Go
Executable File
531 lines
14 KiB
Go
Executable File
// Package config provides common methods to
|
|
// handle maps of kafka configurations.
|
|
package config
|
|
|
|
import (
|
|
"encoding/json"
|
|
"errors"
|
|
"fmt"
|
|
"strconv"
|
|
"strings"
|
|
|
|
"gitlab-app.eng.qops.net/golang/qmp/internal/types"
|
|
)
|
|
|
|
// QKConfig wraps the kafka configuration options
// for a QProducer, QConsumer or Topic Class.
type QKConfig struct {
	// Properties has a map of properties for all producers and consumers.
	// The map is indexed with the producer or consumer type code and the
	// property name as found in a .properties file. For example,
	// bootstrapServers := Properties[config.CBulk]["bootstrap.servers"]
	// has the bulk consumer's bootstrap servers.
	Properties map[types.QPCType]map[string]string
	// PropertyPrefix identifies the kind of configuration (e.g. "TC",
	// "QPL" or "QCL") and is used both in error messages and when
	// building environment override names (see loadPropertyMap).
	PropertyPrefix propertyPrefix
	// sysEnvironment holds the environment/system properties consulted
	// by GetOverride; it is captured at construction time.
	sysEnvironment map[string]string
}
|
|
|
|
// propertyPrefix is the minimal interface this package needs from the
// types.*PropertyPrefix values: a String() form that appears in error
// messages and environment override names.
type propertyPrefix interface {
	String() string
}
|
|
|
|
// NewQConsumerConfig creates a generic environment/system property
|
|
// store using the given environment and properties and creates a
|
|
// QKafkaConfig specific to Kafka consumers.
|
|
func NewQConsumerConfig(env map[string]string) (*QKConfig, error) {
|
|
typeMap := make(map[types.QPCType]string)
|
|
for k, v := range types.GetQCTypes() {
|
|
typeMap[k] = v
|
|
}
|
|
return newQKafkaConfig(env, types.ConsumerPropertyPrefix, typeMap)
|
|
}
|
|
|
|
// NewQProducerConfig creates a generic environment/system property
|
|
// store using the given environment and properties and creates a
|
|
// QKafkaConfig specific to Kafka producers.
|
|
func NewQProducerConfig(env map[string]string) (*QKConfig, error) {
|
|
typeMap := make(map[types.QPCType]string)
|
|
for k, v := range types.GetQPTypes() {
|
|
typeMap[k] = v
|
|
}
|
|
return newQKafkaConfig(env, types.ProducerPropertyPrefix, typeMap)
|
|
}
|
|
|
|
// NewTopicClassConfig creates a generic environment/system property
|
|
// store using the given environment and properties and creates a
|
|
// QKafkaConfig specific to the topic classes A, B, and C defined in
|
|
// the QMP Kafka Specification.
|
|
func NewTopicClassConfig(env map[string]string) (*QKConfig, error) {
|
|
typeMap := make(map[types.QPCType]string)
|
|
for k, v := range types.GetTCTypes() {
|
|
typeMap[k] = v
|
|
}
|
|
return newQKafkaConfig(env, types.TopicClassPropertyPrefix, typeMap)
|
|
}
|
|
|
|
func newQKafkaConfig(sysEnv map[string]string, prefix propertyPrefix, types map[types.QPCType]string) (*QKConfig, error) {
|
|
qkc := QKConfig{
|
|
PropertyPrefix: prefix,
|
|
sysEnvironment: sysEnv,
|
|
}
|
|
loadedProperties, err := loadPropertiesMap(types, prefix, &qkc)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
qkc.Properties = loadedProperties
|
|
return &qkc, nil
|
|
}
|
|
|
|
// GetProperties returns the set of configuration
|
|
// options for the specified QProducer or QConsumer Type.
|
|
func (qkc *QKConfig) GetProperties(pcType types.QPCType) (map[string]string, error) {
|
|
v, ok := qkc.Properties[pcType]
|
|
if !ok {
|
|
return nil, fmt.Errorf("unknown %s type %d", qkc.PropertyPrefix, pcType)
|
|
}
|
|
return v, nil
|
|
}
|
|
|
|
// SetTypeConfiguration sets the given producer or consumer
|
|
// type's configuration to the given map[string]string.
|
|
func (qkc *QKConfig) SetTypeConfiguration(pcType types.QPCType, configuration map[string]string) error {
|
|
if err := qkc.HasType(pcType); err != nil {
|
|
return err
|
|
}
|
|
newConfiguration, err := loadPropertyMap(pcType, qkc.PropertyPrefix, qkc, configuration)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
qkc.Properties[pcType] = newConfiguration
|
|
return nil
|
|
}
|
|
|
|
// SetTypeValue sets the given producer or consumer type's
|
|
// configuration key to the given value.
|
|
func (qkc *QKConfig) SetTypeValue(pcType types.QPCType, key string, value string) error {
|
|
if err := qkc.HasType(pcType); err != nil {
|
|
return err
|
|
}
|
|
qkc.Properties[pcType][key] = value
|
|
return nil
|
|
}
|
|
|
|
// HasType returns an error if the given type is not in the
|
|
// QKConfig.
|
|
func (qkc *QKConfig) HasType(pcType types.QPCType) error {
|
|
if _, ok := qkc.Properties[pcType]; !ok {
|
|
return fmt.Errorf("unknown %s type %d", qkc.PropertyPrefix, pcType)
|
|
}
|
|
return nil
|
|
}
|
|
|
|
// GetOverride gets the environment or system property's value if
|
|
// it exists or defaults to the given value.
|
|
func (qkc *QKConfig) GetOverride(name string, defaultValue string) string {
|
|
str, ok := qkc.sysEnvironment[name]
|
|
if ok {
|
|
return str
|
|
}
|
|
return defaultValue
|
|
}
|
|
|
|
// GetOverrideInt gets the environment or system property's value if
|
|
// it exists or defaults to the given value.
|
|
func (qkc *QKConfig) GetOverrideInt(name string, defaultValue int) (int, error) {
|
|
str := qkc.GetOverride(name, strconv.Itoa(defaultValue))
|
|
return strconv.Atoi(str)
|
|
}
|
|
|
|
// ApplyEnvironmentOverrides checks for each key in the properties map
|
|
// for an environment or system property value overriding it.
|
|
func (qkc *QKConfig) ApplyEnvironmentOverrides(prefix string, properties map[string]string) {
|
|
for k := range properties {
|
|
overrideProperty := fmt.Sprintf("%s_%s", strings.ToUpper(prefix), strings.Replace(strings.ToUpper(k), ".", "_", -1))
|
|
override := qkc.GetOverride(overrideProperty, "")
|
|
if override != "" {
|
|
properties[k] = override
|
|
}
|
|
}
|
|
}
|
|
|
|
func loadHardcodedProperties(prefix propertyPrefix, typeString string) (map[string]string, error) {
|
|
switch prefix.String() {
|
|
case "TC":
|
|
switch typeString {
|
|
case "A":
|
|
return tcA(), nil
|
|
case "B":
|
|
return tcB(), nil
|
|
case "C":
|
|
return tcC(), nil
|
|
}
|
|
case "QPL":
|
|
switch typeString {
|
|
case "Bulk":
|
|
return pBulk(), nil
|
|
case "Durable":
|
|
return pDurable(), nil
|
|
}
|
|
case "QCL":
|
|
switch typeString {
|
|
case "Bulk":
|
|
return cBulk(), nil
|
|
case "Background_Job":
|
|
return cBJ(), nil
|
|
case "Record_State":
|
|
return cRS(), nil
|
|
case "Idempotent_Background_Job":
|
|
return cIBJ(), nil
|
|
}
|
|
}
|
|
return nil, fmt.Errorf("cannot find %s_%s properties", prefix.String(), typeString)
|
|
}
|
|
|
|
func loadPropertiesMap(codes map[types.QPCType]string, prefix propertyPrefix, qkc *QKConfig) (map[types.QPCType]map[string]string, error) {
|
|
loadedProperties := make(map[types.QPCType]map[string]string)
|
|
for k, v := range codes {
|
|
templateProperties, err := loadHardcodedProperties(prefix, v)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
overloadedProperty, err := loadPropertyMap(k, prefix, qkc, templateProperties)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
loadedProperties[k] = overloadedProperty
|
|
}
|
|
return loadedProperties, nil
|
|
}
|
|
|
|
func loadPropertyMap(code types.QPCType, prefix propertyPrefix, qkc *QKConfig, inputConf map[string]string) (map[string]string, error) {
|
|
codeStr, err := code.String()
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
loadedProperty := make(map[string]string)
|
|
for k, v := range inputConf {
|
|
loadedProperty[k] = v
|
|
}
|
|
if _, ok := loadedProperty["bootstrap.servers"]; !ok {
|
|
loadedProperty["bootstrap.servers"] = ""
|
|
}
|
|
overridePrefix := fmt.Sprintf("%s_%s", prefix.String(), strings.ToUpper(codeStr))
|
|
qkc.ApplyEnvironmentOverrides(overridePrefix, loadedProperty)
|
|
return loadedProperty, nil
|
|
}
|
|
|
|
// MergeMaps returns a new map combining a and b; on duplicate keys
// the value from b wins. Neither input map is modified.
func MergeMaps(a, b map[string]string) map[string]string {
	merged := make(map[string]string, len(a)+len(b))
	// Later sources overwrite earlier ones, so b is applied second.
	for _, src := range []map[string]string{a, b} {
		for k, v := range src {
			merged[k] = v
		}
	}
	return merged
}
|
|
|
|
// CONSUMERS
|
|
func cRS() map[string]string {
|
|
v, err := producerConsumerMap("RecordStateConsumer")
|
|
if err == nil {
|
|
return v
|
|
}
|
|
panic(err)
|
|
}
|
|
func cBJ() map[string]string {
|
|
v, err := producerConsumerMap("BackgroundJobConsumer")
|
|
if err == nil {
|
|
return v
|
|
}
|
|
panic(err)
|
|
}
|
|
func cIBJ() map[string]string {
|
|
v, err := producerConsumerMap("IdempotentBackgroundJobConsumer")
|
|
if err == nil {
|
|
return v
|
|
}
|
|
panic(err)
|
|
}
|
|
func cBulk() map[string]string {
|
|
v, err := producerConsumerMap("BulkConsumer")
|
|
if err == nil {
|
|
return v
|
|
}
|
|
panic(err)
|
|
}
|
|
|
|
// PRODUCERS
|
|
func pBulk() map[string]string {
|
|
v, err := producerConsumerMap("BulkProducer")
|
|
if err == nil {
|
|
return v
|
|
}
|
|
panic(err)
|
|
}
|
|
func pDurable() map[string]string {
|
|
v, err := producerConsumerMap("DurableProducer")
|
|
if err == nil {
|
|
return v
|
|
}
|
|
panic(err)
|
|
}
|
|
|
|
func producerConsumerMap(key string) (map[string]string, error) {
|
|
jsonDef := consumerClassDefinitionJSON()
|
|
m := []interface{}{}
|
|
if err := json.Unmarshal([]byte(jsonDef), &m); err != nil {
|
|
return nil, err
|
|
}
|
|
jsonDef = producerClassDefinitionJSON()
|
|
n := []interface{}{}
|
|
if err := json.Unmarshal([]byte(jsonDef), &n); err != nil {
|
|
return nil, err
|
|
}
|
|
n = append(n, m...)
|
|
for _, v := range n {
|
|
mappedV, ok := v.(map[string]interface{})
|
|
if !ok {
|
|
continue
|
|
}
|
|
className, ok := mappedV["class"]
|
|
if !ok {
|
|
continue
|
|
}
|
|
if className == key {
|
|
m, err := mapFromJSON(mappedV)
|
|
switch err {
|
|
case ErrUnexpectedTCJSONArray:
|
|
return nil, ErrUnexpectedPCJSONArray
|
|
case ErrUnexpectedTCJSONType:
|
|
return nil, ErrUnexpectedPCJSONType
|
|
case ErrUnexpectedTCJSONObject:
|
|
return nil, ErrUnexpectedPCJSONObject
|
|
case nil:
|
|
return m, nil
|
|
}
|
|
}
|
|
}
|
|
return nil, errors.New("unknown config lookup " + key)
|
|
}
|
|
|
|
// TOPIC CLASSES
|
|
func tcA() map[string]string {
|
|
v, err := topicClassMap("A")
|
|
if err == nil {
|
|
return v
|
|
}
|
|
panic(err)
|
|
}
|
|
func tcB() map[string]string {
|
|
v, err := topicClassMap("B")
|
|
if err == nil {
|
|
return v
|
|
}
|
|
panic(err)
|
|
}
|
|
func tcC() map[string]string {
|
|
v, err := topicClassMap("C")
|
|
if err == nil {
|
|
return v
|
|
}
|
|
panic(err)
|
|
}
|
|
func topicClassMap(code string) (map[string]string, error) {
|
|
jsonDef := topicClassDefinitionJSON
|
|
m := make(map[string]interface{})
|
|
if err := json.Unmarshal([]byte(jsonDef), &m); err != nil {
|
|
return nil, err
|
|
}
|
|
classMap, err := MapAtMapKey(m, code)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
producerMap, err := MapAtMapKey(classMap, "kafka_producer")
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
consumerMap, err := MapAtMapKey(classMap, "kafka_consumer")
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
for k, v := range consumerMap {
|
|
producerMap[k] = v
|
|
}
|
|
return mapFromJSON(producerMap)
|
|
}
|
|
|
|
// MapAtMapKey extracts a key from a map and asserts it is a map. If
|
|
// not, it returns an empty map.
|
|
func MapAtMapKey(m map[string]interface{}, key string) (map[string]interface{}, error) {
|
|
valueInterface, ok := m[key]
|
|
if !ok {
|
|
return map[string]interface{}{}, ErrExpectedJSONObject
|
|
}
|
|
valueMap, ok := valueInterface.(map[string]interface{})
|
|
if !ok {
|
|
return map[string]interface{}{}, ErrExpectedJSONObject
|
|
}
|
|
return valueMap, nil
|
|
}
|
|
|
|
// mapFromJSON converts a json.Unmarshal() map to a map[string]string.
|
|
func mapFromJSON(input map[string]interface{}) (map[string]string, error) {
|
|
output := make(map[string]string)
|
|
for k, v := range input {
|
|
switch v.(type) {
|
|
case float64:
|
|
output[k] = strconv.FormatInt(int64(v.(float64)), 10)
|
|
case bool:
|
|
output[k] = strconv.FormatBool(v.(bool))
|
|
case []interface{}:
|
|
return nil, ErrUnexpectedTCJSONArray
|
|
case map[string]interface{}:
|
|
return nil, ErrUnexpectedTCJSONObject
|
|
case string:
|
|
output[k] = v.(string)
|
|
case nil:
|
|
default:
|
|
return nil, ErrUnexpectedTCJSONType
|
|
}
|
|
}
|
|
return output, nil
|
|
}
|
|
|
|
// topicClassDefinitionJSON holds the compiled-in broker, producer and
// consumer settings for topic classes A, B and C (see the QMP Kafka
// Specification referenced by NewTopicClassConfig). It is parsed by
// topicClassMap; only the kafka_producer and kafka_consumer sections
// are consumed there — kafka_broker is carried for reference.
var topicClassDefinitionJSON = `
{
    "A": {
        "kafka_broker": {
            "max.message.bytes": 1048576,
            "retention.bytes": -1,
            "retention.ms": 604800000,
            "segment.ms": 604800000,
            "min.insync.replicas": 1,
            "cleanup.policy": "delete"
        },
        "kafka_producer": {
            "max.request.size": 1048576,
            "buffer.memory": 33554432,
            "batch.size": 16384,
            "linger.ms": 0
        },
        "kafka_consumer": {
            "fetch.max.bytes": 54525952
        }
    },
    "B": {
        "kafka_broker": {
            "max.message.bytes": 16809984,
            "retention.bytes": -1,
            "retention.ms": 86400000,
            "segment.ms": 7200000,
            "min.insync.replicas": 3,
            "cleanup.policy": "delete"
        },
        "kafka_producer": {
            "max.request.size": 16777216,
            "buffer.memory": 33554432,
            "batch.size": 16384,
            "linger.ms": 0
        },
        "kafka_consumer": {
            "fetch.max.bytes": 104857600
        }
    },
    "C": {
        "kafka_broker": {
            "max.message.bytes": 4227072,
            "retention.bytes": -1,
            "retention.ms": -1,
            "segment.ms": 31536000000,
            "min.insync.replicas": 1,
            "cleanup.policy": "compact"
        },
        "kafka_producer": {
            "max.request.size": 4194304,
            "buffer.memory": 33554432,
            "batch.size": 16384,
            "linger.ms": 0
        },
        "kafka_consumer": {
            "fetch.max.bytes": 54525952
        }
    }
}
`
|
|
|
|
// consumerClassDefinitionJSON returns the compiled-in consumer class
// definitions as a JSON array; each element carries a "class" name and
// its flat property set, consumed by producerConsumerMap.
//
// Fix: the IdempotentBackgroundJobConsumer entry previously used the
// misspelled key "qmp.record.reties"; it now uses "qmp.record.retries"
// consistently with every other consumer class in this file.
func consumerClassDefinitionJSON() string {
	return `
[
    {
        "class": "BulkConsumer",
        "max.lag.ms": 5000,
        "bootstrap.servers": "core-kafka.service.consul:9092",
        "max.partition.fetch.bytes": 20971520,
        "enable.auto.commit": false,
        "max.poll.records": 100,
        "qmp.record.retries": 5,
        "qmp.poll.timeout": 100,
        "qmp.sleep.time": 1000,
        "auto.offset.reset": "earliest"
    },
    {
        "class": "BackgroundJobConsumer",
        "max.lag.ms": 900000,
        "bootstrap.servers": "core-kafka.service.consul:9092",
        "max.partition.fetch.bytes": 20971520,
        "enable.auto.commit": false,
        "max.poll.records": 1,
        "qmp.record.retries": 5,
        "qmp.poll.timeout": 100,
        "qmp.sleep.time": 1000,
        "auto.offset.reset": "latest"
    },
    {
        "class": "RecordStateConsumer",
        "bootstrap.servers": "core-kafka.service.consul:9092",
        "max.poll.records": 100,
        "auto.offset.reset": "earliest",
        "enable.auto.commit": false
    },
    {
        "class": "IdempotentBackgroundJobConsumer",
        "max.partition.fetch.bytes": 20971520,
        "enable.auto.commit": false,
        "max.poll.records": 1,
        "auto.offset.reset": "earliest",
        "qmp.record.retries": 5,
        "qmp.poll.timeout": 100,
        "qmp.sleep.time": 1000
    }
]
`
}
|
|
// producerClassDefinitionJSON returns the compiled-in producer class
// definitions as a JSON array; each element carries a "class" name and
// its flat property set, consumed by producerConsumerMap.
func producerClassDefinitionJSON() string {
	return `
[
    {
        "class": "DurableProducer",
        "bootstrap.servers": "core-kafka.service.consul:9092",
        "batch.size": 16384,
        "linger.ms": 0,
        "retries": 3,
        "max.in.flight.requests.per.connection": 1,
        "max.request.size": 16809984,
        "acks": "all"
    },
    {
        "class": "BulkProducer",
        "bootstrap.servers": "core-kafka.service.consul:9092",
        "batch.size": 16384,
        "linger.ms": 1000,
        "retries": 1,
        "max.in.flight.requests.per.connection": 100,
        "acks": 1
    }
]
`
}
|