// Package config provides configuration instances according
// to QPCL specifications.
package config

import (
	"fmt"
	"strconv"
	"strings"
	"time"

	"github.com/Shopify/sarama"
	"github.com/bsm/sarama-cluster"
	"github.com/rcrowley/go-metrics"
)

// ProducerConsumerParams is the bare necessities required
// by both producer and consumer configurations.
type ProducerConsumerParams struct {
	Brokers  []string
	ClientID string
	Topic    []string
	Group    string
}

// GetMandatoryParams asserts the bare necessity items exist in the
// configuration map to create either a producer or a consumer.
func GetMandatoryParams(configuration map[string]string, topic []string, group string) (*ProducerConsumerParams, error) {
	fill := &ProducerConsumerParams{}

	brokersString, ok := configuration[BootstrapServers]
	if !ok || brokersString == "" {
		return nil, ErrMissingServers
	}
	fill.Brokers = strings.Split(brokersString, ",")

	fill.ClientID, ok = configuration[ClientID]
	if !ok || fill.ClientID == "" {
		return nil, ErrMissingClientID
	}

	if len(topic) == 0 {
		return nil, ErrInvalidTopic
	}
	// Drop empty topic names in place by swapping them with the last
	// element; the order of the remaining topics is not preserved.
	l := len(topic)
	for i := 0; i < l; i++ {
		if len(topic[i]) == 0 {
			l--
			topic[i] = topic[l]
			topic = topic[:l]
			i--
		}
	}
	if len(topic) == 0 {
		return nil, ErrInvalidTopic
	}
	fill.Topic = topic

	if group == "" {
		return nil, ErrInvalidGroup
	}
	if g, ok := configuration[GroupID]; ok && group != g {
		return nil, ErrConflictingGroup
	}
	fill.Group = group

	return fill, nil
}

// able to set with cluster
const (
	KeySerializerClass              = "key.serializer"
	ValueSerializerClass            = "value.serializer"
	ClientID                        = "client.id"
	Retries                         = "retries"
	BatchSize                       = "batch.size"
	LingerMS                        = "linger.ms"
	FetchMaxBytes                   = "fetch.max.bytes"
	AutoOffsetReset                 = "auto.offset.reset"
	Acks                            = "acks"
	BufferMemory                    = "buffer.memory"
	CompressionType                 = "compression.type"
	CompressionLevel                = "compression.level"
	MaxInFlightRequstsPerConnection = "max.in.flight.requests.per.connection"
	MaxPartitionFetchBytes          = "max.partition.fetch.bytes"
	EnableAutoCommit                = "enable.auto.commit"
	MaxRequestSize                  = "max.request.size"
	SegmentMS                       = "segment.ms"

	// these mean nothing in go
	QMReaderKey        = "qmreaderkey"
	PartitionerClass   = "partitioner.class"
	InterceptorClasses = "interceptor.classes"
	KeySerializer      = "key.serializer"
	ValueSerializer    = "value.serializer"
	KeyDeserializer    = "key.deserializer"
	ValueDeserializer  = "value.deserializer"
	QMPPollTimeout     = "qmp.poll.timeout"
	QMPSleepTime       = "qmp.sleep.time"
	// QMPRecordRetries keeps the misspelled key ("reties");
	// QMPRecordRetriesFuture carries the corrected spelling.
	QMPRecordRetries       = "qmp.record.reties"
	QMPRecordRetriesFuture = "qmp.record.retries"
	CleanupPolicy          = "cleanup.policy"
	RetentionMS            = "retention.ms"
	RetentionBytes         = "retention.bytes"
	MinInsyncReplicas      = "min.insync.replicas"
)

// able to set indirectly with cluster or on NewQConsumer/Producer
const (
	MaxMessageBytes  = "max.message.bytes"
	BootstrapServers = "bootstrap.servers"
	GroupID          = "group.id"
	QMPThreads       = "qmp.threads"
	MaxLagMS         = "max.lag.ms"
	MaxPollRecords   = "max.poll.records"
	PutsPerSecond    = "rate.limit"
)

const maxRequestSizeDefault = "16777216" // 16 MiB
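// exampleMandatoryParams is an illustrative sketch only, not part of the QPCL
// surface: it shows one way GetMandatoryParams might be called. The broker
// addresses, topic, and group names below are made-up placeholder values.
func exampleMandatoryParams() (*ProducerConsumerParams, error) {
	cfg := map[string]string{
		BootstrapServers: "broker-1:9092,broker-2:9092", // comma-separated list split into Brokers
		ClientID:         "qpcl-example-client",
	}
	// Empty topic names are stripped; at least one non-empty topic and a
	// non-empty group are required, and the group must not conflict with
	// any group.id present in the map.
	return GetMandatoryParams(cfg, []string{"example-topic"}, "example-group")
}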
// ToSaramaConfig converts a map of configuration options from
// strings to an initialized sarama.Config object.
func ToSaramaConfig(srcMap map[string]string) (*sarama.Config, error) {
	// Disable go-metrics collection inside sarama.
	metrics.UseNilMetrics = true

	// Work on a copy so the caller's map is never mutated.
	m := make(map[string]string)
	for k, v := range srcMap {
		m[k] = v
	}

	config := sarama.NewConfig()

	// max.message.bytes, when present, overrides max.request.size;
	// otherwise fall back to the 16 MiB default. The result drives
	// sarama's Producer.MaxMessageBytes.
	if v, ok := m[MaxMessageBytes]; ok {
		m[MaxRequestSize] = v
	}
	if _, ok := m[MaxRequestSize]; !ok {
		m[MaxRequestSize] = maxRequestSizeDefault
	}
	if err := tryAssignInt(&config.Producer.MaxMessageBytes, m[MaxRequestSize], MaxRequestSize); err != nil {
		return nil, err
	}

	for k, v := range m {
		if !skipProp(k) {
			if err := initProp(config, k, v); err != nil {
				return nil, err
			}
		}
	}

	config.Version = sarama.V1_0_0_0
	if err := config.Validate(); err != nil {
		return nil, err
	}
	return config, nil
}

// skipProp returns 'true' if the configuration option is
// handled outside of the sarama.Config/cluster.Config struct.
func skipProp(k string) bool {
	switch k {
	case BootstrapServers, GroupID, MaxMessageBytes, QMPThreads, MaxRequestSize,
		MaxLagMS, QMPSleepTime, CleanupPolicy, MinInsyncReplicas, RetentionMS,
		RetentionBytes, PartitionerClass, InterceptorClasses, KeySerializer,
		SegmentMS, KeyDeserializer, ValueSerializer, QMPRecordRetries,
		QMPRecordRetriesFuture, QMReaderKey, ValueDeserializer:
		return true
	}
	return false
}

// ToClusterConfig converts a map of configuration options from
// strings to an initialized cluster.Config object.
func ToClusterConfig(m map[string]string) (*cluster.Config, error) {
	config, err := ToSaramaConfig(m)
	if err != nil {
		return nil, err
	}
	clusterConfig := cluster.NewConfig()
	clusterConfig.Config = *config
	if err := clusterConfig.Validate(); err != nil {
		return nil, err
	}
	return clusterConfig, nil
}
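// exampleSaramaConfig is an illustrative sketch only (it is not called
// anywhere in the package): it shows how a Kafka-style property map might be
// converted with ToSaramaConfig. All values are placeholders. It also shows
// that max.message.bytes, when present, takes the place of max.request.size
// before the 16 MiB default would apply.
func exampleSaramaConfig() (*sarama.Config, error) {
	props := map[string]string{
		BootstrapServers: "broker-1:9092", // skipped by skipProp; handled outside sarama.Config
		ClientID:         "qpcl-example-client",
		Acks:             "all",     // mapped to RequiredAcks(-1), i.e. wait for all replicas
		CompressionType:  "gzip",    // mapped to sarama.CompressionGZIP
		LingerMS:         "100",     // plain integers are read as milliseconds
		MaxMessageBytes:  "1048576", // also becomes the effective max.request.size
	}
	return ToSaramaConfig(props)
}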
func initProp(config *sarama.Config, fieldname string, value string) error {
	switch fieldname {
	case BufferMemory:
		v, err := strconv.Atoi(value)
		if err != nil {
			return badFieldValue(fieldname, "int")
		}
		// buffer.memory is a byte budget; translate it into a channel
		// buffer size measured in maximum-sized messages.
		config.ChannelBufferSize = v / config.Producer.MaxMessageBytes
		return nil
	case Acks:
		if value == "all" {
			value = "-1"
		}
		v, err := strconv.ParseInt(value, 10, 16)
		if err != nil {
			return badFieldValue(fieldname, "int16")
		}
		config.Producer.RequiredAcks = sarama.RequiredAcks(v)
		return nil
	case FetchMaxBytes, MaxPartitionFetchBytes:
		return tryAssignInt32(&config.Consumer.Fetch.Max, value, fieldname)
	case ClientID:
		config.ClientID = value
		return nil
	case Retries:
		return tryAssignInt(&config.Producer.Retry.Max, value, fieldname)
	case BatchSize:
		return tryAssignInt(&config.Producer.Flush.MaxMessages, value, fieldname)
	case LingerMS:
		// Accepts either a Go duration string ("100ms") or a plain
		// integer interpreted as milliseconds.
		return tryAssignDuration(&config.Producer.Flush.Frequency, value, fieldname)
	case AutoOffsetReset:
		switch value {
		case "earliest":
			config.Consumer.Offsets.Initial = sarama.OffsetOldest
			return nil
		case "latest":
			config.Consumer.Offsets.Initial = sarama.OffsetNewest
			return nil
		}
		return badFieldValue(fieldname, "earliest or latest")
	case CompressionType:
		switch value {
		case "uncompressed", sarama.CompressionNone.String():
			config.Producer.Compression = sarama.CompressionNone
			return nil
		case "gzip", sarama.CompressionGZIP.String():
			config.Producer.Compression = sarama.CompressionGZIP
			return nil
		case "snappy", sarama.CompressionSnappy.String():
			config.Producer.Compression = sarama.CompressionSnappy
			return nil
		case "lz4", sarama.CompressionLZ4.String():
			config.Producer.Compression = sarama.CompressionLZ4
			return nil
		case "producer":
			return nil
		}
		// Fall back to a raw numeric codec value.
		v, err := strconv.ParseInt(value, 10, 8)
		if err != nil {
			return badFieldValue(fieldname, "int8")
		}
		config.Producer.Compression = sarama.CompressionCodec(int8(v))
		return nil
	case CompressionLevel:
		return tryAssignInt(&config.Producer.CompressionLevel, value, fieldname)
	case MaxInFlightRequstsPerConnection:
		return tryAssignInt(&config.Net.MaxOpenRequests, value, fieldname)
	case EnableAutoCommit:
		switch value {
		case "true":
			return nil
		case "false":
			// can't disable, so set to 280 years before commit
			return tryAssignDuration(&config.Consumer.Offsets.AutoCommit.Interval, "2452800h", fieldname)
		}
		return badFieldValue(fieldname, "bool")
	}
	return nil
}

func badFieldValue(fieldname string, fieldtype string) error {
	return fmt.Errorf("kafka configuration key %q must have a %q value", fieldname, fieldtype)
}

// tryAssignDuration accepts either a Go duration string or an integer
// number of milliseconds.
func tryAssignDuration(target *time.Duration, potential string, key string) error {
	v, err := time.ParseDuration(potential)
	if err == nil {
		*target = v
		return nil
	}
	i, err := strconv.ParseInt(potential, 10, 64)
	if err == nil {
		*target = time.Duration(i) * time.Millisecond
		return nil
	}
	return badFieldValue(key, "time.Duration")
}

func tryAssignInt32(target *int32, potential string, key string) error {
	v, err := strconv.ParseInt(potential, 10, 32)
	if err != nil {
		return badFieldValue(key, "int32")
	}
	*target = int32(v)
	return nil
}

func tryAssignInt(target *int, potential string, key string) error {
	v, err := strconv.Atoi(potential)
	if err != nil {
		return badFieldValue(key, "int")
	}
	*target = v
	return nil
}
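// exampleClusterConfig is an illustrative sketch only of the consumer-side
// path: it builds a cluster.Config from placeholder properties. It shows that
// auto.offset.reset follows Kafka semantics ("earliest" starts from the oldest
// offset, "latest" from the newest) and that enable.auto.commit=false is
// approximated by stretching the commit interval to roughly 280 years.
func exampleClusterConfig() (*cluster.Config, error) {
	props := map[string]string{
		BootstrapServers: "broker-1:9092",
		ClientID:         "qpcl-example-consumer",
		GroupID:          "example-group", // skipped by skipProp; handled outside sarama.Config
		AutoOffsetReset:  "earliest",      // sarama.OffsetOldest
		EnableAutoCommit: "false",
	}
	return ToClusterConfig(props)
}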