
Commit 5d2dac0

Create a goroutine worker pool to send data from distributors to ingesters. (#6406)

* Creating a worker pool to be used on distributors
* metric + test
* Changelog

Signed-off-by: alanprot <[email protected]>

1 parent fb783a7 commit 5d2dac0

File tree: 9 files changed, +234 −14 lines

Diff for: CHANGELOG.md

+2 −1

```diff
@@ -45,7 +45,8 @@
 * [ENHANCEMENT] Distributor: Expose `cortex_label_size_bytes` native histogram metric. #6372
 * [ENHANCEMENT] Add new option `-server.grpc_server-num-stream-workers` to configure the number of worker goroutines that should be used to process incoming streams. #6386
 * [ENHANCEMENT] Distributor: Return HTTP 5XX instead of HTTP 4XX when instance limits are hit. #6358
-* [ENHANCEMENT] Ingester: Make sure unregistered ingester joining the ring after WAL replay #6277
+* [ENHANCEMENT] Ingester: Add a new `-distributor.num-push-workers` flag to use a goroutine worker pool when sending data from distributor to ingesters. #6406
+* [ENHANCEMENT] Distributor: Create a goroutine worker pool to send data from distributors to ingesters.
 * [BUGFIX] Runtime-config: Handle absolute file paths when working directory is not / #6224
 * [BUGFIX] Ruler: Allow rule evaluation to complete during shutdown. #6326
 * [BUGFIX] Ring: update ring with new ip address when instance is lost, rejoins, but heartbeat is disabled #6271
```

Diff for: docs/configuration/config-file-reference.md

+7

```diff
@@ -2692,6 +2692,13 @@ ring:
   # CLI flag: -distributor.ring.instance-interface-names
   [instance_interface_names: <list of string> | default = [eth0 en0]]

+# EXPERIMENTAL: Number of go routines to handle push calls from distributors to
+# ingesters. When no workers are available, a new goroutine will be spawned
+# automatically. If set to 0 (default), workers are disabled, and a new
+# goroutine will be created for each push request.
+# CLI flag: -distributor.num-push-workers
+[num_push_workers: <int> | default = 0]
+
 instance_limits:
   # Max ingestion rate (samples/sec) that this distributor will accept. This
   # limit is per-distributor, not per-tenant. Additional push requests will be
```
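
Enabling the pool is then a one-line change in the distributor block. A hypothetical sketch (only `num_push_workers` comes from this commit; the nesting follows the standard config layout shown above, and 100 is an arbitrary size):

```yaml
distributor:
  # 0 (the default) keeps the old behavior: one new goroutine per push request.
  num_push_workers: 100
```

The equivalent CLI flag is `-distributor.num-push-workers=100`.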

Diff for: pkg/alertmanager/distributor.go

+1 −1

```diff
@@ -161,7 +161,7 @@ func (d *Distributor) doQuorum(userID string, w http.ResponseWriter, r *http.Req
 	var responses []*httpgrpc.HTTPResponse
 	var responsesMtx sync.Mutex
 	grpcHeaders := httpToHttpgrpcHeaders(r.Header)
-	err = ring.DoBatch(r.Context(), RingOp, d.alertmanagerRing, []uint32{shardByUser(userID)}, func(am ring.InstanceDesc, _ []int) error {
+	err = ring.DoBatch(r.Context(), RingOp, d.alertmanagerRing, nil, []uint32{shardByUser(userID)}, func(am ring.InstanceDesc, _ []int) error {
 		// Use a background context to make sure all alertmanagers get the request even if we return early.
 		localCtx := opentracing.ContextWithSpan(user.InjectOrgID(context.Background(), userID), opentracing.SpanFromContext(r.Context()))
 		sp, localCtx := opentracing.StartSpanFromContext(localCtx, "Distributor.doQuorum")
```

Diff for: pkg/alertmanager/multitenant.go

+1 −1

```diff
@@ -1099,7 +1099,7 @@ func (am *MultitenantAlertmanager) ReplicateStateForUser(ctx context.Context, us
 	level.Debug(am.logger).Log("msg", "message received for replication", "user", userID, "key", part.Key)

 	selfAddress := am.ringLifecycler.GetInstanceAddr()
-	err := ring.DoBatch(ctx, RingOp, am.ring, []uint32{shardByUser(userID)}, func(desc ring.InstanceDesc, _ []int) error {
+	err := ring.DoBatch(ctx, RingOp, am.ring, nil, []uint32{shardByUser(userID)}, func(desc ring.InstanceDesc, _ []int) error {
 		if desc.GetAddr() == selfAddress {
 			return nil
 		}
```

Diff for: pkg/distributor/distributor.go

+15 −1

```diff
@@ -123,6 +123,8 @@ type Distributor struct {
 	latestSeenSampleTimestampPerUser *prometheus.GaugeVec

 	validateMetrics *validation.ValidateMetrics
+
+	asyncExecutor util.AsyncExecutor
 }

 // Config contains the configuration required to
@@ -160,6 +162,11 @@ type Config struct {
 	// from quorum number of zones will be included to reduce data merged and improve performance.
 	ZoneResultsQuorumMetadata bool `yaml:"zone_results_quorum_metadata" doc:"hidden"`

+	// Number of go routines to handle push calls from distributors to ingesters.
+	// If set to 0 (default), workers are disabled, and a new goroutine will be created for each push request.
+	// When no workers are available, a new goroutine will be spawned automatically.
+	NumPushWorkers int `yaml:"num_push_workers"`
+
 	// Limits for distributor
 	InstanceLimits InstanceLimits `yaml:"instance_limits"`

@@ -193,6 +200,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
 	f.StringVar(&cfg.ShardingStrategy, "distributor.sharding-strategy", util.ShardingStrategyDefault, fmt.Sprintf("The sharding strategy to use. Supported values are: %s.", strings.Join(supportedShardingStrategies, ", ")))
 	f.BoolVar(&cfg.ExtendWrites, "distributor.extend-writes", true, "Try writing to an additional ingester in the presence of an ingester not in the ACTIVE state. It is useful to disable this along with -ingester.unregister-on-shutdown=false in order to not spread samples to extra ingesters during rolling restarts with consistent naming.")
 	f.BoolVar(&cfg.ZoneResultsQuorumMetadata, "distributor.zone-results-quorum-metadata", false, "Experimental, this flag may change in the future. If zone awareness and this both enabled, when querying metadata APIs (labels names and values for now), only results from quorum number of zones will be included.")
+	f.IntVar(&cfg.NumPushWorkers, "distributor.num-push-workers", 0, "EXPERIMENTAL: Number of go routines to handle push calls from distributors to ingesters. When no workers are available, a new goroutine will be spawned automatically. If set to 0 (default), workers are disabled, and a new goroutine will be created for each push request.")

 	f.Float64Var(&cfg.InstanceLimits.MaxIngestionRate, "distributor.instance-limits.max-ingestion-rate", 0, "Max ingestion rate (samples/sec) that this distributor will accept. This limit is per-distributor, not per-tenant. Additional push requests will be rejected. Current ingestion rate is computed as exponentially weighted moving average, updated every second. 0 = unlimited.")
 	f.IntVar(&cfg.InstanceLimits.MaxInflightPushRequests, "distributor.instance-limits.max-inflight-push-requests", 0, "Max inflight push requests that this distributor can handle. This limit is per-distributor, not per-tenant. Additional requests will be rejected. 0 = unlimited.")
@@ -366,6 +374,12 @@ func New(cfg Config, clientConfig ingester_client.Config, limits *validation.Ove
 		}, []string{"user"}),

 		validateMetrics: validation.NewValidateMetrics(reg),
+		asyncExecutor:   util.NewNoOpExecutor(),
+	}
+
+	if cfg.NumPushWorkers > 0 {
+		util_log.WarnExperimentalUse("Distributor: using goroutine worker pool")
+		d.asyncExecutor = util.NewWorkerPool("distributor", cfg.NumPushWorkers, reg)
 	}

 	promauto.With(reg).NewGauge(prometheus.GaugeOpts{
@@ -823,7 +837,7 @@ func (d *Distributor) doBatch(ctx context.Context, req *cortexpb.WriteRequest, s
 		op = ring.Write
 	}

-	return ring.DoBatch(ctx, op, subRing, keys, func(ingester ring.InstanceDesc, indexes []int) error {
+	return ring.DoBatch(ctx, op, subRing, d.asyncExecutor, keys, func(ingester ring.InstanceDesc, indexes []int) error {
 		timeseries := make([]cortexpb.PreallocTimeseries, 0, len(indexes))
 		var metadata []*cortexpb.MetricMetadata
```

Diff for: pkg/ring/batch.go

+12 −3

```diff
@@ -8,9 +8,14 @@ import (
 	"go.uber.org/atomic"
 	"google.golang.org/grpc/status"

+	"github.com/cortexproject/cortex/pkg/util"
 	"github.com/cortexproject/cortex/pkg/util/httpgrpcutil"
 )

+var (
+	noOpExecutor = util.NewNoOpExecutor()
+)
+
 type batchTracker struct {
 	rpcsPending atomic.Int32
 	rpcsFailed  atomic.Int32
@@ -66,12 +71,16 @@ func (i *itemTracker) getError() error {
 // cleanup() is always called, either on an error before starting the batches or after they all finish.
 //
 // Not implemented as a method on Ring so we can test separately.
-func DoBatch(ctx context.Context, op Operation, r ReadRing, keys []uint32, callback func(InstanceDesc, []int) error, cleanup func()) error {
+func DoBatch(ctx context.Context, op Operation, r ReadRing, e util.AsyncExecutor, keys []uint32, callback func(InstanceDesc, []int) error, cleanup func()) error {
 	if r.InstancesCount() <= 0 {
 		cleanup()
 		return fmt.Errorf("DoBatch: InstancesCount <= 0")
 	}

+	if e == nil {
+		e = noOpExecutor
+	}
+
 	expectedTrackers := len(keys) * (r.ReplicationFactor() + 1) / r.InstancesCount()
 	itemTrackers := make([]itemTracker, len(keys))
 	instances := make(map[string]instance, r.InstancesCount())
@@ -115,11 +124,11 @@ func DoBatch(ctx context.Context, op Operation, r ReadRing, keys []uint32, callb
 	wg.Add(len(instances))
 	for _, i := range instances {
-		go func(i instance) {
+		e.Submit(func() {
 			err := callback(i.desc, i.indexes)
 			tracker.record(i, err)
 			wg.Done()
-		}(i)
+		})
 	}

 	// Perform cleanup at the end.
```
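
The interesting part of this hunk is the call-site pattern: `DoBatch` no longer spawns `go func(i instance)` directly but hands each per-instance closure to an executor, and a nil executor degrades to the old goroutine-per-instance behavior via the package-level `noOpExecutor`. A self-contained sketch of that pattern (the names below are illustrative stand-ins, not the Cortex types):

```go
package main

import (
	"fmt"
	"sync"
)

// AsyncExecutor mirrors the contract from pkg/util shown further down.
type AsyncExecutor interface {
	Submit(f func())
}

// noOpExecutor's Submit just spawns a goroutine, which is exactly what
// DoBatch falls back to when callers pass a nil executor.
type noOpExecutor struct{}

func (noOpExecutor) Submit(f func()) { go f() }

func main() {
	instances := []string{"ingester-0", "ingester-1", "ingester-2"}

	var e AsyncExecutor = noOpExecutor{}

	var wg sync.WaitGroup
	wg.Add(len(instances))
	for _, inst := range instances {
		inst := inst // pin the loop variable; the closure outlives the iteration
		e.Submit(func() {
			defer wg.Done()
			fmt.Println("pushing batch to", inst)
		})
	}
	wg.Wait()
}
```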

Diff for: pkg/ring/ring_test.go

+24 −7

```diff
@@ -73,12 +73,29 @@ func benchmarkBatch(b *testing.B, g TokenGenerator, numInstances, numKeys int) {
 	}
 	rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
 	keys := make([]uint32, numKeys)
-	// Generate a batch of N random keys, and look them up
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		generateKeys(rnd, numKeys, keys)
-		err := DoBatch(ctx, Write, &r, keys, callback, cleanup)
-		require.NoError(b, err)
+
+	tc := map[string]struct {
+		exe util.AsyncExecutor
+	}{
+		"noOpExecutor": {
+			exe: noOpExecutor,
+		},
+		"workerExecutor": {
+			exe: util.NewWorkerPool("test", 100, prometheus.NewPedanticRegistry()),
+		},
+	}
+
+	for n, c := range tc {
+		b.Run(n, func(b *testing.B) {
+			// Generate a batch of N random keys, and look them up
+			b.ResetTimer()
+			b.ReportAllocs()
+			for i := 0; i < b.N; i++ {
+				generateKeys(rnd, numKeys, keys)
+				err := DoBatch(ctx, Write, &r, c.exe, keys, callback, cleanup)
+				require.NoError(b, err)
+			}
+		})
 	}
 }
@@ -167,7 +184,7 @@ func TestDoBatchZeroInstances(t *testing.T) {
 		ringDesc: desc,
 		strategy: NewDefaultReplicationStrategy(),
 	}
-	require.Error(t, DoBatch(ctx, Write, &r, keys, callback, cleanup))
+	require.Error(t, DoBatch(ctx, Write, &r, nil, keys, callback, cleanup))
 }

 func TestAddIngester(t *testing.T) {
```

Diff for: pkg/util/worker_pool.go

+85 (new file)

```go
package util

import (
	"sync"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// This code was based on: https://github.com/grpc/grpc-go/blob/66ba4b264d26808cb7af3c86eee66e843472915e/server.go

// serverWorkerResetThreshold defines how often the stack must be reset. Every
// N requests, by spawning a new goroutine in its place, a worker can reset its
// stack so that large stacks don't live in memory forever. 2^16 should allow
// each goroutine stack to live for at least a few seconds in a typical
// workload (assuming a QPS of a few thousand requests/sec).
const serverWorkerResetThreshold = 1 << 16

type AsyncExecutor interface {
	Submit(f func())
	Stop()
}

type noOpExecutor struct{}

func (n noOpExecutor) Stop() {}

func NewNoOpExecutor() AsyncExecutor {
	return &noOpExecutor{}
}

func (n noOpExecutor) Submit(f func()) {
	go f()
}

type workerPoolExecutor struct {
	serverWorkerChannel chan func()
	closeOnce           sync.Once

	fallbackTotal prometheus.Counter
}

func NewWorkerPool(name string, numWorkers int, reg prometheus.Registerer) AsyncExecutor {
	wp := &workerPoolExecutor{
		serverWorkerChannel: make(chan func()),
		fallbackTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
			Namespace:   "cortex",
			Name:        "worker_pool_fallback_total",
			Help:        "The total number additional go routines that needed to be created to run jobs.",
			ConstLabels: prometheus.Labels{"name": name},
		}),
	}

	for i := 0; i < numWorkers; i++ {
		go wp.run()
	}

	return wp
}

func (s *workerPoolExecutor) Stop() {
	s.closeOnce.Do(func() {
		close(s.serverWorkerChannel)
	})
}

func (s *workerPoolExecutor) Submit(f func()) {
	select {
	case s.serverWorkerChannel <- f:
	default:
		s.fallbackTotal.Inc()
		go f()
	}
}

func (s *workerPoolExecutor) run() {
	for completed := 0; completed < serverWorkerResetThreshold; completed++ {
		f, ok := <-s.serverWorkerChannel
		if !ok {
			return
		}
		f()
	}
	go s.run()
}
```
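
Not part of the commit, but a minimal usage sketch against the API above (the pool name and sizes are arbitrary, and `prometheus.NewRegistry()` stands in for whatever registerer the caller already wires in):

```go
package main

import (
	"fmt"
	"sync"

	"github.com/prometheus/client_golang/prometheus"

	"github.com/cortexproject/cortex/pkg/util"
)

func main() {
	// Four long-lived workers draining an unbuffered channel of jobs.
	pool := util.NewWorkerPool("example", 4, prometheus.NewRegistry())
	defer pool.Stop()

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		i := i
		wg.Add(1)
		// With only 4 workers, some submissions will take the fallback path
		// (a fresh goroutine) and increment
		// cortex_worker_pool_fallback_total{name="example"}.
		pool.Submit(func() {
			defer wg.Done()
			fmt.Println("job", i)
		})
	}
	wg.Wait()
}
```

The unbuffered channel is what makes `Submit` non-blocking: if no worker can receive immediately, the `select` default branch spawns a goroutine, so a saturated pool degrades to the old behavior instead of queueing.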

Diff for: pkg/util/worker_pool_test.go

+87 (new file)

```go
package util

import (
	"bytes"
	"sync"
	"testing"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
	"github.com/stretchr/testify/require"
)

func TestNewWorkerPool_CreateMultiplesPoolsWithSameRegistry(t *testing.T) {
	reg := prometheus.NewPedanticRegistry()
	wp1 := NewWorkerPool("test1", 100, reg)
	defer wp1.Stop()
	wp2 := NewWorkerPool("test2", 100, reg)
	defer wp2.Stop()
}

func TestWorkerPool_TestMetric(t *testing.T) {
	reg := prometheus.NewPedanticRegistry()
	workerPool := NewWorkerPool("test1", 1, reg)
	defer workerPool.Stop()

	require.NoError(t, testutil.GatherAndCompare(reg, bytes.NewBufferString(`
# HELP cortex_worker_pool_fallback_total The total number additional go routines that needed to be created to run jobs.
# TYPE cortex_worker_pool_fallback_total counter
cortex_worker_pool_fallback_total{name="test1"} 0
`), "cortex_worker_pool_fallback_total"))

	wg := &sync.WaitGroup{}
	wg.Add(1)

	// Block the first job
	workerPool.Submit(func() {
		wg.Wait()
	})

	// create an extra job to increment the metric
	workerPool.Submit(func() {})
	require.NoError(t, testutil.GatherAndCompare(reg, bytes.NewBufferString(`
# HELP cortex_worker_pool_fallback_total The total number additional go routines that needed to be created to run jobs.
# TYPE cortex_worker_pool_fallback_total counter
cortex_worker_pool_fallback_total{name="test1"} 1
`), "cortex_worker_pool_fallback_total"))

	wg.Done()
}

func TestWorkerPool_ShouldFallbackWhenAllWorkersAreBusy(t *testing.T) {
	reg := prometheus.NewPedanticRegistry()
	numberOfWorkers := 10
	workerPool := NewWorkerPool("test1", numberOfWorkers, reg)
	defer workerPool.Stop()

	m := sync.Mutex{}
	blockerWg := sync.WaitGroup{}
	blockerWg.Add(numberOfWorkers)

	// Lets lock all submited jobs
	m.Lock()

	for i := 0; i < numberOfWorkers; i++ {
		workerPool.Submit(func() {
			defer blockerWg.Done()
			m.Lock()
			m.Unlock() //nolint:staticcheck
		})
	}

	// At this point all workers should be busy. lets try to create a new job
	wg := sync.WaitGroup{}
	wg.Add(1)
	workerPool.Submit(func() {
		defer wg.Done()
	})

	// Make sure the last job ran to the end
	wg.Wait()

	// Lets release the jobs
	m.Unlock()

	blockerWg.Wait()
}
```
