diff --git a/go.mod b/go.mod index 0af5337b0..1fdea8f78 100644 --- a/go.mod +++ b/go.mod @@ -21,6 +21,7 @@ require ( k8s.io/apimachinery v0.31.4 k8s.io/client-go v0.31.4 k8s.io/code-generator v0.31.4 + k8s.io/component-base v0.31.4 k8s.io/klog/v2 v2.130.1 sigs.k8s.io/controller-runtime v0.19.3 sigs.k8s.io/structured-merge-diff/v4 v4.5.0 @@ -35,6 +36,7 @@ require ( github.com/Masterminds/sprig/v3 v3.2.3 // indirect github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect github.com/bufbuild/protocompile v0.14.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 // indirect @@ -63,6 +65,7 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.17.9 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mitchellh/copystructure v1.0.0 // indirect github.com/mitchellh/reflectwalk v1.0.1 // indirect diff --git a/go.sum b/go.sum index ae47e0d47..0b4ac1ed7 100644 --- a/go.sum +++ b/go.sum @@ -15,6 +15,8 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafo github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= 
github.com/bojand/ghz v0.120.0 h1:6F4wsmZVwFg5UnD+/R+IABWk6sKE/0OKIBdUQUZnOdo= github.com/bojand/ghz v0.120.0/go.mod h1:HfECuBZj1v02XObGnRuoZgyB1PR24/25dIYiJIMjJnE= github.com/bufbuild/protocompile v0.14.1 h1:iA73zAf/fyljNjQKwYzUHD6AD4R8KMasmwa/FBatYVw= @@ -278,6 +280,8 @@ k8s.io/client-go v0.31.4 h1:t4QEXt4jgHIkKKlx06+W3+1JOwAFU/2OPiOo7H92eRQ= k8s.io/client-go v0.31.4/go.mod h1:kvuMro4sFYIa8sulL5Gi5GFqUPvfH2O/dXuKstbaaeg= k8s.io/code-generator v0.31.4 h1:Vu+8fKz+239rKiVDHFVHgjQ162cg5iUQPtTyQbwXeQw= k8s.io/code-generator v0.31.4/go.mod h1:yMDt13Kn7m4MMZ4LxB1KBzdZjEyxzdT4b4qXq+lnI90= +k8s.io/component-base v0.31.4 h1:wCquJh4ul9O8nNBSB8N/o8+gbfu3BVQkVw9jAUY/Qtw= +k8s.io/component-base v0.31.4/go.mod h1:G4dgtf5BccwiDT9DdejK0qM6zTK0jwDGEKnCmb9+u/s= k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo= k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= diff --git a/pkg/ext-proc/handlers/request.go b/pkg/ext-proc/handlers/request.go index 16c3f4f02..abb03c72b 100644 --- a/pkg/ext-proc/handlers/request.go +++ b/pkg/ext-proc/handlers/request.go @@ -5,10 +5,12 @@ import ( "errors" "fmt" "strconv" + "time" configPb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" extProcPb "github.com/envoyproxy/go-control-plane/envoy/service/ext_proc/v3" "inference.networking.x-k8s.io/gateway-api-inference-extension/pkg/ext-proc/backend" + "inference.networking.x-k8s.io/gateway-api-inference-extension/pkg/ext-proc/metrics" "inference.networking.x-k8s.io/gateway-api-inference-extension/pkg/ext-proc/scheduling" klog "k8s.io/klog/v2" ) @@ -18,6 +20,7 @@ import ( // Envoy sends the request body to ext proc before sending the request to the backend server. 
func (s *Server) HandleRequestBody(reqCtx *RequestContext, req *extProcPb.ProcessingRequest) (*extProcPb.ProcessingResponse, error) { klog.V(3).Infof("Handling request body") + requestReceivedTimestamp := time.Now() // Unmarshal request body (must be JSON). v := req.Request.(*extProcPb.ProcessingRequest_RequestBody) @@ -116,6 +119,7 @@ func (s *Server) HandleRequestBody(reqCtx *RequestContext, req *extProcPb.Proces }, }, } + metrics.MonitorRequest(llmReq.Model, llmReq.ResolvedTargetModel, len(v.RequestBody.Body), time.Since(requestReceivedTimestamp)) return resp, nil } diff --git a/pkg/ext-proc/main.go b/pkg/ext-proc/main.go index e8a416675..d47a6748c 100644 --- a/pkg/ext-proc/main.go +++ b/pkg/ext-proc/main.go @@ -19,6 +19,7 @@ import ( "inference.networking.x-k8s.io/gateway-api-inference-extension/pkg/ext-proc/backend" "inference.networking.x-k8s.io/gateway-api-inference-extension/pkg/ext-proc/backend/vllm" "inference.networking.x-k8s.io/gateway-api-inference-extension/pkg/ext-proc/handlers" + "inference.networking.x-k8s.io/gateway-api-inference-extension/pkg/ext-proc/metrics" "inference.networking.x-k8s.io/gateway-api-inference-extension/pkg/ext-proc/scheduling" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -33,6 +34,8 @@ var ( "port", 9002, "gRPC port") + metricsPort = flag.Int( + "metricsPort", 9090, "metrics port") targetPodHeader = flag.String( "targetPodHeader", "target-pod", @@ -104,6 +107,8 @@ func main() { klog.Fatalf("failed to listen: %v", err) } + metrics.Register() + go metrics.StartMetricsHandler(*metricsPort) datastore := backend.NewK8sDataStore() mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ diff --git a/pkg/ext-proc/metrics/metrics.go b/pkg/ext-proc/metrics/metrics.go new file mode 100644 index 000000000..4ed823a64 --- /dev/null +++ b/pkg/ext-proc/metrics/metrics.go @@ -0,0 +1,72 @@ +package metrics + +import ( + "sync" + "time" + + compbasemetrics "k8s.io/component-base/metrics" + 
"k8s.io/component-base/metrics/legacyregistry" +) + +const ( + InferenceModelComponent = "inference_model" +) + +var ( + requestCounter = compbasemetrics.NewCounterVec( + &compbasemetrics.CounterOpts{ + Subsystem: InferenceModelComponent, + Name: "request_total", + Help: "Counter of inference model requests broken out for each model and target model.", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"model_name", "target_model_name"}, + ) + + requestLatencies = compbasemetrics.NewHistogramVec( + &compbasemetrics.HistogramOpts{ + Subsystem: InferenceModelComponent, + Name: "request_duration_seconds", + Help: "Inference model response latency distribution in seconds for each model and target model.", + Buckets: []float64{0.005, 0.025, 0.05, 0.1, 0.2, 0.4, 0.6, 0.8, 1.0, 1.25, 1.5, 2, 3, + 4, 5, 6, 8, 10, 15, 20, 30, 45, 60, 120, 180, 240, 300, 360, 480, 600, 900, 1200, 1800, 2700, 3600}, + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"model_name", "target_model_name"}, + ) + + requestSizes = compbasemetrics.NewHistogramVec( + &compbasemetrics.HistogramOpts{ + Subsystem: InferenceModelComponent, + Name: "request_sizes", + Help: "Inference model requests size distribution in bytes for each model and target model.", + // Use buckets ranging from 1000 bytes (1KB) to 10^9 bytes (1GB). + Buckets: []float64{ + 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, // More fine-grained up to 64KB + 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608, // Exponential up to 8MB + 16777216, 33554432, 67108864, 134217728, 268435456, 536870912, 1073741824, // Exponential up to 1GB + }, + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"model_name", "target_model_name"}, + ) +) + +var registerMetrics sync.Once + +// Register all metrics. 
+func Register() { + registerMetrics.Do(func() { + legacyregistry.MustRegister(requestCounter) + legacyregistry.MustRegister(requestLatencies) + legacyregistry.MustRegister(requestSizes) + }) +} + +// MonitorRequest increments the request counter and observes the request latency (in seconds) and size (in bytes) for the given model and target model. +func MonitorRequest(modelName, targetModelName string, reqSize int, elapsed time.Duration) { + elapsedSeconds := elapsed.Seconds() + requestCounter.WithLabelValues(modelName, targetModelName).Inc() + requestLatencies.WithLabelValues(modelName, targetModelName).Observe(elapsedSeconds) + requestSizes.WithLabelValues(modelName, targetModelName).Observe(float64(reqSize)) +} diff --git a/pkg/ext-proc/metrics/metrics_handler.go b/pkg/ext-proc/metrics/metrics_handler.go new file mode 100644 index 000000000..7cc7b5f42 --- /dev/null +++ b/pkg/ext-proc/metrics/metrics_handler.go @@ -0,0 +1,29 @@ +package metrics + +import ( + "net" + "net/http" + "strconv" + + "github.com/prometheus/client_golang/prometheus/promhttp" + "k8s.io/component-base/metrics/legacyregistry" + "k8s.io/klog/v2" +) + +func StartMetricsHandler(port int) { + klog.Info("Starting metrics HTTP handler ...") + + mux := http.NewServeMux() + mux.Handle("/metrics", promhttp.HandlerFor( + legacyregistry.DefaultGatherer, + promhttp.HandlerOpts{}, + )) + + server := &http.Server{ + Addr: net.JoinHostPort("", strconv.Itoa(port)), + Handler: mux, + } + if err := server.ListenAndServe(); err != http.ErrServerClosed { + klog.Fatalf("failed to start metrics HTTP handler: %v", err) + } +} diff --git a/pkg/ext-proc/metrics/metrics_test.go b/pkg/ext-proc/metrics/metrics_test.go new file mode 100644 index 000000000..df83a5ed4 --- /dev/null +++ b/pkg/ext-proc/metrics/metrics_test.go @@ -0,0 +1,100 @@ +package metrics + +import ( + "os" + "testing" + "time" + + "k8s.io/component-base/metrics/legacyregistry" + "k8s.io/component-base/metrics/testutil" +) + +const RequestTotalMetric = InferenceModelComponent + "_request_total" +const RequestLatenciesMetric = 
InferenceModelComponent + "_request_duration_seconds" +const RequestSizesMetric = InferenceModelComponent + "_request_sizes" + +func TestMonitorRequest(t *testing.T) { + type requests struct { + modelName string + targetModelName string + reqSize int + elapsed time.Duration + } + scenarios := []struct { + name string + reqs []requests + }{{ + name: "multiple requests", + reqs: []requests{ + { + modelName: "m10", + targetModelName: "t10", + reqSize: 1200, + elapsed: time.Millisecond * 10, + }, + { + modelName: "m10", + targetModelName: "t10", + reqSize: 500, + elapsed: time.Millisecond * 1600, + }, + { + modelName: "m10", + targetModelName: "t11", + reqSize: 2480, + elapsed: time.Millisecond * 60, + }, + { + modelName: "m20", + targetModelName: "t20", + reqSize: 80, + elapsed: time.Millisecond * 120, + }, + }, + }} + Register() + for _, scenario := range scenarios { + t.Run(scenario.name, func(t *testing.T) { + for _, req := range scenario.reqs { + MonitorRequest(req.modelName, req.targetModelName, req.reqSize, req.elapsed) + } + wantRequestTotal, err := os.Open("testdata/request_total_metric") + defer func() { + if err := wantRequestTotal.Close(); err != nil { + t.Error(err) + } + }() + if err != nil { + t.Fatal(err) + } + if err := testutil.GatherAndCompare(legacyregistry.DefaultGatherer, wantRequestTotal, RequestTotalMetric); err != nil { + t.Error(err) + } + wantRequestLatencies, err := os.Open("testdata/request_duration_seconds_metric") + defer func() { + if err := wantRequestLatencies.Close(); err != nil { + t.Error(err) + } + }() + if err != nil { + t.Fatal(err) + } + if err := testutil.GatherAndCompare(legacyregistry.DefaultGatherer, wantRequestLatencies, RequestLatenciesMetric); err != nil { + t.Error(err) + } + wantRequestSizes, err := os.Open("testdata/request_sizes_metric") + defer func() { + if err := wantRequestSizes.Close(); err != nil { + t.Error(err) + } + }() + if err != nil { + t.Fatal(err) + } + if err := 
testutil.GatherAndCompare(legacyregistry.DefaultGatherer, wantRequestSizes, RequestSizesMetric); err != nil { + t.Error(err) + } + + }) + } +} diff --git a/pkg/ext-proc/metrics/testdata/request_duration_seconds_metric b/pkg/ext-proc/metrics/testdata/request_duration_seconds_metric new file mode 100644 index 000000000..6c70b4ba9 --- /dev/null +++ b/pkg/ext-proc/metrics/testdata/request_duration_seconds_metric @@ -0,0 +1,116 @@ +# HELP inference_model_request_duration_seconds [ALPHA] Inference model response latency distribution in seconds for each model and target model. +# TYPE inference_model_request_duration_seconds histogram +inference_model_request_duration_seconds_bucket{model_name="m10", target_model_name="t10", le="0.005"} 0 +inference_model_request_duration_seconds_bucket{model_name="m10", target_model_name="t10", le="0.025"} 1 +inference_model_request_duration_seconds_bucket{model_name="m10", target_model_name="t10", le="0.05"} 1 +inference_model_request_duration_seconds_bucket{model_name="m10", target_model_name="t10", le="0.1"} 1 +inference_model_request_duration_seconds_bucket{model_name="m10", target_model_name="t10", le="0.2"} 1 +inference_model_request_duration_seconds_bucket{model_name="m10", target_model_name="t10", le="0.4"} 1 +inference_model_request_duration_seconds_bucket{model_name="m10", target_model_name="t10", le="0.6"} 1 +inference_model_request_duration_seconds_bucket{model_name="m10", target_model_name="t10", le="0.8"} 1 +inference_model_request_duration_seconds_bucket{model_name="m10", target_model_name="t10", le="1.0"} 1 +inference_model_request_duration_seconds_bucket{model_name="m10", target_model_name="t10", le="1.25"} 1 +inference_model_request_duration_seconds_bucket{model_name="m10", target_model_name="t10", le="1.5"} 1 +inference_model_request_duration_seconds_bucket{model_name="m10", target_model_name="t10", le="2"} 2 +inference_model_request_duration_seconds_bucket{model_name="m10", target_model_name="t10", le="3"} 2 
+inference_model_request_duration_seconds_bucket{model_name="m10", target_model_name="t10", le="4"} 2 +inference_model_request_duration_seconds_bucket{model_name="m10", target_model_name="t10", le="5"} 2 +inference_model_request_duration_seconds_bucket{model_name="m10", target_model_name="t10", le="6"} 2 +inference_model_request_duration_seconds_bucket{model_name="m10", target_model_name="t10", le="8"} 2 +inference_model_request_duration_seconds_bucket{model_name="m10", target_model_name="t10", le="10"} 2 +inference_model_request_duration_seconds_bucket{model_name="m10", target_model_name="t10", le="15"} 2 +inference_model_request_duration_seconds_bucket{model_name="m10", target_model_name="t10", le="20"} 2 +inference_model_request_duration_seconds_bucket{model_name="m10", target_model_name="t10", le="30"} 2 +inference_model_request_duration_seconds_bucket{model_name="m10", target_model_name="t10", le="45"} 2 +inference_model_request_duration_seconds_bucket{model_name="m10", target_model_name="t10", le="60"} 2 +inference_model_request_duration_seconds_bucket{model_name="m10", target_model_name="t10", le="120"} 2 +inference_model_request_duration_seconds_bucket{model_name="m10", target_model_name="t10", le="180"} 2 +inference_model_request_duration_seconds_bucket{model_name="m10", target_model_name="t10", le="240"} 2 +inference_model_request_duration_seconds_bucket{model_name="m10", target_model_name="t10", le="300"} 2 +inference_model_request_duration_seconds_bucket{model_name="m10", target_model_name="t10", le="360"} 2 +inference_model_request_duration_seconds_bucket{model_name="m10", target_model_name="t10", le="480"} 2 +inference_model_request_duration_seconds_bucket{model_name="m10", target_model_name="t10", le="600"} 2 +inference_model_request_duration_seconds_bucket{model_name="m10", target_model_name="t10", le="900"} 2 +inference_model_request_duration_seconds_bucket{model_name="m10", target_model_name="t10", le="1200"} 2 
+inference_model_request_duration_seconds_bucket{model_name="m10", target_model_name="t10", le="1800"} 2 +inference_model_request_duration_seconds_bucket{model_name="m10", target_model_name="t10", le="2700"} 2 +inference_model_request_duration_seconds_bucket{model_name="m10", target_model_name="t10", le="3600"} 2 +inference_model_request_duration_seconds_bucket{model_name="m10", target_model_name="t10", le="Inf"} 2 +inference_model_request_duration_seconds_sum{model_name="m10", target_model_name="t10"} 1.61 +inference_model_request_duration_seconds_count{model_name="m10", target_model_name="t10"} 2 +inference_model_request_duration_seconds_bucket{model_name="m10",target_model_name="t11",le="0.005"} 0 +inference_model_request_duration_seconds_bucket{model_name="m10",target_model_name="t11",le="0.025"} 0 +inference_model_request_duration_seconds_bucket{model_name="m10",target_model_name="t11",le="0.05"} 0 +inference_model_request_duration_seconds_bucket{model_name="m10",target_model_name="t11",le="0.1"} 1 +inference_model_request_duration_seconds_bucket{model_name="m10",target_model_name="t11",le="0.2"} 1 +inference_model_request_duration_seconds_bucket{model_name="m10",target_model_name="t11",le="0.4"} 1 +inference_model_request_duration_seconds_bucket{model_name="m10",target_model_name="t11",le="0.6"} 1 +inference_model_request_duration_seconds_bucket{model_name="m10",target_model_name="t11",le="0.8"} 1 +inference_model_request_duration_seconds_bucket{model_name="m10",target_model_name="t11",le="1"} 1 +inference_model_request_duration_seconds_bucket{model_name="m10",target_model_name="t11",le="1.25"} 1 +inference_model_request_duration_seconds_bucket{model_name="m10",target_model_name="t11",le="1.5"} 1 +inference_model_request_duration_seconds_bucket{model_name="m10",target_model_name="t11",le="2"} 1 +inference_model_request_duration_seconds_bucket{model_name="m10",target_model_name="t11",le="3"} 1 
+inference_model_request_duration_seconds_bucket{model_name="m10",target_model_name="t11",le="4"} 1 +inference_model_request_duration_seconds_bucket{model_name="m10",target_model_name="t11",le="5"} 1 +inference_model_request_duration_seconds_bucket{model_name="m10",target_model_name="t11",le="6"} 1 +inference_model_request_duration_seconds_bucket{model_name="m10",target_model_name="t11",le="8"} 1 +inference_model_request_duration_seconds_bucket{model_name="m10",target_model_name="t11",le="10"} 1 +inference_model_request_duration_seconds_bucket{model_name="m10",target_model_name="t11",le="15"} 1 +inference_model_request_duration_seconds_bucket{model_name="m10",target_model_name="t11",le="20"} 1 +inference_model_request_duration_seconds_bucket{model_name="m10",target_model_name="t11",le="30"} 1 +inference_model_request_duration_seconds_bucket{model_name="m10",target_model_name="t11",le="45"} 1 +inference_model_request_duration_seconds_bucket{model_name="m10",target_model_name="t11",le="60"} 1 +inference_model_request_duration_seconds_bucket{model_name="m10",target_model_name="t11",le="120"} 1 +inference_model_request_duration_seconds_bucket{model_name="m10",target_model_name="t11",le="180"} 1 +inference_model_request_duration_seconds_bucket{model_name="m10",target_model_name="t11",le="240"} 1 +inference_model_request_duration_seconds_bucket{model_name="m10",target_model_name="t11",le="300"} 1 +inference_model_request_duration_seconds_bucket{model_name="m10",target_model_name="t11",le="360"} 1 +inference_model_request_duration_seconds_bucket{model_name="m10",target_model_name="t11",le="480"} 1 +inference_model_request_duration_seconds_bucket{model_name="m10",target_model_name="t11",le="600"} 1 +inference_model_request_duration_seconds_bucket{model_name="m10",target_model_name="t11",le="900"} 1 +inference_model_request_duration_seconds_bucket{model_name="m10",target_model_name="t11",le="1200"} 1 
+inference_model_request_duration_seconds_bucket{model_name="m10",target_model_name="t11",le="1800"} 1 +inference_model_request_duration_seconds_bucket{model_name="m10",target_model_name="t11",le="2700"} 1 +inference_model_request_duration_seconds_bucket{model_name="m10",target_model_name="t11",le="3600"} 1 +inference_model_request_duration_seconds_bucket{model_name="m10",target_model_name="t11",le="+Inf"} 1 +inference_model_request_duration_seconds_sum{model_name="m10",target_model_name="t11"} 0.06 +inference_model_request_duration_seconds_count{model_name="m10",target_model_name="t11"} 1 +inference_model_request_duration_seconds_bucket{model_name="m20",target_model_name="t20",le="0.005"} 0 +inference_model_request_duration_seconds_bucket{model_name="m20",target_model_name="t20",le="0.025"} 0 +inference_model_request_duration_seconds_bucket{model_name="m20",target_model_name="t20",le="0.05"} 0 +inference_model_request_duration_seconds_bucket{model_name="m20",target_model_name="t20",le="0.1"} 0 +inference_model_request_duration_seconds_bucket{model_name="m20",target_model_name="t20",le="0.2"} 1 +inference_model_request_duration_seconds_bucket{model_name="m20",target_model_name="t20",le="0.4"} 1 +inference_model_request_duration_seconds_bucket{model_name="m20",target_model_name="t20",le="0.6"} 1 +inference_model_request_duration_seconds_bucket{model_name="m20",target_model_name="t20",le="0.8"} 1 +inference_model_request_duration_seconds_bucket{model_name="m20",target_model_name="t20",le="1"} 1 +inference_model_request_duration_seconds_bucket{model_name="m20",target_model_name="t20",le="1.25"} 1 +inference_model_request_duration_seconds_bucket{model_name="m20",target_model_name="t20",le="1.5"} 1 +inference_model_request_duration_seconds_bucket{model_name="m20",target_model_name="t20",le="2"} 1 +inference_model_request_duration_seconds_bucket{model_name="m20",target_model_name="t20",le="3"} 1 
+inference_model_request_duration_seconds_bucket{model_name="m20",target_model_name="t20",le="4"} 1 +inference_model_request_duration_seconds_bucket{model_name="m20",target_model_name="t20",le="5"} 1 +inference_model_request_duration_seconds_bucket{model_name="m20",target_model_name="t20",le="6"} 1 +inference_model_request_duration_seconds_bucket{model_name="m20",target_model_name="t20",le="8"} 1 +inference_model_request_duration_seconds_bucket{model_name="m20",target_model_name="t20",le="10"} 1 +inference_model_request_duration_seconds_bucket{model_name="m20",target_model_name="t20",le="15"} 1 +inference_model_request_duration_seconds_bucket{model_name="m20",target_model_name="t20",le="20"} 1 +inference_model_request_duration_seconds_bucket{model_name="m20",target_model_name="t20",le="30"} 1 +inference_model_request_duration_seconds_bucket{model_name="m20",target_model_name="t20",le="45"} 1 +inference_model_request_duration_seconds_bucket{model_name="m20",target_model_name="t20",le="60"} 1 +inference_model_request_duration_seconds_bucket{model_name="m20",target_model_name="t20",le="120"} 1 +inference_model_request_duration_seconds_bucket{model_name="m20",target_model_name="t20",le="180"} 1 +inference_model_request_duration_seconds_bucket{model_name="m20",target_model_name="t20",le="240"} 1 +inference_model_request_duration_seconds_bucket{model_name="m20",target_model_name="t20",le="300"} 1 +inference_model_request_duration_seconds_bucket{model_name="m20",target_model_name="t20",le="360"} 1 +inference_model_request_duration_seconds_bucket{model_name="m20",target_model_name="t20",le="480"} 1 +inference_model_request_duration_seconds_bucket{model_name="m20",target_model_name="t20",le="600"} 1 +inference_model_request_duration_seconds_bucket{model_name="m20",target_model_name="t20",le="900"} 1 +inference_model_request_duration_seconds_bucket{model_name="m20",target_model_name="t20",le="1200"} 1 
+inference_model_request_duration_seconds_bucket{model_name="m20",target_model_name="t20",le="1800"} 1 +inference_model_request_duration_seconds_bucket{model_name="m20",target_model_name="t20",le="2700"} 1 +inference_model_request_duration_seconds_bucket{model_name="m20",target_model_name="t20",le="3600"} 1 +inference_model_request_duration_seconds_bucket{model_name="m20",target_model_name="t20",le="+Inf"} 1 +inference_model_request_duration_seconds_sum{model_name="m20",target_model_name="t20"} 0.12 +inference_model_request_duration_seconds_count{model_name="m20",target_model_name="t20"} 1 diff --git a/pkg/ext-proc/metrics/testdata/request_sizes_metric b/pkg/ext-proc/metrics/testdata/request_sizes_metric new file mode 100644 index 000000000..ceca532e2 --- /dev/null +++ b/pkg/ext-proc/metrics/testdata/request_sizes_metric @@ -0,0 +1,86 @@ +# HELP inference_model_request_sizes [ALPHA] Inference model requests size distribution in bytes for each model and target model. +# TYPE inference_model_request_sizes histogram +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t10",le="64"} 0 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t10",le="128"} 0 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t10",le="256"} 0 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t10",le="512"} 1 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t10",le="1024"} 1 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t10",le="2048"} 2 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t10",le="4096"} 2 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t10",le="8192"} 2 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t10",le="16384"} 2 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t10",le="32768"} 2 
+inference_model_request_sizes_bucket{model_name="m10",target_model_name="t10",le="65536"} 2 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t10",le="131072"} 2 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t10",le="262144"} 2 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t10",le="524288"} 2 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t10",le="1.048576e+06"} 2 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t10",le="2.097152e+06"} 2 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t10",le="4.194304e+06"} 2 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t10",le="8.388608e+06"} 2 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t10",le="1.6777216e+07"} 2 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t10",le="3.3554432e+07"} 2 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t10",le="6.7108864e+07"} 2 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t10",le="1.34217728e+08"} 2 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t10",le="2.68435456e+08"} 2 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t10",le="5.36870912e+08"} 2 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t10",le="1.073741824e+09"} 2 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t10",le="+Inf"} 2 +inference_model_request_sizes_sum{model_name="m10",target_model_name="t10"} 1700 +inference_model_request_sizes_count{model_name="m10",target_model_name="t10"} 2 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t11",le="64"} 0 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t11",le="128"} 0 
+inference_model_request_sizes_bucket{model_name="m10",target_model_name="t11",le="256"} 0 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t11",le="512"} 0 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t11",le="1024"} 0 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t11",le="2048"} 0 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t11",le="4096"} 1 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t11",le="8192"} 1 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t11",le="16384"} 1 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t11",le="32768"} 1 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t11",le="65536"} 1 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t11",le="131072"} 1 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t11",le="262144"} 1 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t11",le="524288"} 1 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t11",le="1.048576e+06"} 1 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t11",le="2.097152e+06"} 1 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t11",le="4.194304e+06"} 1 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t11",le="8.388608e+06"} 1 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t11",le="1.6777216e+07"} 1 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t11",le="3.3554432e+07"} 1 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t11",le="6.7108864e+07"} 1 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t11",le="1.34217728e+08"} 1 
+inference_model_request_sizes_bucket{model_name="m10",target_model_name="t11",le="2.68435456e+08"} 1 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t11",le="5.36870912e+08"} 1 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t11",le="1.073741824e+09"} 1 +inference_model_request_sizes_bucket{model_name="m10",target_model_name="t11",le="+Inf"} 1 +inference_model_request_sizes_sum{model_name="m10",target_model_name="t11"} 2480 +inference_model_request_sizes_count{model_name="m10",target_model_name="t11"} 1 +inference_model_request_sizes_bucket{model_name="m20",target_model_name="t20",le="64"} 0 +inference_model_request_sizes_bucket{model_name="m20",target_model_name="t20",le="128"} 1 +inference_model_request_sizes_bucket{model_name="m20",target_model_name="t20",le="256"} 1 +inference_model_request_sizes_bucket{model_name="m20",target_model_name="t20",le="512"} 1 +inference_model_request_sizes_bucket{model_name="m20",target_model_name="t20",le="1024"} 1 +inference_model_request_sizes_bucket{model_name="m20",target_model_name="t20",le="2048"} 1 +inference_model_request_sizes_bucket{model_name="m20",target_model_name="t20",le="4096"} 1 +inference_model_request_sizes_bucket{model_name="m20",target_model_name="t20",le="8192"} 1 +inference_model_request_sizes_bucket{model_name="m20",target_model_name="t20",le="16384"} 1 +inference_model_request_sizes_bucket{model_name="m20",target_model_name="t20",le="32768"} 1 +inference_model_request_sizes_bucket{model_name="m20",target_model_name="t20",le="65536"} 1 +inference_model_request_sizes_bucket{model_name="m20",target_model_name="t20",le="131072"} 1 +inference_model_request_sizes_bucket{model_name="m20",target_model_name="t20",le="262144"} 1 +inference_model_request_sizes_bucket{model_name="m20",target_model_name="t20",le="524288"} 1 +inference_model_request_sizes_bucket{model_name="m20",target_model_name="t20",le="1.048576e+06"} 1 
+inference_model_request_sizes_bucket{model_name="m20",target_model_name="t20",le="2.097152e+06"} 1 +inference_model_request_sizes_bucket{model_name="m20",target_model_name="t20",le="4.194304e+06"} 1 +inference_model_request_sizes_bucket{model_name="m20",target_model_name="t20",le="8.388608e+06"} 1 +inference_model_request_sizes_bucket{model_name="m20",target_model_name="t20",le="1.6777216e+07"} 1 +inference_model_request_sizes_bucket{model_name="m20",target_model_name="t20",le="3.3554432e+07"} 1 +inference_model_request_sizes_bucket{model_name="m20",target_model_name="t20",le="6.7108864e+07"} 1 +inference_model_request_sizes_bucket{model_name="m20",target_model_name="t20",le="1.34217728e+08"} 1 +inference_model_request_sizes_bucket{model_name="m20",target_model_name="t20",le="2.68435456e+08"} 1 +inference_model_request_sizes_bucket{model_name="m20",target_model_name="t20",le="5.36870912e+08"} 1 +inference_model_request_sizes_bucket{model_name="m20",target_model_name="t20",le="1.073741824e+09"} 1 +inference_model_request_sizes_bucket{model_name="m20",target_model_name="t20",le="+Inf"} 1 +inference_model_request_sizes_sum{model_name="m20",target_model_name="t20"} 80 +inference_model_request_sizes_count{model_name="m20",target_model_name="t20"} 1 diff --git a/pkg/ext-proc/metrics/testdata/request_total_metric b/pkg/ext-proc/metrics/testdata/request_total_metric new file mode 100644 index 000000000..9c6f48a36 --- /dev/null +++ b/pkg/ext-proc/metrics/testdata/request_total_metric @@ -0,0 +1,5 @@ +# HELP inference_model_request_total [ALPHA] Counter of inference model requests broken out for each model and target model. 
+# TYPE inference_model_request_total counter +inference_model_request_total{model_name="m10", target_model_name="t10"} 2 +inference_model_request_total{model_name="m10", target_model_name="t11"} 1 +inference_model_request_total{model_name="m20", target_model_name="t20"} 1 diff --git a/pkg/manifests/ext_proc.yaml b/pkg/manifests/ext_proc.yaml index baa04d608..dfcfdc3e5 100644 --- a/pkg/manifests/ext_proc.yaml +++ b/pkg/manifests/ext_proc.yaml @@ -59,7 +59,8 @@ spec: - "vllm-llama2-7b-pool" ports: - containerPort: 9002 - + - name: metrics + containerPort: 9090 - name: curl image: curlimages/curl command: ["sleep", "3600"]