@@ -4,8 +4,123 @@ import (
 	"testing"
 
 	"inference.networking.x-k8s.io/llm-instance-gateway/api/v1alpha1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
+func TestIsReady(t *testing.T) {
+	tests := []struct {
+		name            string
+		inferencePool   *v1alpha1.InferencePool
+		inferenceModels []*v1alpha1.InferenceModel
+		expectedReady   bool
+	}{
+		{
+			name: "Ready when at least one model matches configured pool",
+			inferencePool: &v1alpha1.InferencePool{
+				ObjectMeta: v1.ObjectMeta{
+					Name:      "test-pool",
+					Namespace: "default",
+				},
+			},
+			inferenceModels: []*v1alpha1.InferenceModel{
+				{
+					ObjectMeta: v1.ObjectMeta{
+						Name:      "test-model",
+						Namespace: "default",
+					},
+					Spec: v1alpha1.InferenceModelSpec{
+						PoolRef: v1alpha1.PoolObjectReference{
+							Name: "other-pool",
+						},
+					},
+				},
+				{
+					ObjectMeta: v1.ObjectMeta{
+						Name:      "test-model",
+						Namespace: "default",
+					},
+					Spec: v1alpha1.InferenceModelSpec{
+						PoolRef: v1alpha1.PoolObjectReference{
+							Name: "test-pool",
+						},
+					},
+				},
+			},
+			expectedReady: true,
+		},
+		{
+			name: "Not ready when model references non-matching pool",
+			inferencePool: &v1alpha1.InferencePool{
+				ObjectMeta: v1.ObjectMeta{
+					Name:      "test-pool",
+					Namespace: "default",
+				},
+			},
+			inferenceModels: []*v1alpha1.InferenceModel{
+				{
+					ObjectMeta: v1.ObjectMeta{
+						Name:      "test-model",
+						Namespace: "default",
+					},
+					Spec: v1alpha1.InferenceModelSpec{
+						PoolRef: v1alpha1.PoolObjectReference{
+							Name: "other-pool",
+						},
+					},
+				},
+			},
+			expectedReady: false,
+		},
+		{
+			name:          "Not ready when pool is nil",
+			inferencePool: nil,
+			inferenceModels: []*v1alpha1.InferenceModel{
+				{Spec: v1alpha1.InferenceModelSpec{}},
+				{Spec: v1alpha1.InferenceModelSpec{}},
+			},
+			expectedReady: false,
+		},
+		{
+			name:          "Not ready when models are missing",
+			inferencePool: &v1alpha1.InferencePool{},
+			inferenceModels: []*v1alpha1.InferenceModel{
+				nil,
+			},
+			expectedReady: false,
+		},
+		{
+			name:            "Not ready when models are empty",
+			inferencePool:   &v1alpha1.InferencePool{},
+			inferenceModels: []*v1alpha1.InferenceModel{},
+			expectedReady:   false,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			datastore := NewK8sDataStore()
+
+			// Set the inference pool
+			if tt.inferencePool != nil {
+				datastore.setInferencePool(tt.inferencePool)
+			}
+
+			// Set the inference models
+			for _, model := range tt.inferenceModels {
+				if model != nil {
+					datastore.InferenceModels.Store(model.Spec.ModelName, model)
+				}
+			}
+
+			// Check readiness
+			isReady := datastore.IsReady()
+			if isReady != tt.expectedReady {
+				t.Errorf("IsReady() = %v, want %v", isReady, tt.expectedReady)
+			}
+		})
+	}
+}
+
 func TestRandomWeightedDraw(t *testing.T) {
 	tests := []struct {
 		name string