Skip to content

Commit 2acfaab

Browse files
committed
copy api v1alpha1 to v1alpha2
1 parent 2a615e9 commit 2acfaab

File tree

4 files changed

+540
-0
lines changed

4 files changed

+540
-0
lines changed

api/v1alpha2/doc.go

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,23 @@
1+
/*
Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package v1alpha2 contains API Schema definitions for the
// inference.networking.x-k8s.io API group.
//
// +k8s:openapi-gen=true
// +kubebuilder:object:generate=true
// +groupName=inference.networking.x-k8s.io
package v1alpha2

api/v1alpha2/groupversion_info.go

Lines changed: 45 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,45 @@
1+
/*
2+
Copyright 2024 The Kubernetes Authors.
3+
4+
Licensed under the Apache License, Version 2.0 (the "License");
5+
you may not use this file except in compliance with the License.
6+
You may obtain a copy of the License at
7+
8+
http://www.apache.org/licenses/LICENSE-2.0
9+
10+
Unless required by applicable law or agreed to in writing, software
11+
distributed under the License is distributed on an "AS IS" BASIS,
12+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+
See the License for the specific language governing permissions and
14+
limitations under the License.
15+
*/
16+
17+
// Package v1alpha1 contains API Schema definitions for the gateway v1alpha1 API group
18+
// +kubebuilder:object:generate=true
19+
// +groupName=inference.networking.x-k8s.io
20+
package v1alpha1
21+
22+
import (
23+
"k8s.io/apimachinery/pkg/runtime/schema"
24+
"sigs.k8s.io/controller-runtime/pkg/scheme"
25+
)
26+
27+
var (
28+
// GroupVersion is group version used to register these objects
29+
GroupVersion = schema.GroupVersion{Group: "inference.networking.x-k8s.io", Version: "v1alpha1"}
30+
31+
// SchemeGroupVersion is alias to GroupVersion for client-go libraries.
32+
// It is required by pkg/client/informers/externalversions/...
33+
SchemeGroupVersion = GroupVersion
34+
35+
// SchemeBuilder is used to add go types to the GroupVersionKind scheme
36+
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
37+
38+
// AddToScheme adds the types in this group-version to the given scheme.
39+
AddToScheme = SchemeBuilder.AddToScheme
40+
)
41+
42+
// Resource is required by pkg/client/listers/...
43+
func Resource(resource string) schema.GroupResource {
44+
return GroupVersion.WithResource(resource).GroupResource()
45+
}

api/v1alpha2/inferencemodel_types.go

Lines changed: 234 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,234 @@
1+
/*
2+
Copyright 2024 The Kubernetes Authors.
3+
4+
Licensed under the Apache License, Version 2.0 (the "License");
5+
you may not use this file except in compliance with the License.
6+
You may obtain a copy of the License at
7+
8+
http://www.apache.org/licenses/LICENSE-2.0
9+
10+
Unless required by applicable law or agreed to in writing, software
11+
distributed under the License is distributed on an "AS IS" BASIS,
12+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+
See the License for the specific language governing permissions and
14+
limitations under the License.
15+
*/
16+
17+
package v1alpha1
18+
19+
import (
20+
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
21+
)
22+
23+
// InferenceModel is the Schema for the InferenceModels API.
//
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +genclient
type InferenceModel struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	// Spec is the desired state of the InferenceModel.
	Spec InferenceModelSpec `json:"spec,omitempty"`
	// Status is the observed state of the InferenceModel, reported by the controller.
	Status InferenceModelStatus `json:"status,omitempty"`
}
35+
36+
// InferenceModelList contains a list of InferenceModel.
//
// +kubebuilder:object:root=true
type InferenceModelList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	metav1.ListMeta `json:"metadata,omitempty"`
	// Items is the list of InferenceModel objects.
	Items []InferenceModel `json:"items"`
}
44+
45+
// InferenceModelSpec represents the desired state of a specific model use case. This resource is
// managed by the "Inference Workload Owner" persona.
//
// The Inference Workload Owner persona is someone that trains, verifies, and
// leverages a large language model from a model frontend, drives the lifecycle
// and rollout of new versions of those models, and defines the specific
// performance and latency goals for the model. These workloads are
// expected to operate within an InferencePool sharing compute capacity with other
// InferenceModels, defined by the Inference Platform Admin.
//
// InferenceModel's modelName (not the ObjectMeta name) is unique for a given InferencePool;
// if the name is reused, an error will be shown on the status of the
// InferenceModel that attempted to reuse it. The oldest InferenceModel, based on
// creation timestamp, will be selected to remain valid. In the event of a race
// condition, one will be selected at random.
type InferenceModelSpec struct {
	// ModelName is the name of the model as it will be set in the "model" parameter for an incoming request.
	// ModelNames must be unique for a referencing InferencePool
	// (names can be reused for a different pool in the same cluster).
	// The modelName with the oldest creation timestamp is retained, and the incoming
	// InferenceModel sets the Ready status to false with a corresponding reason.
	// In the rare case of a race condition, one Model will be selected randomly to be considered valid, and the other rejected.
	// Names can be reserved without an underlying model configured in the pool.
	// This can be done by specifying a target model and setting the weight to zero;
	// an error will be returned specifying that no valid target model is found.
	//
	// +kubebuilder:validation:MaxLength=256
	// +kubebuilder:validation:Required
	ModelName string `json:"modelName"`

	// Criticality defines how important it is to serve the model compared to other models referencing the same pool.
	// Criticality impacts how traffic is handled in resource constrained situations. It handles this by
	// queuing or rejecting requests of lower criticality. InferenceModels of an equivalent Criticality will
	// fairly share resources over throughput of tokens. In the future, the metric used to calculate fairness,
	// and the proportionality of fairness will be configurable.
	//
	// Default values for this field will not be set, to allow for future additions of new fields that may 'one of' with this field.
	// Any implementations that may consume this field may treat an unset value as the 'Standard' range.
	// +optional
	Criticality *Criticality `json:"criticality,omitempty"`

	// TargetModels allow multiple versions of a model for traffic splitting.
	// If not specified, the target model name is defaulted to the modelName parameter.
	// modelName is often in reference to a LoRA adapter.
	//
	// The CEL rule below enforces all-or-nothing weights: either every entry
	// sets Weight or none does (see TargetModel.Weight for semantics).
	//
	// +optional
	// +kubebuilder:validation:MaxItems=10
	// +kubebuilder:validation:XValidation:message="Weights should be set for all models, or none of the models.",rule="self.all(model, has(model.weight)) || self.all(model, !has(model.weight))"
	TargetModels []TargetModel `json:"targetModels,omitempty"`

	// PoolRef is a reference to the inference pool; the pool must exist in the same namespace.
	//
	// +kubebuilder:validation:Required
	PoolRef PoolObjectReference `json:"poolRef"`
}
100+
101+
// PoolObjectReference identifies an API object within the namespace of the
// referrer. Cross-namespace references are not supported (there is no
// namespace field).
type PoolObjectReference struct {
	// Group is the API group of the referent. Defaults to this project's group;
	// an empty string selects the core API group.
	//
	// +optional
	// +kubebuilder:default="inference.networking.x-k8s.io"
	// +kubebuilder:validation:MaxLength=253
	// +kubebuilder:validation:Pattern=`^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`
	Group string `json:"group,omitempty"`

	// Kind is kind of the referent. For example "InferencePool".
	//
	// +optional
	// +kubebuilder:default="InferencePool"
	// +kubebuilder:validation:MinLength=1
	// +kubebuilder:validation:MaxLength=63
	// +kubebuilder:validation:Pattern=`^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$`
	Kind string `json:"kind,omitempty"`

	// Name is the name of the referent.
	//
	// +kubebuilder:validation:MinLength=1
	// +kubebuilder:validation:MaxLength=253
	// +kubebuilder:validation:Required
	Name string `json:"name"`
}
128+
129+
// Criticality defines how important it is to serve the model compared to other models.
// Criticality is intentionally a bounded enum to contain the possibilities that need to be
// supported by the load balancing algorithm. Any reference to the Criticality field must be
// optional (use a pointer), and set no default.
// This allows us to union this with a oneOf field in the future should we wish to adjust/extend this behavior.
// +kubebuilder:validation:Enum=Critical;Standard;Sheddable
type Criticality string

const (
	// Critical defines the highest level of criticality. Requests to this band will be shed last.
	Critical Criticality = "Critical"

	// Standard defines the base criticality level and is more important than Sheddable but less
	// important than Critical. Requests in this band will be shed before critical traffic.
	// Most models are expected to fall within this band.
	Standard Criticality = "Standard"

	// Sheddable defines the lowest level of criticality. Requests to this band will be shed before
	// all other bands.
	Sheddable Criticality = "Sheddable"
)
148+
149+
// TargetModel represents a deployed model or a LoRA adapter. The
// Name field is expected to match the name of the LoRA adapter
// (or base model) as it is registered within the model server. Inference
// Gateway assumes that the model exists on the model server and it is the
// responsibility of the user to validate a correct match. Should a model fail
// to exist at request time, the error is processed by the Inference Gateway
// and emitted on the appropriate InferenceModel object.
type TargetModel struct {
	// Name is the name of the adapter or base model, as expected by the ModelServer.
	//
	// +kubebuilder:validation:MaxLength=253
	// +kubebuilder:validation:Required
	Name string `json:"name"`

	// Weight is used to determine the proportion of traffic that should be
	// sent to this model when multiple target models are specified.
	//
	// Weight defines the proportion of requests forwarded to the specified
	// model. This is computed as weight/(sum of all weights in this
	// TargetModels list). For non-zero values, there may be some epsilon from
	// the exact proportion defined here depending on the precision an
	// implementation supports. Weight is not a percentage and the sum of
	// weights does not need to equal 100.
	//
	// If a weight is set for any targetModel, it must be set for all targetModels.
	// Conversely weights are optional, so long as ALL targetModels do not specify a weight.
	// (This all-or-nothing rule is enforced by the CEL validation on
	// InferenceModelSpec.TargetModels.)
	//
	// +optional
	// +kubebuilder:validation:Minimum=0
	// +kubebuilder:validation:Maximum=1000000
	Weight *int32 `json:"weight,omitempty"`
}
181+
182+
// InferenceModelStatus defines the observed state of InferenceModel.
type InferenceModelStatus struct {
	// Conditions track the state of the InferenceModel.
	//
	// Known condition types are:
	//
	// * "Accepted"
	//
	// NOTE(review): the kubebuilder default below seeds a condition of type
	// "Ready", while the documented/known condition type (and the constant
	// ModelConditionAccepted) is "Accepted" — confirm which type the
	// controller actually reconciles.
	//
	// +optional
	// +listType=map
	// +listMapKey=type
	// +kubebuilder:validation:MaxItems=8
	// +kubebuilder:default={{type: "Ready", status: "Unknown", reason:"Pending", message:"Waiting for controller", lastTransitionTime: "1970-01-01T00:00:00Z"}}
	Conditions []metav1.Condition `json:"conditions,omitempty"`
}
197+
198+
// InferenceModelConditionType is a type of condition for the InferenceModel.
type InferenceModelConditionType string

// InferenceModelConditionReason is the reason for a given InferenceModelConditionType.
type InferenceModelConditionReason string

const (
	// ModelConditionAccepted indicates if the model config is accepted, and if not, why.
	//
	// Possible reasons for this condition to be True are:
	//
	// * "Accepted"
	//
	// Possible reasons for this condition to be False are:
	//
	// * "ModelNameInUse"
	//
	// Possible reasons for this condition to be Unknown are:
	//
	// * "Pending"
	//
	ModelConditionAccepted InferenceModelConditionType = "Accepted"

	// ModelReasonAccepted is the desired state. Model conforms to the state of the pool.
	ModelReasonAccepted InferenceModelConditionReason = "Accepted"

	// ModelReasonNameInUse is used when a given ModelName already exists within the pool.
	// Details about naming conflict resolution are on the ModelName field itself.
	ModelReasonNameInUse InferenceModelConditionReason = "ModelNameInUse"

	// ModelReasonPending is the initial state, and indicates that the controller has not yet reconciled the InferenceModel.
	ModelReasonPending InferenceModelConditionReason = "Pending"
)
231+
232+
// init registers the InferenceModel types with this package's SchemeBuilder
// (declared in groupversion_info.go) so that AddToScheme installs them into a
// runtime.Scheme.
func init() {
	SchemeBuilder.Register(&InferenceModel{}, &InferenceModelList{})
}

0 commit comments

Comments
 (0)