[feat][kubectl-plugin] add get workergroup cmd

We plan to add a [command to scale a worker group][1] in an existing
RayCluster, e.g. `kubectl ray scale cluster (CLUSTER_NAME) (WORKER_GROUP)`.
That requires the user to know the group name, but there's currently no way
to discover the worker group names in a RayCluster short of fetching the
resource with kubectl and reading or parsing the names out of it. A command
that shows worker group details for a cluster would be helpful.
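
For illustration, today's workaround is to query the resource directly,
e.g. with a JSONPath expression (`my-cluster` is a placeholder name):

    kubectl get raycluster my-cluster \
      -o jsonpath='{.spec.workerGroupSpecs[*].groupName}'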

[1]: #2926

Signed-off-by: David Xia <[email protected]>
davidxia committed Feb 12, 2025
1 parent 9559227 commit 99f6755
Showing 18 changed files with 1,300 additions and 20 deletions.
8 changes: 2 additions & 6 deletions kubectl-plugin/pkg/cmd/create/create_workergroup.go

@@ -20,10 +20,6 @@ import (
 	"k8s.io/kubectl/pkg/util/templates"
 )
 
-const (
-	resourceNvidiaGPU = "nvidia.com/gpu"
-)
-
 type CreateWorkerGroupOptions struct {
 	configFlags *genericclioptions.ConfigFlags
 	ioStreams   *genericclioptions.IOStreams
@@ -176,8 +172,8 @@ func createWorkerGroupSpec(options *CreateWorkerGroupOptions) rayv1.WorkerGroupSpec {
 
 	gpuResource := resource.MustParse(options.workerGPU)
 	if !gpuResource.IsZero() {
-		podTemplate.Spec.Containers[0].Resources.Requests[corev1.ResourceName(resourceNvidiaGPU)] = gpuResource
-		podTemplate.Spec.Containers[0].Resources.Limits[corev1.ResourceName(resourceNvidiaGPU)] = gpuResource
+		podTemplate.Spec.Containers[0].Resources.Requests[corev1.ResourceName(util.ResourceNvidiaGPU)] = gpuResource
+		podTemplate.Spec.Containers[0].Resources.Limits[corev1.ResourceName(util.ResourceNvidiaGPU)] = gpuResource
 	}
 
 	return rayv1.WorkerGroupSpec{
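
The `resourceNvidiaGPU` constant deleted above reappears below as
`util.ResourceNvidiaGPU`; the commit evidently hoists it into the shared
`util` package (one of the changed files not shown in this excerpt).
Assuming it keeps the same value, the moved definition would be simply:

    // in kubectl-plugin/pkg/util (assumed location; file not shown here)
    const ResourceNvidiaGPU = "nvidia.com/gpu"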
14 changes: 8 additions & 6 deletions kubectl-plugin/pkg/cmd/create/create_workergroup_test.go

@@ -8,6 +8,8 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/utils/ptr"
 
+	"github.com/ray-project/kuberay/kubectl-plugin/pkg/util"
+
 	rayv1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1"
 )
 
@@ -34,14 +36,14 @@ func TestCreateWorkerGroupSpec(t *testing.T) {
 					Image: "DEADBEEF",
 					Resources: corev1.ResourceRequirements{
 						Requests: corev1.ResourceList{
-							corev1.ResourceCPU:    resource.MustParse("2"),
-							corev1.ResourceMemory: resource.MustParse("5Gi"),
-							resourceNvidiaGPU:     resource.MustParse("1"),
+							corev1.ResourceCPU:     resource.MustParse("2"),
+							corev1.ResourceMemory:  resource.MustParse("5Gi"),
+							util.ResourceNvidiaGPU: resource.MustParse("1"),
 						},
 						Limits: corev1.ResourceList{
-							corev1.ResourceCPU:    resource.MustParse("2"),
-							corev1.ResourceMemory: resource.MustParse("5Gi"),
-							resourceNvidiaGPU:     resource.MustParse("1"),
+							corev1.ResourceCPU:     resource.MustParse("2"),
+							corev1.ResourceMemory:  resource.MustParse("5Gi"),
+							util.ResourceNvidiaGPU: resource.MustParse("1"),
 						},
 					},
 				},
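
Note the whitespace-only churn around the unchanged `ResourceCPU` and
`ResourceMemory` entries: gofmt realigns the map values because
`util.ResourceNvidiaGPU` is a longer key than `resourceNvidiaGPU` was.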
1 change: 1 addition & 0 deletions kubectl-plugin/pkg/cmd/get/get.go

@@ -24,5 +24,6 @@ func NewGetCommand(streams genericclioptions.IOStreams) *cobra.Command {
 	}
 
 	cmd.AddCommand(NewGetClusterCommand(streams))
+	cmd.AddCommand(NewGetWorkerGroupCommand(streams))
 	return cmd
 }
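
The `NewGetWorkerGroupCommand` constructor registered here lives in a new
`get_workergroup.go` file that this excerpt doesn't show. A minimal sketch
of its likely shape, assuming it mirrors the `get_cluster.go` pattern below
(the option-struct names and behavior are hypothetical, not the commit's
actual implementation):

    func NewGetWorkerGroupCommand(streams genericclioptions.IOStreams) *cobra.Command {
        // hypothetical options struct holding streams and config flags
        options := NewGetWorkerGroupOptions(streams)
        cmd := &cobra.Command{
            Use:          "workergroup [NAME]",
            Aliases:      []string{"workergroups"},
            Short:        "Get worker group information.",
            SilenceUsage: true,
            RunE: func(cmd *cobra.Command, args []string) error {
                // resolve flags and arguments, then print the worker groups
                if err := options.Complete(args); err != nil {
                    return err
                }
                return options.Run(cmd.Context())
            },
        }
        return cmd
    }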
1 change: 1 addition & 0 deletions kubectl-plugin/pkg/cmd/get/get_cluster.go

@@ -40,6 +40,7 @@ func NewGetClusterCommand(streams genericclioptions.IOStreams) *cobra.Command {
 
 	cmd := &cobra.Command{
 		Use:               "cluster [NAME]",
+		Aliases:           []string{"clusters"},
 		Short:             "Get cluster information.",
 		SilenceUsage:      true,
 		ValidArgsFunction: completion.RayClusterCompletionFunc(cmdFactory),
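
Because cobra's `Aliases` field maps the plural form to the same subcommand,
both spellings now work:

    kubectl ray get cluster
    kubectl ray get clusters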
(diffs for the remaining 14 changed files are not shown)
