diff --git a/charts/csi-hyperstack/templates/csi.yaml b/charts/csi-hyperstack/templates/csi.yaml
index 0b44ba1..e49a71a 100644
--- a/charts/csi-hyperstack/templates/csi.yaml
+++ b/charts/csi-hyperstack/templates/csi.yaml
@@ -9,4 +9,4 @@ metadata:
 spec:
   volumeLifecycleModes:
   - Persistent
-  fsGroupPolicy: None
+  fsGroupPolicy: File
diff --git a/pkg/driver/driver.go b/pkg/driver/driver.go
index 74a74bb..e83155b 100644
--- a/pkg/driver/driver.go
+++ b/pkg/driver/driver.go
@@ -92,6 +92,7 @@ func NewDriver(opts *DriverOpts) *Driver {
 		csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME,
 		csi.NodeServiceCapability_RPC_EXPAND_VOLUME,
 		csi.NodeServiceCapability_RPC_GET_VOLUME_STATS,
+		csi.NodeServiceCapability_RPC_VOLUME_MOUNT_GROUP,
 	})
 
 	d.vcap = MapVolumeCapabilityAccessModes([]csi.VolumeCapability_AccessMode_Mode{
diff --git a/pkg/driver/nodeserver.go b/pkg/driver/nodeserver.go
index 0494857..c205731 100644
--- a/pkg/driver/nodeserver.go
+++ b/pkg/driver/nodeserver.go
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"os"
 	"os/exec"
+	"strconv"
 	"strings"
 	"time"
 
@@ -51,6 +52,25 @@ func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol
 	if err != nil {
 		return nil, err
 	}
+
+	// Apply fsGroup ownership if provided via VOLUME_MOUNT_GROUP capability.
+	// Sets group on the root inode so non-root pods with matching fsGroup can write.
+	if mountVolume := req.VolumeCapability.GetMount(); mountVolume != nil {
+		if volumeMountGroup := mountVolume.GetVolumeMountGroup(); volumeMountGroup != "" {
+			// ParseUint rejects negative and >32-bit values that Atoi would accept.
+			gid, err := strconv.ParseUint(volumeMountGroup, 10, 32)
+			if err != nil {
+				return nil, status.Errorf(codes.InvalidArgument, "invalid volume_mount_group %q: %v", volumeMountGroup, err)
+			}
+			if err := os.Chown(target, -1, int(gid)); err != nil {
+				return nil, status.Errorf(codes.Internal, "failed to chown staging path %s to gid %d: %v", target, gid, err)
+			}
+			if err := os.Chmod(target, 0775|os.ModeSetgid); err != nil {
+				return nil, status.Errorf(codes.Internal, "failed to chmod staging path %s: %v", target, err)
+			}
+			klog.Infof("NodeStageVolume: set group %d and mode 02775 (setgid) on %s", gid, target)
+		}
+	}
+
 	return &csi.NodeStageVolumeResponse{}, nil
 }
 
@@ -204,6 +224,25 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
 	source := req.StagingTargetPath
 	target := req.TargetPath
 
+	// Apply fsGroup ownership on the staging path before bind-mounting.
+	// This runs on every publish so permissions are correct even if the volume
+	// was staged before the driver was upgraded.
+	if mountVolume := req.VolumeCapability.GetMount(); mountVolume != nil {
+		if volumeMountGroup := mountVolume.GetVolumeMountGroup(); volumeMountGroup != "" {
+			// ParseUint rejects negative and >32-bit values that Atoi would accept.
+			gid, err := strconv.ParseUint(volumeMountGroup, 10, 32)
+			if err != nil {
+				return nil, status.Errorf(codes.InvalidArgument, "invalid volume_mount_group %q: %v", volumeMountGroup, err)
+			}
+			if err := os.Chown(source, -1, int(gid)); err != nil {
+				return nil, status.Errorf(codes.Internal, "failed to chown staging path %s to gid %d: %v", source, gid, err)
+			}
+			if err := os.Chmod(source, 0775|os.ModeSetgid); err != nil {
+				return nil, status.Errorf(codes.Internal, "failed to chmod staging path %s: %v", source, err)
+			}
+			klog.Infof("NodePublishVolume: set group %d and mode 02775 (setgid) on %s", gid, source)
+		}
+	}
+
 	err := mountDevice(source, target, fsType, options)
 	if err != nil {
 		return nil, status.Error(codes.Internal, fmt.Sprintf("Error %s, mounting the volume from staging dir to target dir", err.Error()))