From 15073e0e1c084b3c489a8559a275d2b311e349e6 Mon Sep 17 00:00:00 2001 From: Jacob Cody Wimer Date: Wed, 13 Feb 2019 14:33:39 -0500 Subject: [PATCH] Updated rook --- rook/cluster.yaml | 144 +++++++++++++++++++++++-- rook/dashboard-external.yaml | 6 +- rook/operator.yaml | 197 ++++++++++++++++++++++++++++------- rook/storageclass.yaml | 19 ++-- 4 files changed, 309 insertions(+), 57 deletions(-) diff --git a/rook/cluster.yaml b/rook/cluster.yaml index 33c012b..fe10b5d 100644 --- a/rook/cluster.yaml +++ b/rook/cluster.yaml @@ -6,19 +6,75 @@ metadata: apiVersion: v1 kind: ServiceAccount metadata: - name: rook-ceph-cluster + name: rook-ceph-osd + namespace: rook-ceph +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rook-ceph-mgr namespace: rook-ceph --- kind: Role apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: - name: rook-ceph-cluster + name: rook-ceph-osd namespace: rook-ceph rules: - apiGroups: [""] resources: ["configmaps"] verbs: [ "get", "list", "watch", "create", "update", "delete" ] --- +# Aspects of ceph-mgr that require access to the system namespace +kind: Role +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: rook-ceph-mgr-system + namespace: rook-ceph +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch +--- +# Aspects of ceph-mgr that operate within the cluster's namespace +kind: Role +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: rook-ceph-mgr + namespace: rook-ceph +rules: +- apiGroups: + - "" + resources: + - pods + - services + verbs: + - get + - list + - watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - create + - update + - delete +- apiGroups: + - ceph.rook.io + resources: + - "*" + verbs: + - "*" +--- # Allow the operator to create resources in this cluster's namespace kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1beta1 @@ -34,33 +90,86 @@ subjects: name: rook-ceph-system namespace: rook-ceph-system 
--- -# Allow the pods in this namespace to work with configmaps +# Allow the osd pods in this namespace to work with configmaps kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: - name: rook-ceph-cluster + name: rook-ceph-osd namespace: rook-ceph roleRef: apiGroup: rbac.authorization.k8s.io kind: Role - name: rook-ceph-cluster + name: rook-ceph-osd subjects: - kind: ServiceAccount - name: rook-ceph-cluster + name: rook-ceph-osd namespace: rook-ceph --- -apiVersion: ceph.rook.io/v1beta1 -kind: Cluster +# Allow the ceph mgr to access the cluster-specific resources necessary for the mgr modules +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: rook-ceph-mgr + namespace: rook-ceph +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: rook-ceph-mgr +subjects: +- kind: ServiceAccount + name: rook-ceph-mgr + namespace: rook-ceph +--- +# Allow the ceph mgr to access the rook system resources necessary for the mgr modules +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: rook-ceph-mgr-system + namespace: rook-ceph-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: rook-ceph-mgr-system +subjects: +- kind: ServiceAccount + name: rook-ceph-mgr + namespace: rook-ceph +--- +# Allow the ceph mgr to access cluster-wide resources necessary for the mgr modules +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: rook-ceph-mgr-cluster + namespace: rook-ceph +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: rook-ceph-mgr-cluster +subjects: +- kind: ServiceAccount + name: rook-ceph-mgr + namespace: rook-ceph +--- +apiVersion: ceph.rook.io/v1 +kind: CephCluster metadata: name: rook-ceph namespace: rook-ceph spec: + cephVersion: + # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw). + # v12 is luminous, v13 is mimic, and v14 is nautilus. 
+ # RECOMMENDATION: In production, use a specific version tag instead of the general v13 flag, which pulls the latest release and could result in different + # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/. + image: ceph/ceph:v13.2.4-20190109 + # Whether to allow unsupported versions of Ceph. Currently only luminous and mimic are supported. + # After nautilus is released, Rook will be updated to support nautilus. + # Do not set to true in production. + allowUnsupported: false # The path on the host where configuration files will be persisted. If not specified, a kubernetes emptyDir will be created (not recommended). # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster. # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment. dataDirHostPath: /var/lib/rook - # The service account under which to run the daemon pods in this cluster if the default account is not sufficient (OSDs) - serviceAccount: rook-ceph-cluster # set the amount of mons to be started mon: count: 3 @@ -68,9 +177,19 @@ spec: # enable the ceph dashboard for viewing cluster status dashboard: enabled: true + # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy) + # urlPrefix: /ceph-dashboard + # serve the dashboard at the given port. + # port: 8443 + # serve the dashboard using SSL + # ssl: true network: # toggle to use hostNetwork hostNetwork: false + rbdMirroring: + # The number of daemons that will perform the rbd mirroring. + # rbd mirroring must be configured with "rbd mirror" from the rook toolbox. + workers: 0 # To control where various services will be scheduled by kubernetes, use the placement configuration sections below. 
# The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and # tolerate taints with a key of 'storage-node'. @@ -116,6 +235,7 @@ spec: # storeType: bluestore databaseSizeMB: "1024" # this value can be removed for environments with normal sized disks (100 GB or larger) journalSizeMB: "1024" # this value can be removed for environments with normal sized disks (20 GB or larger) + osdsPerDevice: "1" # this value can be overridden at the node or device level # Cluster level list of directories to use for storage. These values will be set for all nodes that have no `directories` set. # directories: # - path: /rook/storage-dir @@ -135,7 +255,9 @@ spec: # - name: "172.17.4.201" # devices: # specific devices to use for storage can be specified for each node # - name: "sdb" -# - name: "sdc" +# - name: "nvme01" # multiple osds can be created on high performance devices +# config: +# osdsPerDevice: "5" # config: # configuration can be specified at the node level which overrides the cluster level config # storeType: filestore # - name: "172.17.4.301" diff --git a/rook/dashboard-external.yaml b/rook/dashboard-external.yaml index 0326624..9ab6137 100644 --- a/rook/dashboard-external.yaml +++ b/rook/dashboard-external.yaml @@ -9,11 +9,11 @@ metadata: spec: ports: - name: dashboard - port: 7000 + port: 8443 protocol: TCP - targetPort: 7000 + targetPort: 8443 selector: app: rook-ceph-mgr rook_cluster: rook-ceph sessionAffinity: None - type: NodePort \ No newline at end of file + type: NodePort diff --git a/rook/operator.yaml b/rook/operator.yaml index e4a2d85..642e002 100644 --- a/rook/operator.yaml +++ b/rook/operator.yaml @@ -6,66 +6,145 @@ metadata: apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: - name: clusters.ceph.rook.io + name: cephclusters.ceph.rook.io spec: group: ceph.rook.io names: - kind: Cluster - listKind: ClusterList - plural: clusters - singular: cluster - shortNames: - - rcc 
+ kind: CephCluster + listKind: CephClusterList + plural: cephclusters + singular: cephcluster scope: Namespaced - version: v1beta1 + version: v1 + validation: + openAPIV3Schema: + properties: + spec: + properties: + cephVersion: + properties: + allowUnsupported: + type: boolean + image: + type: string + name: + pattern: ^(luminous|mimic|nautilus)$ + type: string + dashboard: + properties: + enabled: + type: boolean + urlPrefix: + type: string + port: + type: integer + dataDirHostPath: + pattern: ^/(\S+) + type: string + mon: + properties: + allowMultiplePerNode: + type: boolean + count: + maximum: 9 + minimum: 1 + type: integer + required: + - count + network: + properties: + hostNetwork: + type: boolean + storage: + properties: + nodes: + items: {} + type: array + useAllDevices: {} + useAllNodes: + type: boolean + required: + - mon + additionalPrinterColumns: + - name: DataDirHostPath + type: string + description: Directory used on the K8s nodes + JSONPath: .spec.dataDirHostPath + - name: MonCount + type: string + description: Number of MONs + JSONPath: .spec.mon.count + - name: Age + type: date + JSONPath: .metadata.creationTimestamp + - name: State + type: string + description: Current State + JSONPath: .status.state --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: - name: filesystems.ceph.rook.io + name: cephfilesystems.ceph.rook.io spec: group: ceph.rook.io names: - kind: Filesystem - listKind: FilesystemList - plural: filesystems - singular: filesystem - shortNames: - - rcfs + kind: CephFilesystem + listKind: CephFilesystemList + plural: cephfilesystems + singular: cephfilesystem scope: Namespaced - version: v1beta1 + version: v1 + additionalPrinterColumns: + - name: MdsCount + type: string + description: Number of MDSs + JSONPath: .spec.metadataServer.activeCount + - name: Age + type: date + JSONPath: .metadata.creationTimestamp --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: - name: 
objectstores.ceph.rook.io + name: cephobjectstores.ceph.rook.io spec: group: ceph.rook.io names: - kind: ObjectStore - listKind: ObjectStoreList - plural: objectstores - singular: objectstore - shortNames: - - rco + kind: CephObjectStore + listKind: CephObjectStoreList + plural: cephobjectstores + singular: cephobjectstore scope: Namespaced - version: v1beta1 + version: v1 --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: - name: pools.ceph.rook.io + name: cephobjectstoreusers.ceph.rook.io spec: group: ceph.rook.io names: - kind: Pool - listKind: PoolList - plural: pools - singular: pool - shortNames: - - rcp + kind: CephObjectStoreUser + listKind: CephObjectStoreUserList + plural: cephobjectstoreusers + singular: cephobjectstoreuser scope: Namespaced - version: v1beta1 + version: v1 +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: cephblockpools.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephBlockPool + listKind: CephBlockPoolList + plural: cephblockpools + singular: cephblockpool + scope: Namespaced + version: v1 --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition @@ -97,6 +176,7 @@ rules: resources: - secrets - pods + - pods/log - services - configmaps verbs: @@ -224,6 +304,26 @@ rules: verbs: - "*" --- +# Aspects of ceph-mgr that require cluster-wide access +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: rook-ceph-mgr-cluster + labels: + operator: rook + storage-backend: ceph +rules: +- apiGroups: + - "" + resources: + - configmaps + - nodes + - nodes/proxy + verbs: + - get + - list + - watch +--- # The rook system service account used by the operator, agent, and discovery pods apiVersion: v1 kind: ServiceAccount @@ -289,7 +389,7 @@ spec: serviceAccountName: rook-ceph-system containers: - name: rook-ceph-operator - image: rook/ceph:v0.8.3 + image: rook/ceph:v0.9.2 args: ["ceph", "operator"] 
volumeMounts: - mountPath: /var/lib/rook @@ -303,17 +403,30 @@ spec: # Rook Agent toleration. Will tolerate all taints with all keys. # Choose between NoSchedule, PreferNoSchedule and NoExecute: # - name: AGENT_TOLERATION - # value: "NoSchedule" + # value: "NoSchedule" # (Optional) Rook Agent toleration key. Set this to the key of the taint you want to tolerate # - name: AGENT_TOLERATION_KEY - # value: "" + # value: "" + # (Optional) Rook Agent mount security mode. Can be `Any` or `Restricted`. + # `Any` uses Ceph admin credentials by default/fallback. + # For using `Restricted` you must have a Ceph secret in each namespace storage should be consumed from and + # set `mountUser` to the Ceph user, `mountSecret` to the Kubernetes secret name. + # The `mountSecret` Kubernetes secret must exist in the namespace in which the storage is consumed. + # - name: AGENT_MOUNT_SECURITY_MODE + # value: "Any" # Set the path where the Rook agent can find the flex volumes # - name: FLEXVOLUME_DIR_PATH # value: "" + # Set the path where kernel modules can be found + # - name: LIB_MODULES_DIR_PATH + # value: "" + # Mount any extra directories into the agent container + # - name: AGENT_MOUNTS + # value: "somemount=/host/path:/container/path,someothermount=/host/path2:/container/path2" # Rook Discover toleration. Will tolerate all taints with all keys. # Choose between NoSchedule, PreferNoSchedule and NoExecute: # - name: DISCOVER_TOLERATION - # value: "NoSchedule" + # value: "NoSchedule" # (Optional) Rook Discover toleration key. Set this to the key of the taint you want to tolerate # - name: DISCOVER_TOLERATION_KEY # value: "" @@ -333,11 +446,23 @@ spec: # current mon with a new mon (useful for compensating flapping network). - name: ROOK_MON_OUT_TIMEOUT value: "300s" + # The duration between discovering devices in the rook-discover daemonset. + - name: ROOK_DISCOVER_DEVICES_INTERVAL + value: "60m" # Whether to start pods as privileged that mount a host path, which includes the Ceph mon and osd pods. 
# This is necessary to workaround the anyuid issues when running on OpenShift. # For more details see https://github.com/rook/rook/issues/1314#issuecomment-355799641 - name: ROOK_HOSTPATH_REQUIRES_PRIVILEGED value: "false" + # In some situations SELinux relabelling breaks (times out) on large filesystems, and doesn't work with cephfs ReadWriteMany volumes (last relabel wins). + # Disable it here if you have similar issues. + # For more details see https://github.com/rook/rook/issues/2417 + - name: ROOK_ENABLE_SELINUX_RELABELING + value: "true" + # In large volumes it will take some time to chown all the files. Disable it here if you have performance issues. + # For more details see https://github.com/rook/rook/issues/2254 + - name: ROOK_ENABLE_FSGROUP + value: "true" # The name of the node to pass with the downward API - name: NODE_NAME valueFrom: diff --git a/rook/storageclass.yaml b/rook/storageclass.yaml index 868df21..4da723b 100644 --- a/rook/storageclass.yaml +++ b/rook/storageclass.yaml @@ -1,11 +1,11 @@ -apiVersion: ceph.rook.io/v1beta1 -kind: Pool +apiVersion: ceph.rook.io/v1 +kind: CephBlockPool metadata: name: replicapool namespace: rook-ceph spec: replicated: - size: 3 + size: 1 --- apiVersion: storage.k8s.io/v1 kind: StorageClass @@ -13,10 +13,15 @@ metadata: name: rook-ceph-block provisioner: ceph.rook.io/block parameters: - pool: replicapool - # The value of "clusterNamespace" MUST be the same as the one in which your rook cluster exist + blockPool: replicapool + # Specify the namespace of the rook cluster from which to create volumes. + # If not specified, it will use `rook` as the default namespace of the cluster. + # This is also the namespace where the cluster will be deployed. clusterNamespace: rook-ceph # Specify the filesystem type of the volume. If not specified, it will use `ext4`. fstype: xfs -# Optional, default reclaimPolicy is "Delete". 
Other options are: "Retain", "Recycle" as documented in https://kubernetes.io/docs/concepts/storage/storage-classes/ -reclaimPolicy: Retain + # (Optional) Specify an existing Ceph user that will be used for mounting storage with this StorageClass. + #mountUser: user1 + # (Optional) Specify an existing Kubernetes secret name containing just one key holding the Ceph user secret. + # The secret must exist in each namespace(s) where the storage will be consumed. + #mountSecret: ceph-user1-secret