mirror of
https://github.com/jcwimer/kubernetes-ansible
synced 2026-03-24 16:44:44 +00:00
Added rook-ceph manual deployments
This commit is contained in:
25
rook/benchmark-rook.sh
Normal file
25
rook/benchmark-rook.sh
Normal file
@@ -0,0 +1,25 @@
#!/bin/bash
# Run a short RADOS benchmark suite against the Rook-managed Ceph cluster
# by exec-ing ceph/rados commands inside the mon0 pod.

mon_namespace="rook-ceph"
mon_pod=$(kubectl -n "${mon_namespace}" get pods | grep mon0 | awk '{print $1}')

# run_command POD NAMESPACE CMD
# Exec CMD inside POD in NAMESPACE. CMD is expanded unquoted on purpose so
# that it word-splits into the command and its arguments.
run_command() {
  local target_pod="${1}"
  local target_ns="${2}"
  local cmd="${3}"

  kubectl -n "${target_ns}" exec -it "${target_pod}" -- ${cmd}
}

# run_command_on_mon CMD
# Convenience wrapper that targets the mon0 pod discovered above.
run_command_on_mon() {
  local cmd="${1}"
  run_command "${mon_pod}" "${mon_namespace}" "${cmd}"
}

run_command_on_mon "ceph status"
# Create a dedicated benchmark pool (100 PGs) and tag it for RBD use.
run_command_on_mon "ceph osd pool create scbench 100 100"
run_command_on_mon "ceph osd pool application enable scbench rbd"
# 10s write (objects kept so the read benchmarks have data), then
# sequential and random read passes.
run_command_on_mon "rados bench -p scbench 10 write --no-cleanup"
run_command_on_mon "rados bench -p scbench 10 seq"
run_command_on_mon "rados bench -p scbench 10 rand"
# Uncomment to remove the benchmark objects afterwards.
#run_command_on_mon "rados -p scbench cleanup"
19
rook/ceph-rook-status.sh
Normal file
19
rook/ceph-rook-status.sh
Normal file
@@ -0,0 +1,19 @@
#!/bin/bash
# Print the Ceph cluster status by running `ceph status` inside the mon0 pod
# of the Rook-managed cluster.

mon_namespace="rook-ceph"
mon_pod=$(kubectl -n "${mon_namespace}" get pods | grep mon0 | awk '{print $1}')

# run_command POD NAMESPACE CMD
# Exec CMD inside POD in NAMESPACE. CMD is expanded unquoted on purpose so
# that it word-splits into the command and its arguments.
run_command() {
  local target_pod="${1}"
  local target_ns="${2}"
  local cmd="${3}"

  kubectl -n "${target_ns}" exec -it "${target_pod}" -- ${cmd}
}

# run_command_on_mon CMD
# Convenience wrapper that targets the mon0 pod discovered above.
run_command_on_mon() {
  local cmd="${1}"
  run_command "${mon_pod}" "${mon_namespace}" "${cmd}"
}

run_command_on_mon "ceph status"
142
rook/cluster.yaml
Normal file
142
rook/cluster.yaml
Normal file
@@ -0,0 +1,142 @@
---
# Namespace that holds the Ceph cluster resources.
apiVersion: v1
kind: Namespace
metadata:
  name: rook-ceph
---
# Service account under which the cluster daemon pods run.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rook-ceph-cluster
  namespace: rook-ceph
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: rook-ceph-cluster
  namespace: rook-ceph
rules:
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["get", "list", "watch", "create", "update", "delete"]
---
# Allow the operator to create resources in this cluster's namespace.
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: rook-ceph-cluster-mgmt
  namespace: rook-ceph
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: rook-ceph-cluster-mgmt
subjects:
- kind: ServiceAccount
  name: rook-ceph-system
  namespace: rook-ceph-system
---
# Allow the pods in this namespace to work with configmaps.
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: rook-ceph-cluster
  namespace: rook-ceph
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rook-ceph-cluster
subjects:
- kind: ServiceAccount
  name: rook-ceph-cluster
  namespace: rook-ceph
---
apiVersion: ceph.rook.io/v1beta1
kind: Cluster
metadata:
  name: rook-ceph
  namespace: rook-ceph
spec:
  # The path on the host where configuration files will be persisted. If not
  # specified, a kubernetes emptyDir will be created (not recommended).
  # Important: if you reinstall the cluster, make sure you delete this
  # directory from each host or else the mons will fail to start on the new
  # cluster. In Minikube, the '/data' directory is configured to persist
  # across reboots; use "/data/rook" there.
  dataDirHostPath: /var/lib/rook
  # The service account under which to run the daemon pods in this cluster
  # if the default account is not sufficient (OSDs).
  serviceAccount: rook-ceph-cluster
  # Number of mons to start.
  mon:
    count: 3
    allowMultiplePerNode: true
  # Enable the ceph dashboard for viewing cluster status.
  dashboard:
    enabled: true
  network:
    # Toggle to use hostNetwork.
    hostNetwork: false
  # To control where various services will be scheduled by kubernetes, use
  # the placement configuration sections below. The example under 'all'
  # would have all services scheduled on nodes labeled 'role=storage-node'
  # and tolerate taints with a key of 'storage-node'.
  # placement:
  #   all:
  #     nodeAffinity:
  #       requiredDuringSchedulingIgnoredDuringExecution:
  #         nodeSelectorTerms:
  #         - matchExpressions:
  #           - key: role
  #             operator: In
  #             values:
  #             - storage-node
  #     podAffinity:
  #     podAntiAffinity:
  #     tolerations:
  #     - key: storage-node
  #       operator: Exists
  # The above placement information can also be specified for mon, osd, and
  # mgr components:
  #   mon:
  #   osd:
  #   mgr:
  resources:
  # The requests and limits set here allow the mgr pod to use half of one
  # CPU core and 1 gigabyte of memory:
  #   mgr:
  #     limits:
  #       cpu: "500m"
  #       memory: "1024Mi"
  #     requests:
  #       cpu: "500m"
  #       memory: "1024Mi"
  # The above example requests/limits can also be added to the mon and osd
  # components:
  #   mon:
  #   osd:
  storage:  # cluster level storage configuration and selection
    useAllNodes: true
    useAllDevices: false
    deviceFilter:
    location:
    config:
      # The default and recommended storeType is dynamically set to
      # bluestore for devices and filestore for directories. Set storeType
      # explicitly only if it is required not to use the default.
      # storeType: bluestore
      databaseSizeMB: "1024"  # can be removed for disks of 100 GB or larger
      journalSizeMB: "1024"   # can be removed for disks of 20 GB or larger
    # Cluster level list of directories to use for storage. These values
    # will be set for all nodes that have no `directories` set.
    # directories:
    # - path: /rook/storage-dir
    # Individual nodes and their config can be specified as well, but
    # 'useAllNodes' above must be set to false. Then only the named nodes
    # below will be used as storage resources. Each node's 'name' field
    # should match its 'kubernetes.io/hostname' label.
    # nodes:
    # - name: "172.17.4.101"
    #   directories:  # specific directories to use for storage per node
    #   - path: "/rook/storage-dir"
    #   resources:
    #     limits:
    #       cpu: "500m"
    #       memory: "1024Mi"
    #     requests:
    #       cpu: "500m"
    #       memory: "1024Mi"
    # - name: "172.17.4.201"
    #   devices:  # specific devices to use for storage per node
    #   - name: "sdb"
    #   - name: "sdc"
    #   config:  # node-level config overrides the cluster level config
    #     storeType: filestore
    # - name: "172.17.4.301"
    #   deviceFilter: "^sd."
19
rook/dashboard-external.yaml
Normal file
19
rook/dashboard-external.yaml
Normal file
@@ -0,0 +1,19 @@
---
# Expose the Ceph mgr dashboard outside the cluster via a NodePort service.
apiVersion: v1
kind: Service
metadata:
  name: rook-ceph-mgr-dashboard-external
  namespace: rook-ceph
  labels:
    app: rook-ceph-mgr
    rook_cluster: rook-ceph
spec:
  ports:
  - name: dashboard
    port: 7000
    protocol: TCP
    targetPort: 7000
  selector:
    app: rook-ceph-mgr
    rook_cluster: rook-ceph
  sessionAffinity: None
  type: NodePort
6
rook/deploy-rook.sh
Normal file
6
rook/deploy-rook.sh
Normal file
@@ -0,0 +1,6 @@
#!/bin/bash
# Deploy Rook in order: operator first, then the Ceph cluster, then the
# storage class and the external dashboard service. The sleeps give the
# operator / cluster time to come up before dependent resources are applied.
#
# Fix: added the missing shebang and `set -e` so a failed apply aborts the
# deployment instead of silently continuing with later, dependent steps.
set -e

kubectl apply -f operator.yaml
sleep 30s
kubectl apply -f cluster.yaml
sleep 60s
kubectl apply -f storageclass.yaml
kubectl apply -f dashboard-external.yaml
1
rook/get-ceph-dashboard-port.sh
Normal file
1
rook/get-ceph-dashboard-port.sh
Normal file
@@ -0,0 +1 @@
#!/bin/bash
# Print the NodePort of the external Ceph dashboard service.
# Fix: added the missing shebang and normalized the awk program quoting
# (`{'print $5'}` happened to work only by accident of shell quote removal).
# Pipeline: take the PORT(S) column (e.g. "7000:31234/TCP"), strip "/TCP",
# and keep the node port after the colon.
kubectl -n rook-ceph get service | grep 'dashboard-external' | awk '{print $5}' | sed 's/\/TCP//g' | cut -d ':' -f 2
360
rook/operator.yaml
Normal file
360
rook/operator.yaml
Normal file
@@ -0,0 +1,360 @@
---
# Namespace for the Rook operator and its system daemons.
apiVersion: v1
kind: Namespace
metadata:
  name: rook-ceph-system
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: clusters.ceph.rook.io
spec:
  group: ceph.rook.io
  names:
    kind: Cluster
    listKind: ClusterList
    plural: clusters
    singular: cluster
    shortNames:
    - rcc
  scope: Namespaced
  version: v1beta1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: filesystems.ceph.rook.io
spec:
  group: ceph.rook.io
  names:
    kind: Filesystem
    listKind: FilesystemList
    plural: filesystems
    singular: filesystem
    shortNames:
    - rcfs
  scope: Namespaced
  version: v1beta1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: objectstores.ceph.rook.io
spec:
  group: ceph.rook.io
  names:
    kind: ObjectStore
    listKind: ObjectStoreList
    plural: objectstores
    singular: objectstore
    shortNames:
    - rco
  scope: Namespaced
  version: v1beta1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: pools.ceph.rook.io
spec:
  group: ceph.rook.io
  names:
    kind: Pool
    listKind: PoolList
    plural: pools
    singular: pool
    shortNames:
    - rcp
  scope: Namespaced
  version: v1beta1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: volumes.rook.io
spec:
  group: rook.io
  names:
    kind: Volume
    listKind: VolumeList
    plural: volumes
    singular: volume
    shortNames:
    - rv
  scope: Namespaced
  version: v1alpha2
---
# The cluster role for managing all the cluster-specific resources in a
# namespace.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: rook-ceph-cluster-mgmt
  labels:
    operator: rook
    storage-backend: ceph
rules:
- apiGroups:
  - ""
  resources:
  - secrets
  - pods
  - services
  - configmaps
  verbs:
  - get
  - list
  - watch
  - patch
  - create
  - update
  - delete
- apiGroups:
  - extensions
  resources:
  - deployments
  - daemonsets
  - replicasets
  verbs:
  - get
  - list
  - watch
  - create
  - update
  - delete
---
# The role for the operator to manage resources in the system namespace.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
  name: rook-ceph-system
  namespace: rook-ceph-system
  labels:
    operator: rook
    storage-backend: ceph
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - configmaps
  verbs:
  - get
  - list
  - watch
  - patch
  - create
  - update
  - delete
- apiGroups:
  - extensions
  resources:
  - daemonsets
  verbs:
  - get
  - list
  - watch
  - create
  - update
  - delete
---
# The cluster role for managing the Rook CRDs.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: rook-ceph-global
  labels:
    operator: rook
    storage-backend: ceph
rules:
- apiGroups:
  - ""
  resources:
  # Pod access is needed for fencing.
  - pods
  # Node access is needed for determining nodes where mons should run.
  - nodes
  - nodes/proxy
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - events
  # PVs and PVCs are managed by the Rook provisioner.
  - persistentvolumes
  - persistentvolumeclaims
  verbs:
  - get
  - list
  - watch
  - patch
  - create
  - update
  - delete
- apiGroups:
  - storage.k8s.io
  resources:
  - storageclasses
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - batch
  resources:
  - jobs
  verbs:
  - get
  - list
  - watch
  - create
  - update
  - delete
- apiGroups:
  - ceph.rook.io
  resources:
  - "*"
  verbs:
  - "*"
- apiGroups:
  - rook.io
  resources:
  - "*"
  verbs:
  - "*"
---
# The rook system service account used by the operator, agent, and
# discovery pods.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rook-ceph-system
  namespace: rook-ceph-system
  labels:
    operator: rook
    storage-backend: ceph
---
# Grant the operator, agent, and discovery agents access to resources in
# the rook-ceph-system namespace.
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: rook-ceph-system
  namespace: rook-ceph-system
  labels:
    operator: rook
    storage-backend: ceph
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rook-ceph-system
subjects:
- kind: ServiceAccount
  name: rook-ceph-system
  namespace: rook-ceph-system
---
# Grant the rook system daemons cluster-wide access to manage the Rook
# CRDs, PVCs, and storage classes.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: rook-ceph-global
  namespace: rook-ceph-system
  labels:
    operator: rook
    storage-backend: ceph
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: rook-ceph-global
subjects:
- kind: ServiceAccount
  name: rook-ceph-system
  namespace: rook-ceph-system
---
# The deployment for the rook operator.
apiVersion: apps/v1beta1
kind: Deployment
metadata:
  name: rook-ceph-operator
  namespace: rook-ceph-system
  labels:
    operator: rook
    storage-backend: ceph
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: rook-ceph-operator
    spec:
      serviceAccountName: rook-ceph-system
      containers:
      - name: rook-ceph-operator
        image: rook/ceph:v0.8.3
        args: ["ceph", "operator"]
        volumeMounts:
        - mountPath: /var/lib/rook
          name: rook-config
        - mountPath: /etc/ceph
          name: default-config-dir
        env:
        # To disable RBAC, uncomment the following:
        # - name: RBAC_ENABLED
        #   value: "false"
        # Rook Agent toleration. Will tolerate all taints with all keys.
        # Choose between NoSchedule, PreferNoSchedule and NoExecute:
        # - name: AGENT_TOLERATION
        #   value: "NoSchedule"
        # (Optional) Rook Agent toleration key. Set this to the key of the
        # taint you want to tolerate.
        # - name: AGENT_TOLERATION_KEY
        #   value: "<KeyOfTheTaintToTolerate>"
        # Set the path where the Rook agent can find the flex volumes.
        # - name: FLEXVOLUME_DIR_PATH
        #   value: "<PathToFlexVolumes>"
        # Rook Discover toleration. Will tolerate all taints with all keys.
        # Choose between NoSchedule, PreferNoSchedule and NoExecute:
        # - name: DISCOVER_TOLERATION
        #   value: "NoSchedule"
        # (Optional) Rook Discover toleration key. Set this to the key of
        # the taint you want to tolerate.
        # - name: DISCOVER_TOLERATION_KEY
        #   value: "<KeyOfTheTaintToTolerate>"
        # Allow rook to create multiple file systems. Note: this is
        # considered an experimental feature in Ceph, as described at
        # http://docs.ceph.com/docs/master/cephfs/experimental-features/#multiple-filesystems-within-a-ceph-cluster
        # which might cause mons to crash, see
        # https://github.com/rook/rook/issues/1027
        - name: ROOK_ALLOW_MULTIPLE_FILESYSTEMS
          value: "false"
        # The logging level for the operator: INFO | DEBUG
        - name: ROOK_LOG_LEVEL
          value: "INFO"
        # The interval to check if every mon is in the quorum.
        - name: ROOK_MON_HEALTHCHECK_INTERVAL
          value: "45s"
        # The duration to wait before trying to failover or remove/replace
        # the current mon with a new mon (useful for compensating a
        # flapping network).
        - name: ROOK_MON_OUT_TIMEOUT
          value: "300s"
        # Whether to start pods as privileged that mount a host path, which
        # includes the Ceph mon and osd pods. Necessary to work around the
        # anyuid issues when running on OpenShift; for details see
        # https://github.com/rook/rook/issues/1314#issuecomment-355799641
        - name: ROOK_HOSTPATH_REQUIRES_PRIVILEGED
          value: "false"
        # The name of the node to pass with the downward API.
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        # The pod name to pass with the downward API.
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        # The pod namespace to pass with the downward API.
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
      volumes:
      - name: rook-config
        emptyDir: {}
      - name: default-config-dir
        emptyDir: {}
32
rook/other/createkeys.sh
Normal file
32
rook/other/createkeys.sh
Normal file
@@ -0,0 +1,32 @@
#!/bin/bash
# Create the Kubernetes RBD secrets for consuming Ceph storage:
#   - ceph-secret:       the client.admin key
#   - ceph-secret-kube:  a dedicated client.kube key scoped to the "kube" pool
# Keys are read by exec-ing `ceph auth` commands inside the mon0 pod.

mon_namespace="rook-ceph"
mon_pod=$(kubectl -n "${mon_namespace}" get pods | grep mon0 | awk '{print $1}')

# run_command POD NAMESPACE CMD
# Exec CMD inside POD in NAMESPACE. CMD is expanded unquoted on purpose so
# that it word-splits into the command and its arguments.
# NOTE(review): `-t` allocates a TTY even when output is captured via $(...);
# that can append carriage returns to captured keys — confirm and consider
# dropping `-t` if secrets come out corrupted.
function run_command () {
  local pod="${1}"
  local namespace="${2}"
  local command="${3}"

  kubectl -n "${namespace}" exec -it "${pod}" -- ${command}
}

# run_command_on_mon CMD
# Convenience wrapper that targets the mon0 pod discovered above.
function run_command_on_mon () {
  local command="${1}"
  run_command "${mon_pod}" "${mon_namespace}" "${command}"
}

admin_key=$(run_command_on_mon "ceph auth get-key client.admin")
kubectl create secret generic ceph-secret \
  --type="kubernetes.io/rbd" \
  --from-literal=key="${admin_key}" \
  --namespace=rook-ceph

run_command_on_mon "ceph osd pool create kube 1024 1024"
# Fix: the osd capability string was missing its closing single quote
# ('allow rwx pool=kube), which sent a malformed cap spec to Ceph.
run_command_on_mon "ceph auth get-or-create client.kube mon 'allow r' osd 'allow rwx pool=kube'"
kube_key=$(run_command_on_mon "ceph auth get-key client.kube")
kubectl create secret generic ceph-secret-kube \
  --type="kubernetes.io/rbd" \
  --from-literal=key="${kube_key}" \
  --namespace=rook-ceph
22
rook/storageclass.yaml
Normal file
22
rook/storageclass.yaml
Normal file
@@ -0,0 +1,22 @@
---
# Replicated Ceph pool backing the block storage class below.
apiVersion: ceph.rook.io/v1beta1
kind: Pool
metadata:
  name: replicapool
  namespace: rook-ceph
spec:
  replicated:
    size: 3
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: rook-ceph-block
provisioner: ceph.rook.io/block
parameters:
  pool: replicapool
  # The value of "clusterNamespace" MUST match the namespace in which the
  # rook cluster exists.
  clusterNamespace: rook-ceph
  # Filesystem type of the volume; defaults to `ext4` when unspecified.
  fstype: xfs
# Optional; defaults to "Delete". Other options: "Retain", "Recycle" — see
# https://kubernetes.io/docs/concepts/storage/storage-classes/
reclaimPolicy: Retain
2
rook/wordpress-ceph/deploy-wordpress-ceph.sh
Normal file
2
rook/wordpress-ceph/deploy-wordpress-ceph.sh
Normal file
@@ -0,0 +1,2 @@
#!/bin/bash
# Deploy the WordPress demo (MySQL backend first, then WordPress), both
# backed by rook-ceph-block PVCs.
# Fix: added the missing shebang and `set -e` so a failed apply aborts.
set -e

kubectl apply -f mysql.yaml
kubectl apply -f wordpress.yaml
1
rook/wordpress-ceph/get-wordpress-port.sh
Normal file
1
rook/wordpress-ceph/get-wordpress-port.sh
Normal file
@@ -0,0 +1 @@
#!/bin/bash
# Print the NodePort of the WordPress frontend service (excluding the
# wordpress-mysql service).
# Fix: added the missing shebang and normalized the awk program quoting.
# Pipeline: take the PORT(S) column (e.g. "80:31234/TCP"), strip "/TCP",
# and keep the node port after the colon.
kubectl get service | grep 'wordpress' | grep -v 'mysql' | awk '{print $5}' | sed 's/\/TCP//g' | cut -d ':' -f 2
59
rook/wordpress-ceph/mysql.yaml
Normal file
59
rook/wordpress-ceph/mysql.yaml
Normal file
@@ -0,0 +1,59 @@
---
# Headless service so WordPress can reach MySQL by name.
apiVersion: v1
kind: Service
metadata:
  name: wordpress-mysql
  labels:
    app: wordpress
spec:
  ports:
  - port: 3306
  selector:
    app: wordpress
    tier: mysql
  clusterIP: None
---
# Persistent storage for the MySQL data directory, provisioned by Rook.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-pv-claim
  labels:
    app: wordpress
spec:
  storageClassName: rook-ceph-block
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 20Gi
---
apiVersion: apps/v1beta1
kind: Deployment
metadata:
  name: wordpress-mysql
  labels:
    app: wordpress
spec:
  strategy:
    # Recreate avoids two pods mounting the RWO volume at once.
    type: Recreate
  template:
    metadata:
      labels:
        app: wordpress
        tier: mysql
    spec:
      containers:
      - image: mysql:5.6
        name: mysql
        env:
        # NOTE: demo-only credential — do not use in production.
        - name: MYSQL_ROOT_PASSWORD
          value: changeme
        ports:
        - containerPort: 3306
          name: mysql
        volumeMounts:
        - name: mysql-persistent-storage
          mountPath: /var/lib/mysql
      volumes:
      - name: mysql-persistent-storage
        persistentVolumeClaim:
          claimName: mysql-pv-claim
61
rook/wordpress-ceph/wordpress.yaml
Normal file
61
rook/wordpress-ceph/wordpress.yaml
Normal file
@@ -0,0 +1,61 @@
---
# NodePort service exposing the WordPress frontend.
apiVersion: v1
kind: Service
metadata:
  name: wordpress
  labels:
    app: wordpress
spec:
  ports:
  - port: 80
  selector:
    app: wordpress
    tier: frontend
  type: NodePort
---
# Persistent storage for the WordPress document root, provisioned by Rook.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: wp-pv-claim
  labels:
    app: wordpress
spec:
  storageClassName: rook-ceph-block
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 20Gi
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: wordpress
  labels:
    app: wordpress
spec:
  strategy:
    # Recreate avoids two pods mounting the RWO volume at once.
    type: Recreate
  template:
    metadata:
      labels:
        app: wordpress
        tier: frontend
    spec:
      containers:
      - image: wordpress:4.6.1-apache
        name: wordpress
        env:
        - name: WORDPRESS_DB_HOST
          value: wordpress-mysql
        # NOTE: demo-only credential — must match MYSQL_ROOT_PASSWORD in
        # mysql.yaml.
        - name: WORDPRESS_DB_PASSWORD
          value: changeme
        ports:
        - containerPort: 80
          name: wordpress
        volumeMounts:
        - name: wordpress-persistent-storage
          mountPath: /var/www/html
      volumes:
      - name: wordpress-persistent-storage
        persistentVolumeClaim:
          claimName: wp-pv-claim
Reference in New Issue
Block a user