
Ceph — Commands & Recipes

Cluster Health

# Quick status
ceph status
ceph health detail

# OSD tree (disk layout)
ceph osd tree

# Disk usage
ceph df
ceph osd df tree

# PG status
ceph pg stat
ceph pg dump_stuck

Pool Management

# Create replicated pool
ceph osd pool create mypool 128 128 replicated
ceph osd pool set mypool size 3
ceph osd pool set mypool min_size 2

# Create erasure-coded pool
ceph osd pool create ecpool 128 128 erasure

# Enable application
ceph osd pool application enable mypool rbd

RBD (Block Storage)

# Create RBD image (--size is in MiB; 102400 MiB = 100 GiB)
rbd create mypool/myimage --size 102400

# Map to kernel device
rbd device map mypool/myimage
mkfs.xfs /dev/rbd0
mount /dev/rbd0 /mnt/rbd

# Snapshot (unmap/unmount the image before rolling back —
# rollback while the image is in use corrupts the mounted filesystem)
rbd snap create mypool/myimage@snap1
rbd snap rollback mypool/myimage@snap1

RGW (Object Storage / S3)

# Create RGW user
radosgw-admin user create --uid=myuser --display-name="My User"
radosgw-admin user info --uid=myuser  # get access/secret keys

# S3 access test (aws cli)
aws --endpoint-url=http://rgw-host:7480 s3 mb s3://mybucket
aws --endpoint-url=http://rgw-host:7480 s3 cp file.txt s3://mybucket/

CephFS

# Create CephFS
ceph fs volume create myfs

# Mount (kernel client) — prefer secretfile= over secret= so the key
# doesn't leak into shell history / process listings
mount -t ceph mon1:/ /mnt/cephfs -o name=admin,secretfile=/etc/ceph/admin.secret

# Mount (FUSE)
ceph-fuse /mnt/cephfs

Deployment (Cephadm)

# Bootstrap new cluster
cephadm bootstrap --mon-ip 10.0.0.1

# Add hosts
ceph orch host add node2 10.0.0.2
ceph orch host add node3 10.0.0.3

# Deploy OSDs on all available disks
ceph orch apply osd --all-available-devices

# Deploy RGW
ceph orch apply rgw myrgw --placement="count:2"

# Deploy MDS (for CephFS)
ceph orch apply mds myfs --placement="count:2"

Troubleshooting

# Find slow OSDs
ceph osd perf

# Check CRUSH map
ceph osd crush dump | jq '.buckets'

# Recovery status
ceph pg dump | grep -i recovering

# Check for data inconsistency
ceph health detail | grep inconsistent

Sources