distribution/contrib/ceph/ci-setup.sh
Vincent Giersch 394eea0231 Storage Driver: Ceph Object Storage (RADOS)
This driver implements the storagedriver.StorageDriver interface and
uses Ceph Object Storage as storage backend.

Since RADOS is an object store with no notion of hierarchy, the
following convention is used to represent filesystem semantics in
this backend:

* All object data is stored under opaque, prefixed UUID names
  (e.g. "blob:d3d232ff-ab3a-4046-9ab7-930228d4c164").
* All hierarchy information is stored in rados omaps, where the
  omap object identifier is the virtual directory name, the keys of
  a given omap are the relative filenames, and the values are the blob
  object identifiers (or an empty value for a sub-directory).

  e.g. For the following hierarchy:

     /directory1
     /directory1/object1
     /directory1/object2
     /directory1/directory2/object3

  The omap "/directory1" will contains the following key / values:
    - "object1" "blob:d3d232ff-ab3a-4046-9ab7-930228d4c164"
    - "object2" "blob:db2e359d-4af0-4bfb-ba1d-d2fd029866a0"
    - "directory2" ""

  The omap "/directory1/directory2" will contains:
    - "object3" "blob:9ae2371c-81fc-4945-80ac-8bf7f566a5d9"

* A MOVE is implemented by changing the reference to the blob in the
  parent virtual directory omaps; the blob data itself is never copied
  (see the sketch below).
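
  As a rough sketch of this layout using the rados CLI (the pool name
  "registry" is illustrative only; the driver talks to librados
  directly and may issue different calls):

     rados -p registry setomapval /directory1 object1 blob:d3d232ff-ab3a-4046-9ab7-930228d4c164
     rados -p registry setomapval /directory1 directory2 ''
     rados -p registry listomapvals /directory1

     # MOVE /directory1/object1 -> /directory1/directory2/object1:
     # add the reference in the destination omap, drop it from the source
     rados -p registry setomapval /directory1/directory2 object1 blob:d3d232ff-ab3a-4046-9ab7-930228d4c164
     rados -p registry rmomapkey /directory1 object1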

This driver stripes rados objects to a fixed size (e.g. 4M). The idea
is to keep objects small (as RBD does on top of RADOS) so that they
are easily synchronized across OSDs. The metadata of the original
object (i.e. the total size of the chunks) is stored as an xattr on
the first chunk object.
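
  For instance, with a 4M stripe size a 10M blob becomes three rados
  objects (4M + 4M + 2M), and the original size (10485760 bytes) is
  kept on the first chunk. Inspecting it with the rados CLI would look
  roughly like this (the xattr name below is illustrative, not the
  driver's actual key):

     rados -p registry setxattr blob:d3d232ff-ab3a-4046-9ab7-930228d4c164 striped-size 10485760
     rados -p registry getxattr blob:d3d232ff-ab3a-4046-9ab7-930228d4c164 striped-size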

Signed-off-by: Vincent Giersch <vincent.giersch@ovh.net>
2015-05-20 01:44:34 +00:00

#! /bin/bash
#
# Ceph cluster setup in Circle CI
#
set -x
set -e
set -u
NODE=$(hostname)
CEPHDIR=/tmp/ceph
mkdir cluster
pushd cluster
# Install
retries=0
until [ $retries -ge 5 ]; do
    pip install ceph-deploy && break
    retries=$((retries + 1))
    sleep 30
done
retries=0
until [ $retries -ge 5 ]; do
    ceph-deploy install --release hammer $NODE && break
    retries=$((retries + 1))
    sleep 30
done
retries=0
until [ $retries -ge 5 ]; do
    ceph-deploy pkg --install librados-dev $NODE && break
    retries=$((retries + 1))
    sleep 30
done
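# ceph-deploy drives the node over SSH, even when it is the local host:
# make the hostname resolvable and allow passwordless SSH to ourselves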
echo $(ip route get 1 | awk '{print $NF;exit}') $(hostname) >> /etc/hosts
ssh-keygen -t rsa -f ~/.ssh/id_rsa -q -N ""
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
ssh-keyscan $NODE >> ~/.ssh/known_hosts
ceph-deploy new $NODE
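# Single-node test cluster: in-memory OSD backend (memstore), a single
# replica, and debug logging disabled to keep the run fast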
cat >> ceph.conf <<EOF
osd objectstore = memstore
memstore device bytes = 2147483648
osd data = $CEPHDIR
osd journal = $CEPHDIR/journal
osd crush chooseleaf type = 0
osd pool default size = 1
osd pool default min size = 1
osd scrub load threshold = 1000
debug_lockdep = 0/0
debug_context = 0/0
debug_crush = 0/0
debug_buffer = 0/0
debug_timer = 0/0
debug_filer = 0/0
debug_objecter = 0/0
debug_rados = 0/0
debug_rbd = 0/0
debug_journaler = 0/0
debug_objectcacher = 0/0
debug_client = 0/0
debug_osd = 0/0
debug_optracker = 0/0
debug_objclass = 0/0
debug_filestore = 0/0
debug_journal = 0/0
debug_ms = 0/0
debug_monc = 0/0
debug_tp = 0/0
debug_auth = 0/0
debug_finisher = 0/0
debug_heartbeatmap = 0/0
debug_perfcounter = 0/0
debug_asok = 0/0
debug_throttle = 0/0
debug_mon = 0/0
debug_paxos = 0/0
debug_rgw = 0/0
# Tuned for a single-node CI cluster; osd_op_num_shards depends on CPU utilization
osd_op_num_threads_per_shard = 1
osd_op_num_shards = 5
ms_nocrc = true
cephx_sign_messages = false
cephx_require_signatures = false
ms_dispatch_throttle_bytes = 0
throttler_perf_counter = false
[osd]
osd_client_message_size_cap = 0
osd_client_message_cap = 0
osd_enable_op_tracker = false
EOF
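# Pin the monitor to localhost and disable cephx authentication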
sed -i -r 's/mon_host =.*/mon_host = 127.0.0.1/' ceph.conf
sed -i -r 's/auth_cluster_required =.*/auth_cluster_required = none/' ceph.conf
sed -i -r 's/auth_service_required =.*/auth_service_required = none/' ceph.conf
sed -i -r 's/auth_client_required =.*/auth_client_required = none/' ceph.conf
# Setup monitor and keyrings
ceph-deploy mon create-initial $NODE
ceph-deploy admin $NODE
sudo chmod a+r /etc/ceph/ceph.client.admin.keyring
# Setup OSD
mkdir -p $CEPHDIR
OSD=$(ceph osd create)
ceph osd crush add osd.${OSD} 1 root=default host=$NODE
ceph-osd --id ${OSD} --mkjournal --mkfs
ceph-osd --id ${OSD}
# Status
ceph status
ceph health detail
ceph osd tree
popd