# Author: hnuzhoulin
# Date: 12/19/2015 - 11:23 AM
#
# Ceph CBT (Ceph Benchmarking Tool) test configuration.
# Cluster topology and build settings consumed by CBT.
cluster:
  head: "cbt"
  clients: ["client1", "client2", "client3"]
  osds: ["osd1", "osd2", "osd3"]
  mons: ["mon1", "mon2", "mon3"]
  osds_per_node: 1
  # Filesystem plus mkfs/mount options applied when (re)building OSD volumes.
  fs: xfs
  mkfs_opts: -f -i size=2048 -n size=64k
  mount_opts: -o inode64,noatime,logbsize=256k
  conf_file: /etc/ceph/ceph.conf
  # NOTE(review): duplicates conf_file above — confirm which key CBT actually
  # reads before removing either one.
  ceph.conf: /etc/ceph/ceph.conf
  # Number of times each benchmark is repeated.
  iterations: 3
  rebuild_every_test: false  # canonical lowercase boolean (was "False")
  tmp_dir: "/tmp/cbt"
  pool_profiles:
    rbd:
      pg_size: 1024
      pgp_size: 1024
      replication: 'replicated'
  recovery_test:
    # OSD ids to mark out/in during the recovery test.
    osds: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
# Benchmark definitions run against the cluster above.
benchmarks:
  librbdfio:
    # Per-job runtime — presumably seconds; confirm against CBT librbdfio docs.
    time: 900
    # Volume size — presumably MB (64 GiB); confirm units with CBT docs.
    vol_size: 65536
    # Workload patterns to sweep; rwmixread below applies to the mixed mode.
    mode: [ randwrite, randread, randrw ]
    rwmixread: 50
    # I/O size in bytes (4 KiB).
    op_size: [ 4096 ]
    procs_per_volume: [ 1 ]
    volumes_per_client: [ 10 ]
    iodepth: [ 32 ]
    # OSD read-ahead setting to test.
    osd_ra: [ 128 ]
    # Path to the fio binary on the client nodes.
    cmd_path: '/home/ceph-admin/fio/fio'
    # Must match a profile name under cluster.pool_profiles.
    pool_profile: 'rbd'
    # fio log averaging interval in milliseconds.
    log_avg_msec: 100