diff --git a/skel/bin/dcache b/skel/bin/dcache index abb6ff82efe..6028b5e0696 100755 --- a/skel/bin/dcache +++ b/skel/bin/dcache @@ -56,6 +56,7 @@ usage() echo " pool ls" echo " pool reconstruct " echo " pool yaml " + echo " pool benchmark [fio options] [ -- []]" echo " property [ []]" echo " restart []..." echo " services" @@ -611,6 +612,51 @@ case "$1" in ) | column ;; + benchmark) + fio --version >/dev/null 2>&1 || fail 1 "fio is not installed. Please install fio to run the benchmark command." + + fio_options="" + while [ $# -gt 0 ] + do + case "$1" in + --) + shift + break + ;; + *) + fio_options="$fio_options $1" + shift + ;; + esac + done + + [ $# -gt 1 ] && usage + + if [ $# -eq 1 ] + then + path="$1" + echo "" + echo " Running fio benchmark for directory ${path}" + echo "" + fio --directory="${path}" ${fio_options} "${lib}/pool-benchmark.fio" + else + for domain in $(getProperty dcache.domains); do + for cell in $(getProperty dcache.domain.cells "$domain"); do + service=$(getProperty dcache.domain.service "$domain" "$cell") + if [ "$service" = "pool" ]; then + name=$(getProperty pool.name "$domain" "$cell") + path=$(getProperty pool.path "$domain" "$cell") + + echo "" + echo " Running fio benchmark for pool $name@$domain." + echo "" + fio --directory="${path}/data" ${fio_options} "${lib}/pool-benchmark.fio" + fi + done + done + fi + ;; + *) usage ;; diff --git a/skel/man/man8/dcache.8 b/skel/man/man8/dcache.8 index 9be431164a1..a7cc0c42760 100644 --- a/skel/man/man8/dcache.8 +++ b/skel/man/man8/dcache.8 @@ -178,6 +178,14 @@ to manually replace the content of the meta directory of the pool with the reconstructed database. It is recommended to keep a copy of the old database. +.TP +.B pool benchmark [fio options] [ -- directory] +Run filesystem benchmarks on the file system containing the pool's \fBdata\fR directory. +The benchmark uses the fio tool. The options passed to the command are passed directly to fio. 
+If a path is specified, the benchmark is run on the specified directory only. Otherwise, +the benchmark is run on all file systems for all configured pools. +By default, the output of the benchmark is written to standard out. + .SH DATABASE MANAGEMENT COMMANDS Several services in dCache rely on relational databases. The commands diff --git a/skel/share/lib/pool-benchmark.fio b/skel/share/lib/pool-benchmark.fio new file mode 100644 index 00000000000..5b58b88adb9 --- /dev/null +++ b/skel/share/lib/pool-benchmark.fio @@ -0,0 +1,83 @@ +; +; FIO workload to benchmark disk performance +; +; block verification information is forced on write, which is +; used during read workloads. +; +; +; Run sequential write, sequential read and random read +; +; +; NOTE: ALL READ JOBS DELETE TEST FILE AFTER BENCHMARK +; + +[global] +filename_format=fio-test-$jobnum-$filenum +; file size +size=10g +ioengine=libaio +; do not pre-create files +create_on_open=1 +; checksum type +verify=sha1 +; fail on first checksum error +verify_fatal=1 +; invalidate page cache before running jobs +invalidate=1 + +[seqwrite] +description=Streaming WRITE +rw=write +; do not checksum during write +do_verify=0 +; block until job is done before jumping to the next section +stonewall + +[seqread] +description=Streaming READ +rw=read +; check checksum during read +do_verify=1 +; block until job is done before jumping to the next section +stonewall + +[randread] +description=Random READ +rw=randread +; incremental random offset +rw_sequencer=sequential +; check checksum during read +do_verify=1 +; block until job is done before jumping to the next section +stonewall +; remove file at the end of the test +unlink=1 + +[concurrent_write] +description=Concurrent Streaming WRITE +rw=write +; do not checksum during write +do_verify=0 +; block until job is done before jumping to the next section +stonewall +; write with multiple threads +numjobs=24 +thread=1 +group_reporting=1 + +[concurrent_randread] 
+description=Concurrent Random READ +rw=randread +; (removed duplicate "rw=read" line: a later rw= key overrides the earlier one, so the job silently ran a sequential read instead of the intended random read) +; check checksum during read +do_verify=1 +; block until job is done before jumping to the next section +stonewall +; read with multiple threads +numjobs=24 +thread=1 +group_reporting=1 +; remove file at the end of the test +unlink=1 + +; -- end job file --