diff --git a/tests/zfs-tests/Makefile.am b/tests/zfs-tests/Makefile.am
index 40a361d582a2..8a4b13d0acbb 100644
--- a/tests/zfs-tests/Makefile.am
+++ b/tests/zfs-tests/Makefile.am
@@ -42,6 +42,7 @@ scripts_zfs_tests_includedir = $(datadir)/$(PACKAGE)/zfs-tests/include
 dist_scripts_zfs_tests_include_DATA = \
 	%D%/include/blkdev.shlib \
 	%D%/include/commands.cfg \
+	%D%/include/kstat.shlib \
 	%D%/include/libtest.shlib \
 	%D%/include/math.shlib \
 	%D%/include/properties.shlib \
diff --git a/tests/zfs-tests/include/kstat.shlib b/tests/zfs-tests/include/kstat.shlib
new file mode 100644
index 000000000000..c7615760592f
--- /dev/null
+++ b/tests/zfs-tests/include/kstat.shlib
@@ -0,0 +1,516 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2025, Klara, Inc.
+#
+
+#
+# This file provides the following helpers to read kstats from tests.
+#
+# kstat [-g] <stat>
+# kstat_pool [-g] <pool> <stat>
+# kstat_dataset [-N] <dataset> <stat>
+#
+# `kstat` and `kstat_pool` return the value of the given <stat>, either
+# a global or pool-specific state.
+#
+# $ kstat dbgmsg
+# timestamp message
+# 1736848201 spa_history.c:304:spa_history_log_sync(): txg 14734896 ...
+# 1736848201 spa_history.c:330:spa_history_log_sync(): ioctl ...
+# ...
+# +# $ kstat_pool garden state +# ONLINE +# +# To get a single stat within a group or collection, separate the name with +# '.' characters. +# +# $ kstat dbufstats.cache_target_bytes +# 3215780693 +# +# $ kstat_pool crayon iostats.arc_read_bytes +# 253671670784 +# +# -g is "group" mode. If the kstat is a group or collection, all stats in that +# group are returned, one stat per line, key and value separated by a space. +# +# $ kstat -g dbufstats +# cache_count 1792 +# cache_size_bytes 87720376 +# cache_size_bytes_max 305187768 +# cache_target_bytes 97668555 +# ... +# +# $ kstat_pool -g crayon iostats +# trim_extents_written 0 +# trim_bytes_written 0 +# trim_extents_skipped 0 +# trim_bytes_skipped 0 +# ... +# +# `kstat_dataset` accesses the per-dataset group kstat. The dataset can be +# specified by name: +# +# $ kstat_dataset crayon/home/robn nunlinks +# 2628514 +# +# or, with the -N switch, as /: +# +# $ kstat_dataset -N crayon/7 writes +# 125135 +# + +#################### +# Public interface + +# +# kstat [-g] +# +function kstat +{ + typeset -i want_group=0 + + OPTIND=1 + while getopts "g" opt ; do + case $opt in + 'g') want_group=1 ;; + *) log_fail "kstat: invalid option '$opt'" ;; + esac + done + shift $(expr $OPTIND - 1) + + typeset stat=$1 + + $_kstat_os 'global' '' "$stat" $want_group +} + +# +# kstat_pool [-g] +# +function kstat_pool +{ + typeset -i want_group=0 + + OPTIND=1 + while getopts "g" opt ; do + case $opt in + 'g') want_group=1 ;; + *) log_fail "kstat_pool: invalid option '$opt'" ;; + esac + done + shift $(expr $OPTIND - 1) + + typeset pool=$1 + typeset stat=$2 + + $_kstat_os 'pool' "$pool" "$stat" $want_group +} + +# +# kstat_dataset [-N] +# +function kstat_dataset +{ + typeset -i opt_objsetid=0 + + OPTIND=1 + while getopts "N" opt ; do + case $opt in + 'N') opt_objsetid=1 ;; + *) log_fail "kstat_dataset: invalid option '$opt'" ;; + esac + done + shift $(expr $OPTIND - 1) + + typeset dsarg=$1 + typeset stat=$2 + + if [[ $opt_objsetid == 0 ]] ; 
then + typeset pool="${dsarg%%/*}" # clear first / -> end + typeset objsetid=$($_resolve_dsname_os "$pool" "$dsarg") + if [[ -z "$objsetid" ]] ; then + log_fail "kstat_dataset: dataset not found: $dsarg" + fi + dsarg="$pool/$objsetid" + fi + + $_kstat_os 'dataset' "$dsarg" "$stat" 0 +} + +#################### +# Platform-specific interface + +# +# Implementation notes +# +# There's not a lot of uniformity between platforms, so I've written to a rough +# imagined model that seems to fit the majority of OpenZFS kstats. +# +# The main platform entry points look like this: +# +# _kstat_freebsd +# _kstat_linux +# +# - scope: one of 'global', 'pool', 'dataset'. The "kind" of object the kstat +# is attached to. +# - object: name of the scoped object +# global: empty string +# pool: pool name +# dataset: / pair +# - stat: kstat name to get +# - want_group: 0 to get the single value for the kstat, 1 to treat the kstat +# as a group and get all the stat names+values under it. group +# kstats cannot have values, and stat kstats cannot have +# children (by definition) +# +# Stat values can have multiple lines, so be prepared for those. +# +# These functions either succeed and produce the requested output, or call +# log_fail. They should never output empty, or 0, or anything else. +# +# Output: +# +# - want_group=0: the single stat value, followed by newline +# - want_group=1: One stat per line, +# + +# +# To support kstat_dataset(), platforms also need to provide a dataset +# name->object id resolver function. +# +# _resolve_dsname_freebsd +# _resolve_dsname_linux +# +# - pool: pool name. always the first part of the dataset name +# - dsname: dataset name, in the standard // format. +# +# Output is . objsetID is a decimal integer, > 0 +# + +#################### +# FreeBSD + +# +# All kstats are accessed through sysctl. We model "groups" as interior nodes +# in the stat tree, which are normally opaque. 
Because sysctl has no filtering +# options, and requesting any node produces all nodes below it, we have to +# always get the name and value, and then consider the output to understand +# if we got a group or a single stat, and post-process accordingly. +# +# Scopes are mostly mapped directly to known locations in the tree, but there +# are a handful of stats that are out of position, so we need to adjust. +# + +# +# _kstat_freebsd +# +function _kstat_freebsd +{ + typeset scope=$1 + typeset obj=$2 + typeset stat=$3 + typeset -i want_group=$4 + + typeset oid="" + case "$scope" in + global) + oid="kstat.zfs.misc.$stat" + ;; + pool) + # For reasons unknown, the "multihost", "txgs" and "reads" + # pool-specific kstats are directly under kstat.zfs., + # rather than kstat.zfs..misc like the other pool kstats. + # Adjust for that here. + case "$stat" in + multihost|txgs|reads) + oid="kstat.zfs.$obj.$stat" + ;; + *) + oid="kstat.zfs.$obj.misc.$stat" + ;; + esac + ;; + dataset) + typeset pool="" + typeset -i objsetid=0 + _split_pool_objsetid $obj pool objsetid + oid=$(printf 'kstat.zfs.%s.dataset.objset-0x%x.%s' \ + $pool $objsetid $stat) + ;; + esac + + # Calling sysctl on a "group" node will return everything under that + # node, so we have to inspect the first line to make sure we are + # getting back what we expect. For a single value, the key will have + # the name we requested, while for a group, the key will not have the + # name (group nodes are "opaque", not returned by sysctl by default. + + if [[ $want_group == 0 ]] ; then + sysctl -e "$oid" | awk -v oid="$oid" -v oidre="^$oid=" ' + NR == 1 && $0 !~ oidre { exit 1 } + NR == 1 { print substr($0, length(oid)+2) ; next } + { print } + ' + else + sysctl -e "$oid" | awk -v oid="$oid" -v oidre="^$oid=" ' + NR == 1 && $0 ~ oidre { exit 2 } + { + sub("^" oid "\.", "") + sub("=", " ") + print + } + ' + fi + + typeset -i err=$? 
+ case $err in + 0) return ;; + 1) log_fail "kstat: can't get value for group kstat: $oid" ;; + 2) log_fail "kstat: not a group kstat: $oid" ;; + esac + + log_fail "kstat: unknown error: $oid" +} + +# +# _resolve_dsname_freebsd +# +function _resolve_dsname_freebsd +{ + # we're searching for: + # + # kstat.zfs.shed.dataset.objset-0x8087.dataset_name: shed/poudriere + # + # We split on '.', then get the hex objsetid from field 5. + # + # We convert hex to decimal in the shell because there isn't a _simple_ + # portable way to do it in awk and this code is already too intense to + # do it a complicated way. + typeset pool=$1 + typeset dsname=$2 + sysctl -e kstat.zfs.$pool | \ + awk -F '.' -v dsnamere="=$dsname$" ' + /\.objset-0x[0-9a-f]+\.dataset_name=/ && $6 ~ dsnamere { + print substr($5, 8) + exit + } + ' | xargs printf %d +} + +#################### +# Linux + +# +# kstats all live under /proc/spl/kstat/zfs. They have a flat structure: global +# at top-level, pool in a directory, and dataset in a objset- file inside the +# pool dir. +# +# Groups are challenge. A single stat can be the entire text of a file, or +# a single line that must be extracted from a "group" file. The only way to +# recognise a group from the outside is to look for its header. This naturally +# breaks if a raw file had a matching header, or if a group file chooses to +# hid its header. Fortunately OpenZFS does none of these things at the moment. +# + +# +# _kstat_linux +# +function _kstat_linux +{ + typeset scope=$1 + typeset obj=$2 + typeset stat=$3 + typeset -i want_group=$4 + + typeset singlestat="" + + if [[ $scope == 'dataset' ]] ; then + typeset pool="" + typeset -i objsetid=0 + _split_pool_objsetid $obj pool objsetid + stat=$(printf 'objset-0x%x.%s' $objsetid $stat) + obj=$pool + scope='pool' + fi + + typeset path="" + if [[ $scope == 'global' ]] ; then + path="/proc/spl/kstat/zfs/$stat" + else + path="/proc/spl/kstat/zfs/$obj/$stat" + fi + + if [[ ! 
-e "$path" && $want_group -eq 0 ]] ; then + # This single stat doesn't have its own file, but the wanted + # stat could be in a group kstat file, which we now need to + # find. To do this, we split a single stat name into two parts: + # the file that would contain the stat, and the key within that + # file to match on. This works by converting all bar the last + # '.' separator to '/', then splitting on the remaining '.' + # separator. If there are no '.' separators, the second arg + # returned will be empty. + # + # foo -> (foo) + # foo.bar -> (foo, bar) + # foo.bar.baz -> (foo/bar, baz) + # foo.bar.baz.quux -> (foo/bar/baz, quux) + # + # This is how we will target single stats within a larger NAMED + # kstat file, eg dbufstats.cache_target_bytes. + typeset -a split=($(echo "$stat" | \ + sed -E 's/^(.+)\.([^\.]+)$/\1 \2/ ; s/\./\//g')) + typeset statfile=${split[0]} + singlestat=${split[1]:-""} + + if [[ $scope == 'global' ]] ; then + path="/proc/spl/kstat/zfs/$statfile" + else + path="/proc/spl/kstat/zfs/$obj/$statfile" + fi + fi + if [[ ! -r "$path" ]] ; then + log_fail "kstat: can't read $path" + fi + + if [[ $want_group == 1 ]] ; then + # "group" (NAMED) kstats on Linux start: + # + # $ cat /proc/spl/kstat/zfs/crayon/iostats + # 70 1 0x01 26 7072 8577844978 661416318663496 + # name type data + # trim_extents_written 4 0 + # trim_bytes_written 4 0 + # + # The second value on the first row is the ks_type. Group + # mode only works for type 1, KSTAT_TYPE_NAMED. So we check + # for that, and eject if it's the wrong type. Otherwise, we + # skip the header row and process the values. + awk ' + NR == 1 && ! /^[0-9]+ 1 / { exit 2 } + NR < 3 { next } + { print $1 " " $NF } + ' "$path" + elif [[ -n $singlestat ]] ; then + # single stat. must be a single line within a group stat, so + # we look for the header again as above. 
+ awk -v singlestat="$singlestat" \ + -v singlestatre="^$singlestat " ' + NR == 1 && /^[0-9]+ [^1] / { exit 2 } + NR < 3 { next } + $0 ~ singlestatre { print $NF ; exit 0 } + ENDFILE { exit 3 } + ' "$path" + else + # raw stat. dump contents, exclude group stats + awk ' + NR == 1 && /^[0-9]+ 1 / { exit 1 } + { print } + ' "$path" + fi + + typeset -i err=$? + case $err in + 0) return ;; + 1) log_fail "kstat: can't get value for group kstat: $path" ;; + 2) log_fail "kstat: not a group kstat: $path" ;; + 3) log_fail "kstat: stat not found in group: $path $singlestat" ;; + esac + + log_fail "kstat: unknown error: $path" +} + +# +# _resolve_dsname_linux +# +function _resolve_dsname_linux +{ + # We look inside all: + # + # /proc/spl/kstat/zfs/crayon/objset-0x113 + # + # and check the dataset_name field inside. If we get a match, we split + # the filename on /, then extract the hex objsetid. + # + # We convert hex to decimal in the shell because there isn't a _simple_ + # portable way to do it in awk and this code is already too intense to + # do it a complicated way. + typeset pool=$1 + typeset dsname=$2 + awk -v dsname="$dsname" ' + $1 == "dataset_name" && $3 == dsname { + split(FILENAME, a, "/") + print substr(a[7], 8) + exit + } + ' /proc/spl/kstat/zfs/$pool/objset-0x* | xargs printf %d +} + +#################### + +# +# _split_pool_objsetid <*pool> <*objsetid> +# +# Splits pool/objsetId string in and fills and . +# +function _split_pool_objsetid +{ + typeset obj=$1 + typeset -n pool=$2 + typeset -n objsetid=$3 + + pool="${obj%%/*}" # clear first / -> end + typeset osidarg="${obj#*/}" # clear start -> first / + + # ensure objsetid arg does not contain a /. 
we're about to convert it, + # but ksh will treat it as an expression, and a / will give a + # divide-by-zero + if [[ "${osidarg%%/*}" != "$osidarg" ]] ; then + log_fail "kstat: invalid objsetid: $osidarg" + fi + + typeset -i id=$osidarg + if [[ $id -le 0 ]] ; then + log_fail "kstat: invalid objsetid: $osidarg" + fi + objsetid=$id +} + +#################### + +# +# Per-platform function selection. +# +# To avoid needing platform check throughout, we store the names of the +# platform functions and call through them. +# +if is_freebsd ; then + _kstat_os='_kstat_freebsd' + _resolve_dsname_os='_resolve_dsname_freebsd' +elif is_linux ; then + _kstat_os='_kstat_linux' + _resolve_dsname_os='_resolve_dsname_linux' +else + _kstat_os='_kstat_unknown_platform_implement_me' + _resolve_dsname_os='_resolve_dsname_unknown_platform_implement_me' +fi + diff --git a/tests/zfs-tests/include/libtest.shlib b/tests/zfs-tests/include/libtest.shlib index 9cf919c3dd0f..0b6c675cdd2c 100644 --- a/tests/zfs-tests/include/libtest.shlib +++ b/tests/zfs-tests/include/libtest.shlib @@ -28,6 +28,7 @@ # Copyright (c) 2017, Datto Inc. All rights reserved. # Copyright (c) 2017, Open-E Inc. All rights reserved. # Copyright (c) 2021, The FreeBSD Foundation. +# Copyright (c) 2025, Klara, Inc. # Use is subject to license terms. # @@ -37,6 +38,7 @@ . ${STF_SUITE}/include/math.shlib . ${STF_SUITE}/include/blkdev.shlib + # On AlmaLinux 9 we will see $PWD = '.' instead of the full path. This causes # some tests to fail. Fix it up here. if [ "$PWD" = "." ] ; then @@ -3662,41 +3664,6 @@ function ls_xattr # path esac } -function kstat # stat flags? 
-{ - typeset stat=$1 - typeset flags=${2-"-n"} - - case "$UNAME" in - FreeBSD) - sysctl $flags kstat.zfs.misc.$stat - ;; - Linux) - cat "/proc/spl/kstat/zfs/$stat" 2>/dev/null - ;; - *) - false - ;; - esac -} - -function get_arcstat # stat -{ - typeset stat=$1 - - case "$UNAME" in - FreeBSD) - kstat arcstats.$stat - ;; - Linux) - kstat arcstats | awk "/$stat/"' { print $3 }' - ;; - *) - false - ;; - esac -} - function punch_hole # offset length file { typeset offset=$1 @@ -3748,9 +3715,9 @@ function arcstat_quiescence # stat echo fi while $do_once || [ $stat1 -ne $stat2 ] || [ $stat2 -eq 0 ]; do - typeset stat1=$(get_arcstat $stat) + typeset stat1=$(kstat arcstats.$stat) sleep 0.5 - typeset stat2=$(get_arcstat $stat) + typeset stat2=$(kstat arcstats.$stat) do_once=false done @@ -3916,3 +3883,5 @@ function pop_coredump_pattern ;; esac } + +. ${STF_SUITE}/include/kstat.shlib diff --git a/tests/zfs-tests/tests/functional/arc/dbufstats_001_pos.ksh b/tests/zfs-tests/tests/functional/arc/dbufstats_001_pos.ksh index e51cf179d8ef..552a27e98102 100755 --- a/tests/zfs-tests/tests/functional/arc/dbufstats_001_pos.ksh +++ b/tests/zfs-tests/tests/functional/arc/dbufstats_001_pos.ksh @@ -29,8 +29,8 @@ # # DESCRIPTION: -# Ensure stats presented in /proc/spl/kstat/zfs/dbufstats are correct -# based on /proc/spl/kstat/zfs/dbufs. +# Ensure stats presented in the dbufstats kstat are correct based on the +# dbufs kstat. # # STRATEGY: # 1. 
Generate a file with random data in it @@ -55,12 +55,7 @@ function testdbufstat # stat_name dbufstat_filter [[ -n "$2" ]] && filter="-F $2" - if is_linux; then - read -r _ _ from_dbufstat _ < <(grep -w "$name" "$DBUFSTATS_FILE") - else - from_dbufstat=$(awk "/dbufstats\.$name:/ { print \$2 }" \ - "$DBUFSTATS_FILE") - fi + from_dbufstat=$(grep "^$name " "$DBUFSTATS_FILE" | cut -f2 -d' ') from_dbufs=$(dbufstat -bxn -i "$DBUFS_FILE" "$filter" | wc -l) within_tolerance $from_dbufstat $from_dbufs 15 \ @@ -77,7 +72,7 @@ log_must file_write -o create -f "$TESTDIR/file" -b 1048576 -c 20 -d R sync_all_pools log_must eval "kstat dbufs > $DBUFS_FILE" -log_must eval "kstat dbufstats '' > $DBUFSTATS_FILE" +log_must eval "kstat -g dbufstats > $DBUFSTATS_FILE" for level in {0..11}; do testdbufstat "cache_level_$level" "dbc=1,level=$level" diff --git a/tests/zfs-tests/tests/functional/cache/cache_012_pos.ksh b/tests/zfs-tests/tests/functional/cache/cache_012_pos.ksh index 20498440bea7..b8deafc5b30c 100755 --- a/tests/zfs-tests/tests/functional/cache/cache_012_pos.ksh +++ b/tests/zfs-tests/tests/functional/cache/cache_012_pos.ksh @@ -96,9 +96,9 @@ export RUNTIME=1 typeset do_once=true while $do_once || [[ $l2_size1 -le $l2_size2 ]]; do - typeset l2_size1=$(get_arcstat l2_size) + typeset l2_size1=$(kstat arcstats.l2_size) log_must fio $FIO_SCRIPTS/random_reads.fio - typeset l2_size2=$(get_arcstat l2_size) + typeset l2_size2=$(kstat arcstats.l2_size) do_once=false done diff --git a/tests/zfs-tests/tests/functional/cli_root/zdb/zdb_objset_id.ksh b/tests/zfs-tests/tests/functional/cli_root/zdb/zdb_objset_id.ksh index fdda9ba22638..9d147f382042 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zdb/zdb_objset_id.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zdb/zdb_objset_id.ksh @@ -31,10 +31,9 @@ # 7. Run zdb -dddddd pool/objsetID objectID (hex) # 8. Confirm names # 9. Repeat with zdb -NNNNNN pool/objsetID objectID -# 10. 
Obtain objsetID from /proc/spl/kstat/zfs/testpool/obset-0x -# (linux only) +# 10. Obtain dataset name from testpool.objset-0x.dataset_name kstat # 11. Run zdb -dddddd pool/objsetID (hex) -# 12. Match name from zdb against proc entry +# 12. Match name from zdb against kstat # 13. Create dataset with hex numeric name # 14. Create dataset with decimal numeric name # 15. zdb -d for numeric datasets succeeds @@ -68,7 +67,7 @@ log_note "file $init_data has object number $obj" sync_pool $TESTPOOL IFS=", " read -r _ _ _ _ objset_id _ < <(zdb -d $TESTPOOL/$TESTFS) -objset_hex=$(printf "0x%X" $objset_id) +objset_hex=$(printf "0x%x" $objset_id) log_note "objset $TESTPOOL/$TESTFS has objset ID $objset_id ($objset_hex)" for id in "$objset_id" "$objset_hex" @@ -89,13 +88,9 @@ do log_fail "zdb -NNNNNN $TESTPOOL/$id $obj failed (file1 not in zdb output)" done -if is_linux; then - output=$(ls -1 /proc/spl/kstat/zfs/$TESTPOOL | grep objset- | tail -1) - objset_hex=${output#*-} - name_from_proc=$(grep dataset_name /proc/spl/kstat/zfs/$TESTPOOL/$output | cut -d' ' -f3) - log_note "checking zdb output for $name_from_proc" - log_must eval "zdb -dddddd $TESTPOOL/$objset_hex | grep -q \"$name_from_proc\"" -fi +name_from_proc=$(kstat_dataset -N $TESTPOOL/$objset_id dataset_name) +log_note "checking zdb output for $name_from_proc" +log_must eval "zdb -dddddd $TESTPOOL/$objset_hex | grep -q \"$name_from_proc\"" log_must zfs create $hex_ds log_must zfs create $num_ds diff --git a/tests/zfs-tests/tests/functional/compression/l2arc_compressed_arc.ksh b/tests/zfs-tests/tests/functional/compression/l2arc_compressed_arc.ksh index 1d3cbfc79ee6..1eded81101c1 100755 --- a/tests/zfs-tests/tests/functional/compression/l2arc_compressed_arc.ksh +++ b/tests/zfs-tests/tests/functional/compression/l2arc_compressed_arc.ksh @@ -83,12 +83,12 @@ log_must truncate -s ${cache_sz}M $VDEV_CACHE log_must zpool create -O compression=lz4 -f $TESTPOOL-l2arc $VDEV cache $VDEV_CACHE -l2_cksum_bad_start=$(get_arcstat 
l2_cksum_bad) +l2_cksum_bad_start=$(kstat arcstats.l2_cksum_bad) log_must fio $FIO_SCRIPTS/mkfiles.fio log_must fio $FIO_SCRIPTS/random_reads.fio -l2_cksum_bad_end=$(get_arcstat l2_cksum_bad) +l2_cksum_bad_end=$(kstat arcstats.l2_cksum_bad) log_note "L2ARC Failed Checksums before: $l2_cksum_bad_start After:"\ "$l2_cksum_bad_end" diff --git a/tests/zfs-tests/tests/functional/compression/l2arc_compressed_arc_disabled.ksh b/tests/zfs-tests/tests/functional/compression/l2arc_compressed_arc_disabled.ksh index c8f4111744eb..b08f8dccc845 100755 --- a/tests/zfs-tests/tests/functional/compression/l2arc_compressed_arc_disabled.ksh +++ b/tests/zfs-tests/tests/functional/compression/l2arc_compressed_arc_disabled.ksh @@ -83,12 +83,12 @@ log_must truncate -s ${cache_sz}M $VDEV_CACHE log_must zpool create -O compression=lz4 -f $TESTPOOL-l2arc $VDEV cache $VDEV_CACHE -l2_cksum_bad_start=$(get_arcstat l2_cksum_bad) +l2_cksum_bad_start=$(kstat arcstats.l2_cksum_bad) log_must fio $FIO_SCRIPTS/mkfiles.fio log_must fio $FIO_SCRIPTS/random_reads.fio -l2_cksum_bad_end=$(get_arcstat l2_cksum_bad) +l2_cksum_bad_end=$(kstat arcstats.l2_cksum_bad) log_note "L2ARC Failed Checksums before: $l2_cksum_bad_start After:"\ "$l2_cksum_bad_end" diff --git a/tests/zfs-tests/tests/functional/compression/l2arc_encrypted.ksh b/tests/zfs-tests/tests/functional/compression/l2arc_encrypted.ksh index 460c95bb6051..8da3441330a6 100755 --- a/tests/zfs-tests/tests/functional/compression/l2arc_encrypted.ksh +++ b/tests/zfs-tests/tests/functional/compression/l2arc_encrypted.ksh @@ -88,12 +88,12 @@ log_must eval "echo $PASSPHRASE | zfs create -o compression=zstd " \ "-o encryption=on -o keyformat=passphrase -o keylocation=prompt " \ "$TESTPOOL-l2arc/encrypted" -l2_cksum_bad_start=$(get_arcstat l2_cksum_bad) +l2_cksum_bad_start=$(kstat arcstats.l2_cksum_bad) log_must fio $FIO_SCRIPTS/mkfiles.fio log_must fio $FIO_SCRIPTS/random_reads.fio -l2_cksum_bad_end=$(get_arcstat l2_cksum_bad) +l2_cksum_bad_end=$(kstat 
arcstats.l2_cksum_bad) log_note "L2ARC Failed Checksums before: $l2_cksum_bad_start After:"\ "$l2_cksum_bad_end" diff --git a/tests/zfs-tests/tests/functional/compression/l2arc_encrypted_no_compressed_arc.ksh b/tests/zfs-tests/tests/functional/compression/l2arc_encrypted_no_compressed_arc.ksh index 2f352e2af5d4..e571016f6e2a 100755 --- a/tests/zfs-tests/tests/functional/compression/l2arc_encrypted_no_compressed_arc.ksh +++ b/tests/zfs-tests/tests/functional/compression/l2arc_encrypted_no_compressed_arc.ksh @@ -88,12 +88,12 @@ log_must eval "echo $PASSPHRASE | zfs create -o compression=zstd " \ "-o encryption=on -o keyformat=passphrase -o keylocation=prompt " \ "$TESTPOOL-l2arc/encrypted" -l2_cksum_bad_start=$(get_arcstat l2_cksum_bad) +l2_cksum_bad_start=$(kstat arcstats.l2_cksum_bad) log_must fio $FIO_SCRIPTS/mkfiles.fio log_must fio $FIO_SCRIPTS/random_reads.fio -l2_cksum_bad_end=$(get_arcstat l2_cksum_bad) +l2_cksum_bad_end=$(kstat arcstats.l2_cksum_bad) log_note "L2ARC Failed Checksums before: $l2_cksum_bad_start After:"\ "$l2_cksum_bad_end" diff --git a/tests/zfs-tests/tests/functional/deadman/deadman_sync.ksh b/tests/zfs-tests/tests/functional/deadman/deadman_sync.ksh index f1561b7282e5..5c165523fefd 100755 --- a/tests/zfs-tests/tests/functional/deadman/deadman_sync.ksh +++ b/tests/zfs-tests/tests/functional/deadman/deadman_sync.ksh @@ -73,11 +73,7 @@ log_must zinject -c all sync_all_pools # Log txg sync times for reference and the zpool event summary. -if is_freebsd; then - log_must sysctl -n kstat.zfs.$TESTPOOL.txgs -else - log_must cat /proc/spl/kstat/zfs/$TESTPOOL/txgs -fi +log_must kstat_pool $TESTPOOL txgs log_must zpool events # Verify at least 3 deadman events were logged. 
The first after 5 seconds, diff --git a/tests/zfs-tests/tests/functional/direct/dio.kshlib b/tests/zfs-tests/tests/functional/direct/dio.kshlib index 5b3f893e1ce1..49c43a0aaca3 100644 --- a/tests/zfs-tests/tests/functional/direct/dio.kshlib +++ b/tests/zfs-tests/tests/functional/direct/dio.kshlib @@ -140,29 +140,6 @@ function check_dio_chksum_verify_failures # pool vdev_type op expect_errors } -# -# Get the value of a counter from -# Linux: /proc/spl/kstat/zfs/$pool/iostats file. -# FreeBSD: kstat.zfs.$pool.msic.iostats.$stat -# -function get_iostats_stat # pool stat -{ - typeset pool=$1 - typeset stat=$2 - - if is_linux; then - iostats_file=/proc/spl/kstat/zfs/$pool/iostats - val=$(grep -m1 "$stat" $iostats_file | awk '{ print $3 }') - else - val=$(sysctl -n kstat.zfs.$pool.misc.iostats.$stat) - fi - if [[ -z "$val" ]]; then - log_fail "Unable to read $stat counter" - fi - - echo "$val" -} - # # Evict any buffered blocks by overwritting them using an O_DIRECT request. # @@ -190,17 +167,13 @@ function verify_dio_write_count #pool bs size mnpnt log_note "Checking for $dio_wr_expected Direct I/O writes" - prev_dio_wr=$(get_iostats_stat $pool direct_write_count) + prev_dio_wr=$(kstat_pool $pool iostats.direct_write_count) dio_and_verify write $size $bs $mntpnt "sync" - curr_dio_wr=$(get_iostats_stat $pool direct_write_count) + curr_dio_wr=$(kstat_pool $pool iostats.direct_write_count) dio_wr_actual=$((curr_dio_wr - prev_dio_wr)) if [[ $dio_wr_actual -lt $dio_wr_expected ]]; then - if is_linux; then - cat /proc/spl/kstat/zfs/$pool/iostats - else - sysctl kstat.zfs.$pool.misc.iostats - fi + kstat_pool -g $pool iostats log_fail "Direct writes $dio_wr_actual of $dio_wr_expected" fi } @@ -223,33 +196,25 @@ function check_write # pool file bs count seek flags buf_wr dio_wr log_note "Checking $count * $bs write(s) at offset $seek, $flags" - prev_buf_wr=$(get_iostats_stat $pool arc_write_count) - prev_dio_wr=$(get_iostats_stat $pool direct_write_count) + 
prev_buf_wr=$(kstat_pool $pool iostats.arc_write_count) + prev_dio_wr=$(kstat_pool $pool iostats.direct_write_count) log_must stride_dd -i /dev/urandom -o $file -b $bs -c $count \ -k $seek $flags - curr_buf_wr=$(get_iostats_stat $pool arc_write_count) + curr_buf_wr=$(kstat_pool $pool iostats.arc_write_count) buf_wr_actual=$((curr_buf_wr - prev_buf_wr)) - curr_dio_wr=$(get_iostats_stat $pool direct_write_count) + curr_dio_wr=$(kstat_pool $pool iostats.direct_write_count) dio_wr_actual=$((curr_dio_wr - prev_dio_wr)) if [[ $buf_wr_actual -lt $buf_wr_expect ]]; then - if is_linux; then - cat /proc/spl/kstat/zfs/$pool/iostats - else - sysctl kstat.zfs.$pool.misc.iostats - fi + kstat_pool -g $pool iostats log_fail "Buffered writes $buf_wr_actual of $buf_wr_expect" fi if [[ $dio_wr_actual -lt $dio_wr_expect ]]; then - if is_linux; then - cat /proc/spl/kstat/zfs/$pool/iostats - else - sysctl kstat.zfs.$pool.misc.iostats - fi + kstat_pool -g $pool iostats log_fail "Direct writes $dio_wr_actual of $dio_wr_expect" fi } @@ -272,33 +237,25 @@ function check_read # pool file bs count skip flags buf_rd dio_rd log_note "Checking $count * $bs read(s) at offset $skip, $flags" - prev_buf_rd=$(get_iostats_stat $pool arc_read_count) - prev_dio_rd=$(get_iostats_stat $pool direct_read_count) + prev_buf_rd=$(kstat_pool $pool iostats.arc_read_count) + prev_dio_rd=$(kstat_pool $pool iostats.direct_read_count) log_must stride_dd -i $file -o /dev/null -b $bs -c $count \ -p $skip $flags - curr_buf_rd=$(get_iostats_stat $pool arc_read_count) + curr_buf_rd=$(kstat_pool $pool iostats.arc_read_count) buf_rd_actual=$((curr_buf_rd - prev_buf_rd)) - curr_dio_rd=$(get_iostats_stat $pool direct_read_count) + curr_dio_rd=$(kstat_pool $pool iostats.direct_read_count) dio_rd_actual=$((curr_dio_rd - prev_dio_rd)) if [[ $buf_rd_actual -lt $buf_rd_expect ]]; then - if is_linux; then - cat /proc/spl/kstat/zfs/$pool/iostats - else - sysctl kstat.zfs.$pool.misc.iostats - fi + kstat_pool -g $pool iostats 
log_fail "Buffered reads $buf_rd_actual of $buf_rd_expect" fi if [[ $dio_rd_actual -lt $dio_rd_expect ]]; then - if is_linux; then - cat /proc/spl/kstat/zfs/$pool/iostats - else - sysctl kstat.zfs.$pool.misc.iostats - fi + kstat_pool -g $pool iostats log_fail "Direct reads $dio_rd_actual of $dio_rd_expect" fi } diff --git a/tests/zfs-tests/tests/functional/direct/dio_read_verify.ksh b/tests/zfs-tests/tests/functional/direct/dio_read_verify.ksh index 456d429b1d99..67e0b4a7c700 100755 --- a/tests/zfs-tests/tests/functional/direct/dio_read_verify.ksh +++ b/tests/zfs-tests/tests/functional/direct/dio_read_verify.ksh @@ -72,8 +72,8 @@ for type in "" "mirror" "raidz" "draid"; do $TESTPOOL1/$TESTFS1" mntpnt=$(get_prop mountpoint $TESTPOOL1/$TESTFS1) - prev_dio_rd=$(get_iostats_stat $TESTPOOL1 direct_read_count) - prev_arc_rd=$(get_iostats_stat $TESTPOOL1 arc_read_count) + prev_dio_rd=$(kstat_pool $TESTPOOL1 iostats.direct_read_count) + prev_arc_rd=$(kstat_pool $TESTPOOL1 iostats.arc_read_count) # Create the file before trying to manipulate the contents log_must stride_dd -o "$mntpnt/direct-write.iso" -i /dev/urandom \ @@ -83,8 +83,8 @@ for type in "" "mirror" "raidz" "draid"; do -n $NUMBLOCKS -b $BS -r # Getting new Direct I/O and ARC Write counts. 
- curr_dio_rd=$(get_iostats_stat $TESTPOOL1 direct_read_count) - curr_arc_rd=$(get_iostats_stat $TESTPOOL1 arc_read_count) + curr_dio_rd=$(kstat_pool $TESTPOOL1 iostats.direct_read_count) + curr_arc_rd=$(kstat_pool $TESTPOOL1 iostats.arc_read_count) total_dio_rd=$((curr_dio_rd - prev_dio_rd)) total_arc_rd=$((curr_arc_rd - prev_arc_rd)) diff --git a/tests/zfs-tests/tests/functional/direct/dio_unaligned_filesize.ksh b/tests/zfs-tests/tests/functional/direct/dio_unaligned_filesize.ksh index 8bb363f1a983..6e2982ad7d46 100755 --- a/tests/zfs-tests/tests/functional/direct/dio_unaligned_filesize.ksh +++ b/tests/zfs-tests/tests/functional/direct/dio_unaligned_filesize.ksh @@ -73,11 +73,11 @@ log_must zpool export $TESTPOOL log_must zpool import $TESTPOOL # Reading the file back using Direct I/O -prev_dio_read=$(get_iostats_stat $TESTPOOL direct_read_count) -prev_arc_read=$(get_iostats_stat $TESTPOOL arc_read_count) +prev_dio_read=$(kstat_pool $TESTPOOL iostats.direct_read_count) +prev_arc_read=$(kstat_pool $TESTPOOL iostats.arc_read_count) log_must stride_dd -i $filename -o /dev/null -b $bs -e -d -curr_dio_read=$(get_iostats_stat $TESTPOOL direct_read_count) -curr_arc_read=$(get_iostats_stat $TESTPOOL arc_read_count) +curr_dio_read=$(kstat_pool $TESTPOOL iostats.direct_read_count) +curr_arc_read=$(kstat_pool $TESTPOOL iostats.arc_read_count) total_dio_read=$((curr_dio_read - prev_dio_read)) total_arc_read=$((curr_arc_read - prev_arc_read)) diff --git a/tests/zfs-tests/tests/functional/direct/dio_write_stable_pages.ksh b/tests/zfs-tests/tests/functional/direct/dio_write_stable_pages.ksh index ccdabc678a68..3d7f7089d7c8 100755 --- a/tests/zfs-tests/tests/functional/direct/dio_write_stable_pages.ksh +++ b/tests/zfs-tests/tests/functional/direct/dio_write_stable_pages.ksh @@ -72,7 +72,7 @@ do log_note "Verifying stable pages for Direct I/O writes \ iteration $i of $ITERATIONS" - prev_dio_wr=$(get_iostats_stat $TESTPOOL direct_write_count) + prev_dio_wr=$(kstat_pool $TESTPOOL 
iostats.direct_write_count) # Manipulate the user's buffer while running O_DIRECT write # workload with the buffer. @@ -83,7 +83,7 @@ do log_must stride_dd -i $mntpnt/direct-write.iso -o /dev/null \ -b $BS -c $NUMBLOCKS - curr_dio_wr=$(get_iostats_stat $TESTPOOL direct_write_count) + curr_dio_wr=$(kstat_pool $TESTPOOL iostats.direct_write_count) total_dio_wr=$((curr_dio_wr - prev_dio_wr)) log_note "Making sure we have Direct I/O writes logged" diff --git a/tests/zfs-tests/tests/functional/direct/dio_write_verify.ksh b/tests/zfs-tests/tests/functional/direct/dio_write_verify.ksh index 4eb9efe95ef1..1c1565cbbefb 100755 --- a/tests/zfs-tests/tests/functional/direct/dio_write_verify.ksh +++ b/tests/zfs-tests/tests/functional/direct/dio_write_verify.ksh @@ -90,10 +90,10 @@ log_must set_tunable32 VDEV_DIRECT_WR_VERIFY 0 # failures log_note "Verifying no panics for Direct I/O writes with compression" log_must zfs set compression=on $TESTPOOL/$TESTFS -prev_dio_wr=$(get_iostats_stat $TESTPOOL direct_write_count) +prev_dio_wr=$(kstat_pool $TESTPOOL iostats.direct_write_count) log_must manipulate_user_buffer -f "$mntpnt/direct-write.iso" -n $NUMBLOCKS \ -b $BS -w -curr_dio_wr=$(get_iostats_stat $TESTPOOL direct_write_count) +curr_dio_wr=$(kstat_pool $TESTPOOL iostats.direct_write_count) total_dio_wr=$((curr_dio_wr - prev_dio_wr)) log_note "Making sure we have Direct I/O writes logged" @@ -115,7 +115,7 @@ for i in $(seq 1 $ITERATIONS); do log_note "Verifying Direct I/O write checksums iteration \ $i of $ITERATIONS with zfs_vdev_direct_write_verify=0" - prev_dio_wr=$(get_iostats_stat $TESTPOOL direct_write_count) + prev_dio_wr=$(kstat_pool $TESTPOOL iostats.direct_write_count) log_must manipulate_user_buffer -f "$mntpnt/direct-write.iso" \ -n $NUMBLOCKS -b $BS -w @@ -126,7 +126,7 @@ for i in $(seq 1 $ITERATIONS); do -c $num_blocks # Getting new Direct I/O and ARC write counts. 
- curr_dio_wr=$(get_iostats_stat $TESTPOOL direct_write_count) + curr_dio_wr=$(kstat_pool $TESTPOOL iostats.direct_write_count) total_dio_wr=$((curr_dio_wr - prev_dio_wr)) # Verifying there are checksum errors @@ -165,7 +165,7 @@ for i in $(seq 1 $ITERATIONS); do log_note "Verifying every Direct I/O write checksums iteration $i of \ $ITERATIONS with zfs_vdev_direct_write_verify=1" - prev_dio_wr=$(get_iostats_stat $TESTPOOL direct_write_count) + prev_dio_wr=$(kstat_pool $TESTPOOL iostats.direct_write_count) log_must manipulate_user_buffer -f "$mntpnt/direct-write.iso" \ -n $NUMBLOCKS -b $BS -e -w @@ -176,7 +176,7 @@ for i in $(seq 1 $ITERATIONS); do -c $num_blocks # Getting new Direct I/O write counts. - curr_dio_wr=$(get_iostats_stat $TESTPOOL direct_write_count) + curr_dio_wr=$(kstat_pool $TESTPOOL iostats.direct_write_count) total_dio_wr=$((curr_dio_wr - prev_dio_wr)) log_note "Making sure there are no checksum errors with the ZPool" diff --git a/tests/zfs-tests/tests/functional/fadvise/fadvise_sequential.ksh b/tests/zfs-tests/tests/functional/fadvise/fadvise_sequential.ksh index 7b7d1d379ac6..daeb93273a54 100755 --- a/tests/zfs-tests/tests/functional/fadvise/fadvise_sequential.ksh +++ b/tests/zfs-tests/tests/functional/fadvise/fadvise_sequential.ksh @@ -54,10 +54,6 @@ function cleanup [[ -e $TESTDIR ]] && log_must rm -Rf $TESTDIR/* } -getstat() { - awk -v c="$1" '$1 == c {print $3; exit}' /proc/spl/kstat/zfs/arcstats -} - log_assert "Ensure fadvise prefetch data" log_onexit cleanup @@ -67,12 +63,12 @@ log_must zfs set primarycache=metadata $TESTPOOL log_must file_write -o create -f $FILE -b $BLKSZ -c 1000 sync_pool $TESTPOOL -data_size1=$(getstat data_size) +data_size1=$(kstat arcstats.data_size) log_must file_fadvise -f $FILE -a 2 sleep 10 -data_size2=$(getstat data_size) +data_size2=$(kstat arcstats.data_size) log_note "original data_size is $data_size1, final data_size is $data_size2" log_must [ $data_size1 -le $data_size2 ] diff --git 
a/tests/zfs-tests/tests/functional/fault/suspend_on_probe_errors.ksh b/tests/zfs-tests/tests/functional/fault/suspend_on_probe_errors.ksh index d9261bb5d274..3f6edad6da9b 100755 --- a/tests/zfs-tests/tests/functional/fault/suspend_on_probe_errors.ksh +++ b/tests/zfs-tests/tests/functional/fault/suspend_on_probe_errors.ksh @@ -119,14 +119,14 @@ log_must dd if=/dev/urandom of=$MNTPOINT/writes bs=1M count=1 # Wait until sync starts, and the pool suspends log_note "waiting for pool to suspend" typeset -i tries=30 -until [[ $(cat /proc/spl/kstat/zfs/$TESTPOOL/state) == "SUSPENDED" ]] ; do +until [[ $(kstat_pool $TESTPOOL state) == "SUSPENDED" ]] ; do if ((tries-- == 0)); then zpool status -s log_fail "UNEXPECTED -- pool did not suspend" fi sleep 1 done -log_note $(cat /proc/spl/kstat/zfs/$TESTPOOL/state) +log_note $(kstat_pool $TESTPOOL state) # Put the missing disks back into service log_must eval "echo running > /sys/block/$sd/device/state" @@ -137,7 +137,7 @@ log_must zpool clear $TESTPOOL # Wait until the pool resumes log_note "waiting for pool to resume" tries=30 -until [[ $(cat /proc/spl/kstat/zfs/$TESTPOOL/state) != "SUSPENDED" ]] ; do +until [[ $(kstat_pool $TESTPOOL state) != "SUSPENDED" ]] ; do if ((tries-- == 0)); then log_fail "pool did not resume" fi diff --git a/tests/zfs-tests/tests/functional/fault/suspend_resume_single.ksh b/tests/zfs-tests/tests/functional/fault/suspend_resume_single.ksh index b67059158a57..0dc5584e4fd5 100755 --- a/tests/zfs-tests/tests/functional/fault/suspend_resume_single.ksh +++ b/tests/zfs-tests/tests/functional/fault/suspend_resume_single.ksh @@ -26,8 +26,6 @@ . 
$STF_SUITE/include/libtest.shlib -set -x - DATAFILE="$TMPDIR/datafile" function cleanup @@ -62,7 +60,7 @@ log_must cp $DATAFILE /$TESTPOOL/file # wait until sync starts, and the pool suspends log_note "waiting for pool to suspend" typeset -i tries=10 -until [[ $(cat /proc/spl/kstat/zfs/$TESTPOOL/state) == "SUSPENDED" ]] ; do +until [[ $(kstat_pool $TESTPOOL state) == "SUSPENDED" ]] ; do if ((tries-- == 0)); then log_fail "pool didn't suspend" fi @@ -82,7 +80,7 @@ log_note "giving pool time to settle and complete txg" sleep 7 # if the pool suspended, then everything is bad -if [[ $(cat /proc/spl/kstat/zfs/$TESTPOOL/state) == "SUSPENDED" ]] ; then +if [[ $(kstat_pool $TESTPOOL state) == "SUSPENDED" ]] ; then log_fail "pool suspended" fi diff --git a/tests/zfs-tests/tests/functional/l2arc/l2arc_arcstats_pos.ksh b/tests/zfs-tests/tests/functional/l2arc/l2arc_arcstats_pos.ksh index 69d60ab8bb90..dc6bb9f9a163 100755 --- a/tests/zfs-tests/tests/functional/l2arc/l2arc_arcstats_pos.ksh +++ b/tests/zfs-tests/tests/functional/l2arc/l2arc_arcstats_pos.ksh @@ -73,18 +73,18 @@ arcstat_quiescence_noecho l2_size log_must zpool offline $TESTPOOL $VDEV_CACHE arcstat_quiescence_noecho l2_size -typeset l2_mfu_init=$(get_arcstat l2_mfu_asize) -typeset l2_mru_init=$(get_arcstat l2_mru_asize) -typeset l2_prefetch_init=$(get_arcstat l2_prefetch_asize) -typeset l2_asize_init=$(get_arcstat l2_asize) +typeset l2_mfu_init=$(kstat arcstats.l2_mfu_asize) +typeset l2_mru_init=$(kstat arcstats.l2_mru_asize) +typeset l2_prefetch_init=$(kstat arcstats.l2_prefetch_asize) +typeset l2_asize_init=$(kstat arcstats.l2_asize) log_must zpool online $TESTPOOL $VDEV_CACHE arcstat_quiescence_noecho l2_size log_must zpool export $TESTPOOL arcstat_quiescence_noecho l2_feeds -log_must test $(get_arcstat l2_mfu_asize) -eq 0 -log_must test $(get_arcstat l2_mru_asize) -eq 0 +log_must test $(kstat arcstats.l2_mfu_asize) -eq 0 +log_must test $(kstat arcstats.l2_mru_asize) -eq 0 log_must zpool import -d $VDIR 
$TESTPOOL arcstat_quiescence_noecho l2_size @@ -93,10 +93,10 @@ arcstat_quiescence_noecho l2_size log_must zpool offline $TESTPOOL $VDEV_CACHE arcstat_quiescence_noecho l2_size -typeset l2_mfu_end=$(get_arcstat l2_mfu_asize) -typeset l2_mru_end=$(get_arcstat l2_mru_asize) -typeset l2_prefetch_end=$(get_arcstat l2_prefetch_asize) -typeset l2_asize_end=$(get_arcstat l2_asize) +typeset l2_mfu_end=$(kstat arcstats.l2_mfu_asize) +typeset l2_mru_end=$(kstat arcstats.l2_mru_asize) +typeset l2_prefetch_end=$(kstat arcstats.l2_prefetch_asize) +typeset l2_asize_end=$(kstat arcstats.l2_asize) log_must test $(( $l2_mru_end + $l2_mfu_end + $l2_prefetch_end - \ $l2_asize_end )) -eq 0 diff --git a/tests/zfs-tests/tests/functional/l2arc/l2arc_l2miss_pos.ksh b/tests/zfs-tests/tests/functional/l2arc/l2arc_l2miss_pos.ksh index c9d5d7ffe1f1..8a9e4fa41b7c 100755 --- a/tests/zfs-tests/tests/functional/l2arc/l2arc_l2miss_pos.ksh +++ b/tests/zfs-tests/tests/functional/l2arc/l2arc_l2miss_pos.ksh @@ -71,10 +71,10 @@ log_must fio $FIO_SCRIPTS/random_reads.fio log_must zpool export $TESTPOOL1 log_must zpool import $TESTPOOL1 -d $VDEV1 -typeset starting_miss_count=$(get_arcstat l2_misses) +typeset starting_miss_count=$(kstat arcstats.l2_misses) log_must fio $FIO_SCRIPTS/random_reads.fio -log_must test $(get_arcstat l2_misses) -eq $starting_miss_count +log_must test $(kstat arcstats.l2_misses) -eq $starting_miss_count # I/O to pool with l2arc - expect that l2_misses rises export DIRECTORY=/$TESTPOOL @@ -88,7 +88,7 @@ log_must zpool export $TESTPOOL log_must zpool import $TESTPOOL -d $VDEV log_must fio $FIO_SCRIPTS/random_reads.fio -log_must test $(get_arcstat l2_misses) -gt $starting_miss_count +log_must test $(kstat arcstats.l2_misses) -gt $starting_miss_count log_must zpool destroy -f $TESTPOOL log_must zpool destroy -f $TESTPOOL1 diff --git a/tests/zfs-tests/tests/functional/l2arc/l2arc_mfuonly_pos.ksh b/tests/zfs-tests/tests/functional/l2arc/l2arc_mfuonly_pos.ksh index 
89ab940334ee..2c5fc6753152 100755 --- a/tests/zfs-tests/tests/functional/l2arc/l2arc_mfuonly_pos.ksh +++ b/tests/zfs-tests/tests/functional/l2arc/l2arc_mfuonly_pos.ksh @@ -72,7 +72,7 @@ export FILE_SIZE=$(( floor($fill_mb / $NUMJOBS) ))M log_must truncate -s ${cache_sz}M $VDEV_CACHE -typeset log_blk_start=$(get_arcstat l2_log_blk_writes) +typeset log_blk_start=$(kstat arcstats.l2_log_blk_writes) log_must zpool create -f $TESTPOOL $VDEV cache $VDEV_CACHE @@ -89,7 +89,7 @@ log_must zpool import -N -d $VDIR $TESTPOOL # will not be 0 (mentioned also in zfs.4) # For the purposes of this test we mitigate this by disabling (predictive) # ZFS prefetches with zfs_prefetch_disable=1. -log_must test $(get_arcstat l2_mru_asize) -eq 0 +log_must test $(kstat arcstats.l2_mru_asize) -eq 0 log_must zpool destroy -f $TESTPOOL diff --git a/tests/zfs-tests/tests/functional/l2arc/persist_l2arc_001_pos.ksh b/tests/zfs-tests/tests/functional/l2arc/persist_l2arc_001_pos.ksh index a9968723c3ca..a999f96971fd 100755 --- a/tests/zfs-tests/tests/functional/l2arc/persist_l2arc_001_pos.ksh +++ b/tests/zfs-tests/tests/functional/l2arc/persist_l2arc_001_pos.ksh @@ -88,7 +88,7 @@ arcstat_quiescence_noecho l2_feeds typeset l2_dh_log_blk=$(zdb -l $VDEV_CACHE | awk '/log_blk_count/ {print $2}') -typeset l2_rebuild_log_blk_start=$(get_arcstat l2_rebuild_log_blks) +typeset l2_rebuild_log_blk_start=$(kstat arcstats.l2_rebuild_log_blks) log_must zpool import -d $VDIR $TESTPOOL arcstat_quiescence_noecho l2_size diff --git a/tests/zfs-tests/tests/functional/l2arc/persist_l2arc_002_pos.ksh b/tests/zfs-tests/tests/functional/l2arc/persist_l2arc_002_pos.ksh index 3b893d28da6a..4c6bc2e2e720 100755 --- a/tests/zfs-tests/tests/functional/l2arc/persist_l2arc_002_pos.ksh +++ b/tests/zfs-tests/tests/functional/l2arc/persist_l2arc_002_pos.ksh @@ -94,7 +94,7 @@ arcstat_quiescence_noecho l2_feeds typeset l2_dh_log_blk=$(zdb -l $VDEV_CACHE | awk '/log_blk_count/ {print $2}') -typeset 
l2_rebuild_log_blk_start=$(get_arcstat l2_rebuild_log_blks) +typeset l2_rebuild_log_blk_start=$(kstat arcstats.l2_rebuild_log_blks) log_must zpool import -d $VDIR $TESTPOOL log_must eval "echo $PASSPHRASE | zfs mount -l $TESTPOOL/$TESTFS1" diff --git a/tests/zfs-tests/tests/functional/l2arc/persist_l2arc_003_neg.ksh b/tests/zfs-tests/tests/functional/l2arc/persist_l2arc_003_neg.ksh index f8dc2b108f0d..104d1d484ff2 100755 --- a/tests/zfs-tests/tests/functional/l2arc/persist_l2arc_003_neg.ksh +++ b/tests/zfs-tests/tests/functional/l2arc/persist_l2arc_003_neg.ksh @@ -32,8 +32,7 @@ # 4. Export pool. # 5. Import pool. # 6. Check in zpool iostat if the cache device has space allocated. -# 7. Read the file written in (3) and check if l2_hits in -# /proc/spl/kstat/zfs/arcstats increased. +# 7. Read the file written in (3) and check if arcstats.l2_hits increased. # verify_runnable "global" @@ -74,12 +73,12 @@ log_must fio $FIO_SCRIPTS/random_reads.fio log_must zpool export $TESTPOOL -typeset l2_success_start=$(get_arcstat l2_rebuild_success) +typeset l2_success_start=$(kstat arcstats.l2_rebuild_success) log_must zpool import -d $VDIR $TESTPOOL log_mustnot test "$(zpool iostat -Hpv $TESTPOOL $VDEV_CACHE | awk '{print $2}')" -gt 80000000 -typeset l2_success_end=$(get_arcstat l2_rebuild_success) +typeset l2_success_end=$(kstat arcstats.l2_rebuild_success) log_mustnot test $l2_success_end -gt $l2_success_start diff --git a/tests/zfs-tests/tests/functional/l2arc/persist_l2arc_004_pos.ksh b/tests/zfs-tests/tests/functional/l2arc/persist_l2arc_004_pos.ksh index 8a572c26469c..6460b9a0e7a1 100755 --- a/tests/zfs-tests/tests/functional/l2arc/persist_l2arc_004_pos.ksh +++ b/tests/zfs-tests/tests/functional/l2arc/persist_l2arc_004_pos.ksh @@ -79,7 +79,7 @@ arcstat_quiescence_noecho l2_size log_must zpool export $TESTPOOL arcstat_quiescence_noecho l2_feeds -typeset l2_rebuild_log_blk_start=$(get_arcstat l2_rebuild_log_blks) +typeset l2_rebuild_log_blk_start=$(kstat 
arcstats.l2_rebuild_log_blks) typeset l2_dh_log_blk=$(zdb -l $VDEV_CACHE | awk '/log_blk_count/ {print $2}') log_must zpool import -d $VDIR $TESTPOOL diff --git a/tests/zfs-tests/tests/functional/l2arc/persist_l2arc_005_pos.ksh b/tests/zfs-tests/tests/functional/l2arc/persist_l2arc_005_pos.ksh index 9663437c6597..ce379a566f18 100755 --- a/tests/zfs-tests/tests/functional/l2arc/persist_l2arc_005_pos.ksh +++ b/tests/zfs-tests/tests/functional/l2arc/persist_l2arc_005_pos.ksh @@ -76,7 +76,7 @@ arcstat_quiescence_noecho l2_size log_must zpool offline $TESTPOOL $VDEV_CACHE arcstat_quiescence_noecho l2_size -typeset l2_rebuild_log_blk_start=$(get_arcstat l2_rebuild_log_blks) +typeset l2_rebuild_log_blk_start=$(kstat arcstats.l2_rebuild_log_blks) typeset l2_dh_log_blk=$(zdb -l $VDEV_CACHE | awk '/log_blk_count/ {print $2}') log_must zpool online $TESTPOOL $VDEV_CACHE diff --git a/tests/zfs-tests/tests/functional/mmp/mmp.kshlib b/tests/zfs-tests/tests/functional/mmp/mmp.kshlib index 5071830c489a..01e4f2b735fa 100644 --- a/tests/zfs-tests/tests/functional/mmp/mmp.kshlib +++ b/tests/zfs-tests/tests/functional/mmp/mmp.kshlib @@ -199,20 +199,20 @@ function count_skipped_mmp_writes # pool duration { typeset pool=$1 typeset -i duration=$2 - typeset hist_path="/proc/spl/kstat/zfs/$pool/multihost" sleep $duration - awk 'BEGIN {count=0}; $NF == "-" {count++}; END {print count};' "$hist_path" + kstat_pool $pool multihost | \ + awk 'BEGIN {count=0}; $NF == "-" {count++}; END {print count};' } function count_mmp_writes # pool duration { typeset pool=$1 typeset -i duration=$2 - typeset hist_path="/proc/spl/kstat/zfs/$pool/multihost" sleep $duration - awk 'BEGIN {count=0}; $NF != "-" {count++}; END {print count};' "$hist_path" + kstat_pool $pool multihost | \ + awk 'BEGIN {count=0}; $NF != "-" {count++}; END {print count};' } function summarize_uberblock_mmp # device diff --git a/tests/zfs-tests/tests/functional/mmp/mmp_write_distribution.ksh 
b/tests/zfs-tests/tests/functional/mmp/mmp_write_distribution.ksh index 1ac254aa1dab..6f34974770d1 100755 --- a/tests/zfs-tests/tests/functional/mmp/mmp_write_distribution.ksh +++ b/tests/zfs-tests/tests/functional/mmp/mmp_write_distribution.ksh @@ -47,7 +47,6 @@ log_assert "mmp writes are evenly distributed across leaf vdevs" log_onexit cleanup MMP_HISTORY_TMP=$MMP_DIR/history -MMP_HISTORY=/proc/spl/kstat/zfs/$MMP_POOL/multihost # Step 1 log_must mkdir -p $MMP_DIR @@ -69,7 +68,7 @@ typeset -i min_writes=999 typeset -i max_writes=0 typeset -i write_count # copy to get as close to a consistent view as possible -cp $MMP_HISTORY $MMP_HISTORY_TMP +kstat_pool $MMP_POOL multihost > $MMP_HISTORY_TMP for x in {0..7}; do write_count=$(grep -c file.${x} $MMP_HISTORY_TMP) if [ $write_count -lt $min_writes ]; then diff --git a/tests/zfs-tests/tests/functional/mmp/mmp_write_slow_disk.ksh b/tests/zfs-tests/tests/functional/mmp/mmp_write_slow_disk.ksh index 8b118684aa7f..e45aedd450d2 100755 --- a/tests/zfs-tests/tests/functional/mmp/mmp_write_slow_disk.ksh +++ b/tests/zfs-tests/tests/functional/mmp/mmp_write_slow_disk.ksh @@ -58,7 +58,7 @@ function cleanup log_assert "A long VDEV probe doesn't cause a MMP check suspend" log_onexit cleanup -MMP_HISTORY_URL=/proc/spl/kstat/zfs/$MMP_POOL/multihost +MMP_HISTORY_TMP=$MMP_DIR/history # Create a multiple drive pool log_must zpool events -c @@ -83,8 +83,9 @@ sleep 10 sync_pool $MMP_POOL # Confirm mmp writes to the non-slow disks have taken place +kstat_pool $MMP_POOL multihost > $MMP_HISTORY_TMP for x in {0,1,2,4}; do - write_count=$(grep -c file.${x} $MMP_HISTORY_URL) + write_count=$(grep -c file.${x} $MMP_HISTORY_TMP) [[ $write_count -gt 0 ]] || log_fail "expecting mmp writes" done diff --git a/tests/zfs-tests/tests/functional/mount/umount_unlinked_drain.ksh b/tests/zfs-tests/tests/functional/mount/umount_unlinked_drain.ksh index 40045a7a96b5..9e93c1784dbf 100755 --- a/tests/zfs-tests/tests/functional/mount/umount_unlinked_drain.ksh 
+++ b/tests/zfs-tests/tests/functional/mount/umount_unlinked_drain.ksh @@ -42,13 +42,15 @@ function cleanup function unlinked_size_is { + typeset -i expect=$1 + typeset dataset=$2 + MAX_ITERS=5 # iteration to do before we consider reported number stable iters=0 last_usize=0 while [[ $iters -le $MAX_ITERS ]]; do - kstat_file=$(grep -nrwl /proc/spl/kstat/zfs/$2/objset-0x* -e $3) - nunlinks=$(awk '/nunlinks/ {print $3}' $kstat_file) - nunlinked=$(awk '/nunlinked/ {print $3}' $kstat_file) + nunlinks=$(kstat_dataset $dataset nunlinks) + nunlinked=$(kstat_dataset $dataset nunlinked) usize=$(($nunlinks - $nunlinked)) if [[ $iters == $MAX_ITERS && $usize == $1 ]]; then return 0 @@ -89,20 +91,20 @@ for fs in 1 2 3; do fi log_must set_tunable32 UNLINK_SUSPEND_PROGRESS 1 - log_must unlinked_size_is 0 $TESTPOOL $TESTPOOL/$TESTFS.$fs + log_must unlinked_size_is 0 $TESTPOOL/$TESTFS.$fs # build up unlinked set for fn in $(seq 1 100); do log_must eval "rm $TESTDIR.$fs/file-$fn &" done - log_must unlinked_size_is 100 $TESTPOOL $TESTPOOL/$TESTFS.$fs + log_must unlinked_size_is 100 $TESTPOOL/$TESTFS.$fs # test that we can mount fs without emptying the unlinked list log_must zfs umount $TESTPOOL/$TESTFS.$fs log_must unmounted $TESTDIR.$fs log_must zfs mount $TESTPOOL/$TESTFS.$fs log_must mounted $TESTDIR.$fs - log_must unlinked_size_is 100 $TESTPOOL $TESTPOOL/$TESTFS.$fs + log_must unlinked_size_is 100 $TESTPOOL/$TESTFS.$fs # confirm we can drain and add to unlinked set at the same time log_must set_tunable32 UNLINK_SUSPEND_PROGRESS 0 @@ -111,7 +113,7 @@ for fs in 1 2 3; do for fn in $(seq 101 175); do log_must eval "rm $TESTDIR.$fs/file-$fn &" done - log_must unlinked_size_is 0 $TESTPOOL $TESTPOOL/$TESTFS.$fs + log_must unlinked_size_is 0 $TESTPOOL/$TESTFS.$fs done done diff --git a/tests/zfs-tests/tests/functional/trim/trim_l2arc.ksh b/tests/zfs-tests/tests/functional/trim/trim_l2arc.ksh index 62563e0dd4cb..fc7824ec6ce5 100755 --- 
a/tests/zfs-tests/tests/functional/trim/trim_l2arc.ksh +++ b/tests/zfs-tests/tests/functional/trim/trim_l2arc.ksh @@ -89,9 +89,9 @@ log_must fio $FIO_SCRIPTS/random_reads.fio export RUNTIME=1 typeset do_once=true while $do_once || [[ $l2_size1 -le $l2_size2 ]]; do - typeset l2_size1=$(get_arcstat l2_size) + typeset l2_size1=$(kstat arcstats.l2_size) log_must fio $FIO_SCRIPTS/random_reads.fio - typeset l2_size2=$(get_arcstat l2_size) + typeset l2_size2=$(kstat arcstats.l2_size) do_once=false done