nfsometer-1.9/0000755000000000000000000000000013125073406013324 5ustar rootroot00000000000000nfsometer-1.9/nfsometer.egg-info/0000755000000000000000000000000013125073406017020 5ustar rootroot00000000000000nfsometer-1.9/nfsometer.egg-info/PKG-INFO0000644000000000000000000000041513125073406020115 0ustar rootroot00000000000000Metadata-Version: 1.0 Name: nfsometer Version: 1.9 Summary: NFS performance measurement tool Home-page: http://wiki.linux-nfs.org/wiki/index.php/NFSometer Author: Weston Andros Adamson Author-email: dros@monkey.org License: GPLv2 Description: UNKNOWN Platform: UNKNOWN nfsometer-1.9/nfsometer.egg-info/SOURCES.txt0000644000000000000000000000275313125073406020713 0ustar rootroot00000000000000COPYING DESIGN MANIFEST.in README howto-contribute.txt nfsometer.1 nfsometer.py setup.py nfsometer.egg-info/PKG-INFO nfsometer.egg-info/SOURCES.txt nfsometer.egg-info/dependency_links.txt nfsometer.egg-info/top_level.txt nfsometerlib/__init__.py nfsometerlib/cmd.py nfsometerlib/collection.py nfsometerlib/config.py nfsometerlib/graph.py nfsometerlib/options.py nfsometerlib/parse.py nfsometerlib/report.py nfsometerlib/selector.py nfsometerlib/trace.py nfsometerlib/workloads.py nfsometerlib/html/data_info_pane.html nfsometerlib/html/dataset.html nfsometerlib/html/index.html nfsometerlib/html/report.html nfsometerlib/html/report_info.html nfsometerlib/html/reportlist.html nfsometerlib/html/script.js nfsometerlib/html/style.css nfsometerlib/html/table.html nfsometerlib/html/toc.html nfsometerlib/html/tocnode.html nfsometerlib/html/widget.html nfsometerlib/workloads/bonnie++.nfsometer nfsometerlib/workloads/cthon.nfsometer nfsometerlib/workloads/custom.nfsometer nfsometerlib/workloads/dd_100m_100k.nfsometer nfsometerlib/workloads/dd_100m_1k.nfsometer nfsometerlib/workloads/filebench_fileserver.nfsometer nfsometerlib/workloads/filebench_networkfs.nfsometer nfsometerlib/workloads/filebench_varmail.nfsometer nfsometerlib/workloads/filebench_webserver.nfsometer nfsometerlib/workloads/gitclone.nfsometer nfsometerlib/workloads/iozone.nfsometer nfsometerlib/workloads/iozone_direct.nfsometer nfsometerlib/workloads/kernel.nfsometer nfsometerlib/workloads/python.nfsometer nfsometerlib/workloads/workload.shnfsometer-1.9/nfsometer.egg-info/dependency_links.txt0000644000000000000000000000000113125073406023066 0ustar rootroot00000000000000 nfsometer-1.9/nfsometer.egg-info/top_level.txt0000644000000000000000000000001513125073406021546 0ustar rootroot00000000000000nfsometerlib nfsometer-1.9/nfsometerlib/0000755000000000000000000000000013125073406016015 5ustar rootroot00000000000000nfsometer-1.9/nfsometerlib/html/0000755000000000000000000000000013125073406016761 5ustar rootroot00000000000000nfsometer-1.9/nfsometerlib/html/data_info_pane.html0000644000000000000000000000157513125073146022607 0ustar rootroot00000000000000<%doc> Copyright 2012 NetApp, Inc. All Rights Reserved, contribution by Weston Andros Adamson This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
% for hdr in table_hdrs: % endfor % for row in table_rows: % for col in row: % endfor % endfor
${hdr}
${col}
avg: ${avg}
stddev: ${std}
nfsometer-1.9/nfsometerlib/html/dataset.html0000644000000000000000000001042013125073146021272 0ustar rootroot00000000000000<%doc> Copyright 2012 NetApp, Inc. All Rights Reserved, contribution by Weston Andros Adamson This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
${dataset.toc.anchor()} % if dataset.anchor: % endif

${dataset.toc.num()} ${dataset.toc.title()}

% if dataset.subtitle:

${dataset.subtitle}

% endif
% if dataset.bucket_pie or dataset.bucket_legend:
${dataset.description}
${dataset.bucket_legend}
${dataset.bucket_pie} % if dataset.bucket_pie:
breakdown of group by operation across all traces
% endif
% endif
${dataset.graph_html}
graph view
${dataset.better_sym} ${dataset.better_str} % for more in dataset.better_more: ${more} % endfor
${dataset.tab.html()}
nfsometer-1.9/nfsometerlib/html/index.html0000644000000000000000000000165313125073146020764 0ustar rootroot00000000000000<%doc> Copyright 2012 NetApp, Inc. All Rights Reserved, contribution by Weston Andros Adamson This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. NFSometer: Report Index
nfsometer

Report Index

${index.reportset_info.html()}
nfsometer-1.9/nfsometerlib/html/report.html0000644000000000000000000000232313125073146021163 0ustar rootroot00000000000000<%doc> Copyright 2012 NetApp, Inc. All Rights Reserved, contribution by Weston Andros Adamson This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. NFSometer: ${report.title}
nfsometer

${report.title}

${report.toc.html()} ${report.report_info.html()}
% for w in report.widgets: ${w.html()} % endfor
nfsometer-1.9/nfsometerlib/html/report_info.html0000644000000000000000000000413113125073146022175 0ustar rootroot00000000000000<%doc> Copyright 2012 NetApp, Inc. All Rights Reserved, contribution by Weston Andros Adamson This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. % if info.toc: ${info.toc.anchor()} % endif
% if info.toc:

${info.toc.num()} ${info.toc.title()}

% endif % if info.workload: ${info.workload}
command: ${info.command}
% endif

${info.total_runs} traces ${info.times}

% for i in info.selector_infos: % endfor
runs workload kernel client server path config
${i['runs']} ${i['workload']} ${i['kernel']} ${i['client']} ${i['server']} ${i['path']} ${i['mdt']}

Options from mount

% for mdt in info.seen_mdts: % endfor
${mdt} ${'
'.join(info.mount_options[mdt])}
% if info.warnings:

Warnings

${info.warnings}
% endif % if info.usernotes:

Notes

   ${''.join(info.usernotes)}
   
% endif
% for more in info.more:
${more}
% endfor nfsometer-1.9/nfsometerlib/html/reportlist.html0000644000000000000000000000152513125073146022062 0ustar rootroot00000000000000<%doc> Copyright 2012 NetApp, Inc. All Rights Reserved, contribution by Weston Andros Adamson This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
Reports: % for r in index.report_list: % if r: % if current_title == r.title:
  • ${r.title}
  • % else:
  • ${r.title}
  • % endif % else:
    % endif % endfor
    nfsometer-1.9/nfsometerlib/html/script.js0000644000000000000000000000324213125073146020625 0ustar rootroot00000000000000/* * Copyright 2012 NetApp, Inc. All Rights Reserved, * contribution by Weston Andros Adamson * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License as published by the Free Software * Foundation; either version 2 of the License, or (at your option) any later * version. * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more * details. */ function table_view(obj) { var view = $(obj).find('option:selected').attr('value'); var ds = $(obj).parents('div[class=dataset]'); var all_divs = ds.find('div[class^=compare_]'); var this_divs = ds.find('div[class=compare_' + view + ']'); var label_detail = ds.find('div[class=group_normal]') var label_normal = ds.find('div[class=group_detail]') if (view == 'rundata') { label_detail.hide(); label_normal.show(); } else { label_normal.hide(); label_detail.show(); } all_divs.hide(); this_divs.show(); } function graph_view(obj) { var nfsvers = $(obj).find('option:selected').attr('value'); var ds = $(obj).parents('div[class=dataset]'); var newsrc = ds.find('input[name="data_graph_' + nfsvers + '"]').attr('value'); var img = ds.find('img[class="data_graph"]'); img.attr('src', newsrc); } $(document).ready(function() { $('.graph_view').change(function(){ graph_view(this); }); $('.table_view').change(function(){ table_view(this); }); }); nfsometer-1.9/nfsometerlib/html/style.css0000644000000000000000000002051513125073146020637 0ustar rootroot00000000000000/* * Copyright 2012 NetApp, Inc. All Rights Reserved, * contribution by Weston Andros Adamson * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License as published by the Free Software * Foundation; either version 2 of the License, or (at your option) any later * version. * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more * details. 
*/ * { font-family: "Times New Roman", "Times", serif; } div.cell { display: table-cell; } a:link { text-decoration: none; color: rgb(7, 102, 153); } a:visited { text-decoration: none; color: rgb(7, 102, 153); } a:hover { text-decoration: none; color: rgb(7, 102, 153); } div.toc a:link { text-decoration: none; color: rgb(51, 163, 223); } div.toc a:visited { text-decoration: none; color: rgb(51, 163, 223); } div.toc a:hover { text-decoration: none; color: rgb(51, 163, 223); } li { list-style: none; } /* * Tables */ table.data tr td.label { background-color: #f9f9f9; color: #303030; font-size: 0.8em; border: 1px solid #cccccc; text-align: left; padding: 2px; margin: 0px; -moz-border-radius: 5px; border-radius: 5px; } table.data tr th.label { background-color: #f1f1f1; color: #303030; font-size: 0.8em; text-align: center; padding: 2px; margin: 0px; border: 1px solid #8f8f8f; -moz-border-radius: 5px; border-radius: 5px; } table.data tr td.selected { background-color: rgb(247, 247, 234); border-color: rgb(158, 140, 4); } table.data tr td div td { border: 0px; } table.data tr td { color: #303030; border: 1px solid #8f8f8f; text-align: right; vertical-align: middle; padding: 2px; margin: 0px; -moz-border-radius: 5px; border-radius: 5px; font-size: 0.9em; } table.data tr td.no_data { background: #f1f1f1; text-align: center; vertical-align: middle; font-size: 0.7em; font-style: oblique; } div.description { clear: both; padding: 20px 0px 30px 0px; margin: 0px 0px 0px 5px; } div.data { } h1, h2, h3, h4, h5 { /* color: #101010; */ color: #000000; margin: 0px; } h2 { font-size: 2.5em; } h3 { font-size: 1.5em; } div.widget h1, div.widget h2 { border-color: rgb(51, 163, 223); border-style: solid; border-width: 0px 0px 1px 0px; } div.widget h3, div.widget h4, div.widget h5 { border-color: #cccccc; border-style: solid; border-width: 0px 0px 1px 0px; } div.widget_container { margin-top: 20px; } div.widget { display: block; margin: 10px; } body { /* background-color: rgb(106, 153, 185); */ background-color: #fefefe; } div.dataset { display: table; } div.dataset img { display: table-cell; clear: both; } div.dataset div.dataset_legend { padding: 10px 0px 10px 0px; } div.dataset div.dataset_legend div { margin-bottom: 10px; } div.dataset div.dataset_legend span { font-weight: bold; line-height: 1.5em; } span.better_sym { color: #003D00; font-weight: bold; font-size: 1.1em; } div.dataset_better { float: right; clear: both; } span.better_str { color: #003D00; font-weight: bold; } span.better_more_str { color: #003D00; font-weight: bold; font-size: 0.8em; } div.datasets div.dataset { overflow: hidden; padding: 4px; width: 100%; } div.dataset h4 { padding: 0px 0px 2px 0px; } div.dataset ul { margin: 0px; } div.datasets { clear : both; margin: 0px 0px 0px 10px; } div.report_header { margin: 10px 0px; } div.toc, div.report_index { font-size: 0.7em; padding: 10px; -moz-border-radius: 5px; border-radius: 5px; background-color: #f6f6f6; } div.toc ul { padding: 0px 0px 2px 8px; margin: 0px; } ul.toc li { margin: 0px; padding: 1px; } span.section_num { color: #fefefe; background-color: rgb(51, 163, 223); font-size: 0.7em; padding: 5px; border-radius: 5px; } ul.toc span.section_num { color: rgb(51, 163, 223); background-color: #f6f6f6; padding: 2px; } span.workload_name { font-size: 1.6em; display: block; } span.workload_description { display: block; padding: 10px; } span.workload_command { display: block; margin-left: 20px; padding: 0px 0px 10px 0px; } span.workload_reports { display: block; margin-left: 20px; font-size: 
1.4em; padding: 0px 0px 10px 0px; } div.group_normal { float: left; } div.group_detail { float: left; font-size: 0.7em; } span.kernel, span.client, span.server, span.path { color: #020202; font-size: 8px; clear: both; white-space: nowrap; float: left; } span.mountopt { font-weight: bold; color: #020202; font-size: 1.0em; line-height: 1.4em; white-space: nowrap; float: left; } span.detect { font-weight: bold; color: #020202; font-size: 1.0em; line-height: 1.4em; white-space: nowrap; float: left; padding: 0px 0px 0px 2px; } span.tag { font-weight: bold; color: #020202; font-size: 1.0em; line-height: 1.4em; white-space: nowrap; float: left; padding: 0px 0px 0px 4px; } div.statnotes { font-size: 1.0em; padding: 10px 0px; } div.statnotes li { padding: 5px 0px; } div.dataset_container { clear: both; margin-bottom: 60px; } div.cellhits { clear: both; font-size: 0.7em; } table.data_info_panes { -moz-border-radius: 5px; border-radius: 5px; clear: both; padding: 10px; } div.data_info_pane { font-size: 0.7em; border: 1px solid #e8e8e8; clear: both; padding: 10px; vertical-align: middle; } div.data_info_pane td.data_info_cell { display: table-cell; text-align: left; vertical-align: top; padding: 10px; vertical-align: middle; background-color: #f6f6f6; } span.stddev { font-size: 0.8em; color: #CF2727; } span.units { font-size: 0.7em; } span.hatch { border: 1px solid #707070; color: #101010; font-size: 12px; font-weight: bold; width: 6px; height: 12px; display: inline-block; vertical-align: top; text-align: center; } span.statlink { cursor: pointer; } div.data_info_pane table { padding: 0px; margin: 0px; text-align: center; } div.data_info_pane tbody { padding: 0px; margin: 0px; } div.data_info_pane table th { border-bottom: 1px solid #e9dddd; border-right: 1px solid #e9dddd; padding: 1px; margin: 0px; font-size: 0.7em; vertical-align: bottom; } div.data_info_pane table td { border-bottom: 1px solid #e9dddd; padding: 1px; margin: 0px; font-size: 0.7em; } div.dataset_top, div.dataset_bottom { clear: both; } div.dataset_top_left { } div.dataset_top_right { } div.dataset_tables { padding-left: 10px; } /* make table as wide as elements above */ div.dataset_tables table { width: 100%; } div.dataset_tables td { vertical-align: top; } span.zero_data { clear: both; } div.dataset_toggle { clear: both; } div.hatch_legend table { font-size: 0.7em; margin-left: 10px; padding: 4px; } div.hatch_legend div { margin-left: 10px; } div.shortcut { margin-left: 15px; display: table-cell; border-radius: 5px; border: 1px solid rgb(51, 163, 223); } div.shortcut div { padding: 5px; font-size: 0.6em; float: left; } div.dataset img.cmp_ref { float: left; clear: none; } div.cmp_op { font-weight: bold; padding-right: 2px; float: left; } div.cmp_value { float: left; } img.color_box { padding: 0px; margin: 0px; float: left; } div.dataset img.hatch_hit { float: right; clear: none; } div.data_info_pane img.data_info_hatch { display: inline; } div.dataset_graph_and_table { float: left; } div.subgraphs img { display: none; } table.toc_and_info td { vertical-align: top; } div.nfsometer_title { color: rgb(51, 163, 223); font-size: 2.5em; border-color: rgb(51, 163, 223); border-style: solid; border-width: 0px 0px 1px 1px; padding: 10px; vertical-align: middle; } div.nfsometer_title h1 { font-size: 1.0em; display: inline; padding: 0px 0px 0px 10px; } span.lowlite { color: #dddddd; } table.info_traces { width: 100%; } table.info_traces th { font-size: 0.6em; color: rgb(51, 163, 223); text-align: left; } table.info_traces td { font-size: 
0.7em; text-align: left; } div.mount_options table td { font-size: 0.6em; } table.workload_attrs td { font-size: 0.6em; } table.workload_attrs { margin: 0px 0px 10px 10px; } div.traces, div.mount_options, div.warnings, div.notes { margin: 20px 0px 10px 10px; } div.pie_caption { font-size: 0.3em; } nfsometer-1.9/nfsometerlib/html/table.html0000644000000000000000000000215413125073146020741 0ustar rootroot00000000000000<%doc> Copyright 2012 NetApp, Inc. All Rights Reserved, contribution by Weston Andros Adamson This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
    % if len(table.keys) > 1 and not table.noheader: % if not table.nolabels: % endif % for k in table.keys: % endfor % endif % for g in table.groups: % if not table.nolabels: % endif % for k in table.keys: ${table.formatted_cells[(g, k)]} % endfor % endfor
    ${table.formatted_keys[k]}
    ${table.formatted_groups[g]}
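The table template above assumes a small data contract: keys lists the column ids, groups lists the row ids, and every cell is a preformatted string looked up by a (group, key) tuple. A hypothetical object satisfying that contract (the real table class is presumably defined in nfsometerlib/report.py, which is not shown here):

class TableData(object):
    def __init__(self):
        self.keys = ['v3', 'v4.0']                 # column order
        self.groups = ['time_real']                # row order
        self.noheader = False
        self.nolabels = False
        self.formatted_keys = {'v3': 'NFSv3', 'v4.0': 'NFSv4.0'}
        self.formatted_groups = {'time_real': 'Real time (s)'}
        # one preformatted string per (row, column) pair
        self.formatted_cells = {
            ('time_real', 'v3'):   '12.1 ± 0.2',
            ('time_real', 'v4.0'): '11.8 ± 0.3',
        }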
    nfsometer-1.9/nfsometerlib/html/toc.html0000644000000000000000000000141713125073146020440 0ustar rootroot00000000000000<%doc> Copyright 2012 NetApp, Inc. All Rights Reserved, contribution by Weston Andros Adamson This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

    Report Index


    Contents:

      % for child in node.children: ${child.html()} % endfor
    nfsometer-1.9/nfsometerlib/html/tocnode.html0000644000000000000000000000152713125073146021310 0ustar rootroot00000000000000<%doc> Copyright 2012 NetApp, Inc. All Rights Reserved, contribution by Weston Andros Adamson This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
  • ${node.section} ${node.text}
  • % if node.children:
      % for child in node.children: ${child.html()} % endfor
    % endif nfsometer-1.9/nfsometerlib/html/widget.html0000644000000000000000000000213613125073146021135 0ustar rootroot00000000000000<%doc> Copyright 2012 NetApp, Inc. All Rights Reserved, contribution by Weston Andros Adamson This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
    ${widget.toc.anchor()}

    ${widget.toc.num()} ${widget.toc.title()}

    ${widget.desc}
    % for n in widget.statnote_mesgs:
  • ${n}
  • % endfor
    ${widget.bucket_pie_html} ${widget.bucket_table_html}
    % for ds in widget.datasets: ${ds.html()} % endfor
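All of the html/*.html files above are Mako templates; nfsometerlib/config.py (further below) compiles and caches them with html_template(). A minimal sketch of how a page is rendered from one of them, assuming the mako package is installed and given a hypothetical report object carrying the attributes report.html references (title, toc, report_info, widgets):

from mako.template import Template

_TEMPLATE_CACHE = {}

def html_template(filename):
    # compile each template once and reuse it, as config.py does
    if filename not in _TEMPLATE_CACHE:
        _TEMPLATE_CACHE[filename] = Template(filename=filename)
    return _TEMPLATE_CACHE[filename]

html = html_template('nfsometerlib/html/report.html').render(report=report)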

nfsometer-1.9/nfsometerlib/workloads/0000755000000000000000000000000013125073406020022 5ustar rootroot00000000000000
nfsometer-1.9/nfsometerlib/workloads/bonnie++.nfsometer0000644000000000000000000000137013125073146023350 0ustar rootroot00000000000000# # RUNDIR - a newly created directory on the nfs share. # will exist only when running COMMAND. # # LOCALDIR - a directory on the local fs meant to serve as storage space for # the fetch stage and setup() call # will exist for setup, fetch stages and when COMMAND is run, but # not during check(). # # # the command to run # COMMAND="bonnie++ -d $RUNDIR" # # command description # DESCRIPTION="The bonnie++ FS benchmark" # workload_check() # - no arguments # - any output to stdout means the check failed # - use need_* functions from workloads.sh workload_check() { need_bin bonnie++ } # workload_setup() # - no arguments # - non-zero return indicates a problem workload_setup() { return 0 }
nfsometer-1.9/nfsometerlib/workloads/cthon.nfsometer0000644000000000000000000000241213125073146023061 0ustar rootroot00000000000000# # RUNDIR - a newly created directory on the nfs share. # will exist only when running COMMAND. # # LOCALDIR - a directory on the local fs meant to serve as storage space for # the fetch stage and setup() call # will exist for setup, fetch stages and when COMMAND is run, but # not during check(). # URL="git://linux-nfs.org/~steved/cthon04.git" URL_OUT="cthon" # # the command to run # COMMAND="cd $LOCALDIR/cthon && NFSTESTDIR=\"$RUNDIR\" ./runtests -a" # # command description # DESCRIPTION="The connectathon test suite from git://linux-nfs.org/~steved/cthon04.git. All tests are run: basic, special, general and locking" # workload_check() # - no arguments # - any output to stdout means the check failed # - use need_* functions from workloads.sh workload_check() { need_bin time } # workload_setup() # - no arguments # - non-zero return indicates a problem workload_setup() { cd cthon || return 1 # fix tests.init file mv tests.init tests.init.bak || return 1 cat tests.init.bak | grep -v "SUNW" > tests.init || return 1 # fix runtests script mv runtests runtests.bak || return 1 cat runtests.bak | sed 's/TESTARG=-a/TESTARG=/g' > runtests || return 1 # finally make make 2>&1 || return 1 return 0 }
nfsometer-1.9/nfsometerlib/workloads/custom.nfsometer0000644000000000000000000000164713125073146023267 0ustar rootroot00000000000000# # RUNDIR - a newly created directory on the nfs share. # will exist only when running COMMAND. # # LOCALDIR - a directory on the local fs meant to serve as storage space for # the fetch stage and setup() call # will exist for setup, fetch stages and when COMMAND is run, but # not during check(). # # # the command to run # COMMAND="$NFSOMETER_CMD" # # command description # NAME="Custom: $NFSOMETER_CMD" if [ -n "$NFSOMETER_NAME" ]; then NAME="$NFSOMETER_NAME" fi DESCRIPTION="Custom workload: $NFSOMETER_CMD" if [ -n "$NFSOMETER_DESC" ]; then DESCRIPTION="$NFSOMETER_DESC" fi # workload_check() # - no arguments # - any output to stdout means the check failed # - use need_* functions from workloads.sh workload_check() { need_env "NFSOMETER_CMD" } # workload_setup() # - no arguments # - non-zero return indicates a problem workload_setup() { return 0 }
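Every *.nfsometer file in this directory follows the contract spelled out in the header comments above: set COMMAND (which, judging from the dd and iozone definitions, runs with RUNDIR as its working directory on the NFS share) and DESCRIPTION, then define workload_check(), which prints to stdout on failure using the need_* helpers from workload.sh, and workload_setup(), which returns non-zero on failure. A hypothetical minimal definition, not shipped with nfsometer:

# example.nfsometer - hypothetical minimal workload definition
COMMAND="cp -r $LOCALDIR/src ./dst"
DESCRIPTION="A recursive copy from local disk onto the NFS mount"

# workload_check()
# - any output to stdout means the check failed
workload_check() {
    need_bin cp
}

# workload_setup()
# - non-zero return indicates a problem
workload_setup() {
    mkdir -p $LOCALDIR/src || return 1
    return 0
}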
nfsometer-1.9/nfsometerlib/workloads/dd_100m_100k.nfsometer0000644000000000000000000000140713125073146023630 0ustar rootroot00000000000000# # RUNDIR - a newly created directory on the nfs share. # will exist only when running COMMAND. # # LOCALDIR - a directory on the local fs meant to serve as storage space for # the fetch stage and setup() call # will exist for setup, fetch stages and when COMMAND is run, but # not during check(). # COMMAND="dd if=/dev/zero of=./dd_file.100m_100k bs=102400 count=1024" DESCRIPTION="A run of dd(1) writing a 100MB file of zeros in 100k blocks" # workload_check() # - no arguments # - any output to stdout means the check failed # - use need_* functions from workloads.sh workload_check() { need_bin dd } # workload_setup() # - no arguments # - non-zero return indicates a problem workload_setup() { return 0 }
nfsometer-1.9/nfsometerlib/workloads/dd_100m_1k.nfsometer0000644000000000000000000000140313125073146023464 0ustar rootroot00000000000000# # RUNDIR - a newly created directory on the nfs share. # will exist only when running COMMAND. # # LOCALDIR - a directory on the local fs meant to serve as storage space for # the fetch stage and setup() call # will exist for setup, fetch stages and when COMMAND is run, but # not during check(). # COMMAND="dd if=/dev/zero of=./dd_file.100m_1k bs=1024 count=102400" DESCRIPTION="A run of dd(1) writing a 100MB file of zeros in 1k blocks" # workload_check() # - no arguments # - any output to stdout means the check failed # - use need_* functions from workloads.sh workload_check() { need_bin dd } # workload_setup() # - no arguments # - non-zero return indicates a problem workload_setup() { return 0 }
nfsometer-1.9/nfsometerlib/workloads/filebench_fileserver.nfsometer0000644000000000000000000000224313125073146026115 0ustar rootroot00000000000000# # RUNDIR - a newly created directory on the nfs share. # will exist only when running COMMAND. # # LOCALDIR - a directory on the local fs meant to serve as storage space for # the fetch stage and setup() call # will exist for setup, fetch stages and when COMMAND is run, but # not during check(). # FB_WORKLOAD=fileserver.f FB_RUNTIME=300 FB_SOURCE="/usr/share/filebench/workloads/$FB_WORKLOAD" FB_FILE="nfsometer-$FB_WORKLOAD" # # the command to run # COMMAND="cd $LOCALDIR && filebench -f $FB_FILE" # # command description # DESCRIPTION="A 5 minute run of the fileserver workload from the filebench test suite" # workload_check() # - no arguments # - any output to stdout means the check failed # - use need_* functions from workloads.sh workload_check() { need_bin filebench need_file $FB_SOURCE } # workload_setup() # - no arguments # - non-zero return indicates a problem workload_setup() { sudo sh -c "echo 0 > /proc/sys/kernel/randomize_va_space" cp $FB_SOURCE ./$FB_FILE || return 1 echo "set \$dir = $RUNDIR" >> ./$FB_FILE || return 1 echo "run $FB_RUNTIME" >> ./$FB_FILE || return 1 return 0 }
nfsometer-1.9/nfsometerlib/workloads/filebench_networkfs.nfsometer0000644000000000000000000000224313125073146025771 0ustar rootroot00000000000000# # RUNDIR - a newly created directory on the nfs share. # will exist only when running COMMAND. # # LOCALDIR - a directory on the local fs meant to serve as storage space for # the fetch stage and setup() call # will exist for setup, fetch stages and when COMMAND is run, but # not during check().
# FB_WORKLOAD=networkfs.f FB_RUNTIME=300 FB_SOURCE="/usr/share/filebench/workloads/$FB_WORKLOAD" FB_FILE="nfsometer-$FB_WORKLOAD" # # the command to run # COMMAND="cd $LOCALDIR && filebench -f $FB_FILE" # # command description # DESCRIPTION="A 5 minute run of the networkfs workload from the filebench test suite" # workload_check() # - no arguments # - any output to stdout means the check failed # - use need_* functions from workloads.sh workload_check() { need_bin filebench need_file $FB_SOURCE } # workload_setup() # - no arguments # - non-zero return indicates a problem workload_setup() { sudo sh -c "echo 0 > /proc/sys/kernel/randomize_va_space" cp $FB_SOURCE ./$FB_FILE || return 1 echo "set \$dir = $RUNDIR" >> ./$FB_FILE || return 1 echo "run $FB_RUNTIME" >> ./$FB_FILE || return 1 return 0 }
nfsometer-1.9/nfsometerlib/workloads/filebench_varmail.nfsometer0000644000000000000000000000223713125073146025405 0ustar rootroot00000000000000# # RUNDIR - a newly created directory on the nfs share. # will exist only when running COMMAND. # # LOCALDIR - a directory on the local fs meant to serve as storage space for # the fetch stage and setup() call # will exist for setup, fetch stages and when COMMAND is run, but # not during check(). # FB_WORKLOAD=varmail.f FB_RUNTIME=300 FB_SOURCE="/usr/share/filebench/workloads/$FB_WORKLOAD" FB_FILE="nfsometer-$FB_WORKLOAD" # # the command to run # COMMAND="cd $LOCALDIR && filebench -f $FB_FILE" # # command description # DESCRIPTION="A 5 minute run of the varmail workload from the filebench test suite" # workload_check() # - no arguments # - any output to stdout means the check failed # - use need_* functions from workloads.sh workload_check() { need_bin filebench need_file $FB_SOURCE } # workload_setup() # - no arguments # - non-zero return indicates a problem workload_setup() { sudo sh -c "echo 0 > /proc/sys/kernel/randomize_va_space" cp $FB_SOURCE ./$FB_FILE || return 1 echo "set \$dir = $RUNDIR" >> ./$FB_FILE || return 1 echo "run $FB_RUNTIME" >> ./$FB_FILE || return 1 return 0 }
nfsometer-1.9/nfsometerlib/workloads/filebench_webserver.nfsometer0000644000000000000000000000224313125073146025753 0ustar rootroot00000000000000# # RUNDIR - a newly created directory on the nfs share. # will exist only when running COMMAND. # # LOCALDIR - a directory on the local fs meant to serve as storage space for # the fetch stage and setup() call # will exist for setup, fetch stages and when COMMAND is run, but # not during check(). # FB_WORKLOAD=webserver.f FB_RUNTIME=300 FB_SOURCE="/usr/share/filebench/workloads/$FB_WORKLOAD" FB_FILE="nfsometer-$FB_WORKLOAD" # # the command to run # COMMAND="cd $LOCALDIR && filebench -f $FB_FILE" # # command description # DESCRIPTION="A 5 minute run of the webserver workload from the filebench test suite" # workload_check() # - no arguments # - any output to stdout means the check failed # - use need_* functions from workloads.sh workload_check() { need_bin filebench need_file $FB_SOURCE } # workload_setup() # - no arguments # - non-zero return indicates a problem workload_setup() { sudo sh -c "echo 0 > /proc/sys/kernel/randomize_va_space" cp $FB_SOURCE ./$FB_FILE || return 1 echo "set \$dir = $RUNDIR" >> ./$FB_FILE || return 1 echo "run $FB_RUNTIME" >> ./$FB_FILE || return 1 return 0 }
nfsometer-1.9/nfsometerlib/workloads/gitclone.nfsometer0000644000000000000000000000201013125073146023546 0ustar rootroot00000000000000# # RUNDIR - a newly created directory on the nfs share. # will exist only when running COMMAND.
# # LOCALDIR - a directory on the local fs meant to serve as storage space for # the fetch stage and setup() call # will exist for setup, fetch stages and when COMMAND is run, but # not during check(). # URL="git://git.linux-nfs.org/projects/trondmy/linux-nfs" URL_OUT="linux-nfs" # # the command to run # COMMAND="git clone $LOCALDIR/linux-nfs" # # command description # DESCRIPTION="A git clone of a very large git repo: the linux kernel. Uses Trond's repo from git.linux-nfs.org. The measured 'git clone's are cloning from local disk to nfs so as not to depend on connectivity to git.linux-nfs.org" # workload_check() # - no arguments # - any output to stdout means the check failed # - use need_* functions from workloads.sh workload_check() { need_bin git } # workload_setup() # - no arguments # - non-zero return indicates a problem workload_setup() { return 0 }
nfsometer-1.9/nfsometerlib/workloads/iozone.nfsometer0000644000000000000000000000141113125073146023247 0ustar rootroot00000000000000# # RUNDIR - a newly created directory on the nfs share. # will exist only when running COMMAND. # # LOCALDIR - a directory on the local fs meant to serve as storage space for # the fetch stage and setup() call # will exist for setup, fetch stages and when COMMAND is run, but # not during check(). # # # the command to run # COMMAND="iozone -azcR -f ./testfile" # # command description # DESCRIPTION="The IOzone test suite from iozone.org" # workload_check() # - no arguments # - any output to stdout means the check failed # - use need_* functions from workloads.sh workload_check() { need_bin iozone } # workload_setup() # - no arguments # - non-zero return indicates a problem workload_setup() { return 0 }
nfsometer-1.9/nfsometerlib/workloads/iozone_direct.nfsometer0000644000000000000000000000145413125073146024610 0ustar rootroot00000000000000# # RUNDIR - a newly created directory on the nfs share. # will exist only when running COMMAND. # # LOCALDIR - a directory on the local fs meant to serve as storage space for # the fetch stage and setup() call # will exist for setup, fetch stages and when COMMAND is run, but # not during check(). # # # the command to run # COMMAND="iozone -IazcR -f ./testfile" # # command description # DESCRIPTION="The IOzone test suite from iozone.org - run with O_DIRECT (-I argument)" # workload_check() # - no arguments # - any output to stdout means the check failed # - use need_* functions from workloads.sh workload_check() { need_bin iozone } # workload_setup() # - no arguments # - non-zero return indicates a problem workload_setup() { return 0 }
nfsometer-1.9/nfsometerlib/workloads/kernel.nfsometer0000644000000000000000000000210313125073146023223 0ustar rootroot00000000000000# # RUNDIR - a newly created directory on the nfs share. # will exist only when running COMMAND. # # LOCALDIR - a directory on the local fs meant to serve as storage space for # the fetch stage and setup() call # will exist for setup, fetch stages and when COMMAND is run, but # not during check(). # LINUX="linux-3.4" TARBALL="$LOCALDIR/$LINUX.tar.bz2" URL="http://www.kernel.org/pub/linux/kernel/v3.0/$LINUX.tar.bz2" URL_OUT="$LINUX.tar.bz2" # # the command to run # COMMAND="tar jxvf $TARBALL && cd $LINUX && make defconfig && make -j4 all modules" # # command description # DESCRIPTION="A large compile: the linux kernel. Uses linux-3.4.tar.bz2 from kernel.org.
Does a 'make defconfig' to generate .config, then 'make all modules'" # workload_check() # - no arguments # - any output to stdout means the check failed # - use need_* functions from workloads.sh workload_check() { need_bin tar need_bin make } # workload_setup() # - no arguments # - non-zero return indicates a problem workload_setup() { return 0 }
nfsometer-1.9/nfsometerlib/workloads/python.nfsometer0000644000000000000000000000177413125073146023271 0ustar rootroot00000000000000# # RUNDIR - a newly created directory on the nfs share. # will exist only when running COMMAND. # # LOCALDIR - a directory on the local fs meant to serve as storage space for # the fetch stage and setup() call # will exist for setup, fetch stages and when COMMAND is run, but # not during check(). # PYTHON="Python-2.7.2" TARBALL="$LOCALDIR/$PYTHON.tgz" URL="http://python.org/ftp/python/2.7.2/$PYTHON.tgz" URL_OUT="$PYTHON.tgz" # # the command to run # COMMAND="tar zxvf $TARBALL && cd $PYTHON && ./configure && make" # # command description # DESCRIPTION="A small compile test: the Python interpreter and associated libraries. Uses Python-2.7.2.tgz from python.org" # workload_check() # - no arguments # - any output to stdout means the check failed # - use need_* functions from workloads.sh workload_check() { need_bin tar need_bin make } # workload_setup() # - no arguments # - non-zero return indicates a problem workload_setup() { return 0 }
nfsometer-1.9/nfsometerlib/workloads/workload.sh0000755000000000000000000000534713125073146022205 0ustar rootroot00000000000000#!/bin/bash WORKLOADS_DIR="$(dirname $0)" WORKLOADS="$(cd $WORKLOADS_DIR && ls *.nfsometer 2>&1 | sed 's/.nfsometer//g')" usage() { [ -n "$*" ] && echo $* >&2 echo "usage: $0 <command> [args]" >&2 echo "XXX" >&2 exit 1 } check_rundir() { if [ -z "$RUNDIR" -o ! -d "$RUNDIR" ] ; then usage "RUNDIR not defined" fi } check_localdir() { if [ -z "$LOCALDIR" -o ! -d "$LOCALDIR" ] ; then usage "LOCALDIR not defined" fi } check_dirs() { check_rundir check_localdir } do_fetch() { _ret=0 W=$1.nfsometer source $WORKLOADS_DIR/$W cd $LOCALDIR if [ -n "$URL" -a -n "$URL_OUT" ] ; then if [ ! -f "$URL_OUT" ]; then wget -O "$URL_OUT" "$URL" _ret=$? if [ $_ret -ne 0 ] ; then rm -f "$URL_OUT" fi fi fi return $_ret } do_check() { W=$1.nfsometer source $WORKLOADS_DIR/$W cd $LOCALDIR workload_check return $? } do_setup() { W=$1.nfsometer source $WORKLOADS_DIR/$W cd $LOCALDIR workload_setup return $? } get_command() { W=$1.nfsometer source $WORKLOADS_DIR/$W echo $COMMAND } get_description() { W=$1.nfsometer source $WORKLOADS_DIR/$W echo $DESCRIPTION } get_url() { W=$1.nfsometer source $WORKLOADS_DIR/$W echo $URL } get_url_out() { W=$1.nfsometer source $WORKLOADS_DIR/$W echo $URL_OUT } get_name() { W=$1.nfsometer source $WORKLOADS_DIR/$W if [ -n "$NAME" ]; then echo $NAME else echo $1 fi } need_env() { if [ -z "$(eval "echo \$$(echo $1)")" ] ; then echo "env variable '$1' not defined" fi } need_bin() { if [ -z "$(which $1 2> /dev/null)" ] ; then echo "binary '$1' not found" fi } need_file() { if [ !
-f "$1" ] ; then echo "file '$1' not found" fi } if [ $# -lt 1 ] ; then usage fi CMD="$1" if [ "$CMD" = "list" ]; then echo $WORKLOADS | sort elif [ "$CMD" = "check" ]; then check_localdir if [ $# -ne 2 ] ; then usage "check expects one argument " fi do_check $2 elif [ "$CMD" = "setup" ]; then check_dirs if [ $# -ne 2 ] ; then usage "setup expects one argument " fi do_setup $2 elif [ "$CMD" = "command" ]; then #check_dirs if [ $# -ne 2 ] ; then usage "command expects one argument " fi get_command $2 elif [ "$CMD" = "description" ]; then #check_dirs if [ $# -ne 2 ] ; then usage "description expects one argument " fi get_description $2 elif [ "$CMD" = "name" ]; then #check_dirs if [ $# -ne 2 ] ; then usage "description expects one argument " fi get_name $2 elif [ "$CMD" = "url" ]; then check_localdir if [ $# -ne 2 ] ; then usage "url expects one argument " fi get_url $2 elif [ "$CMD" = "url_out" ]; then check_localdir if [ $# -ne 2 ] ; then usage "url_out expects one argument " fi get_url_out $2 else usage "invalid command: $CMD" fi nfsometer-1.9/nfsometerlib/__init__.py0000644000000000000000000000111313125073146020123 0ustar rootroot00000000000000""" Copyright 2012 NetApp, Inc. All Rights Reserved, contribution by Weston Andros Adamson This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. """ nfsometer-1.9/nfsometerlib/cmd.py0000644000000000000000000000452213125073146017136 0ustar rootroot00000000000000""" Copyright 2012 NetApp, Inc. All Rights Reserved, contribution by Weston Andros Adamson This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
""" import os import posix import sys import subprocess # command wrappers def simplecmd(args): r = cmd(args) return '\n'.join(r[0]).strip() class CmdError(Exception): pass class CmdErrorCode(CmdError): def __init__(self, cmd, code, errstr): self.cmd = cmd self.code = code self.errstr = errstr def __str__(self): return str.format( 'command "{:s}" exited with non-zero status: {:d}{:s}', self.cmd, self.code, self.errstr) class CmdErrorOut(CmdError): pass def cmd(args, raiseerrorcode=True, raiseerrorout=True, instr='', env=None, pass_output=False): #print "command> %s" % args if env: curenv = dict(posix.environ) for k,v in env.iteritems(): curenv[k] = v env = curenv stdin = subprocess.PIPE stdout = subprocess.PIPE stderr = subprocess.PIPE if pass_output: stdout = sys.stdout stderr = sys.stderr #def pre_fn(): #os.setpgrp() proc = subprocess.Popen(args, shell=True, stdin=stdin, stdout=stdout, stderr=stderr, env=env) #preexec_fn=pre_fn) if instr: proc.stdin.write(instr) outstr, errstr = proc.communicate() ret = proc.wait() if not errstr: errstr = '' else: errstr = '\n%s' % errstr if raiseerrorcode and ret != 0: raise CmdErrorCode(args, ret, errstr) if raiseerrorout and errstr: raise CmdErrorOut('command "%s" has output to stderr: %s' % (args, errstr)) if outstr: o_str = outstr.split('\n') else: o_str = '' if errstr: e_str = errstr.split('\n') else: e_str = '' return (o_str, e_str) nfsometer-1.9/nfsometerlib/collection.py0000644000000000000000000004650513125073146020535 0ustar rootroot00000000000000""" Copyright 2012 NetApp, Inc. All Rights Reserved, contribution by Weston Andros Adamson This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. """ import os import numpy as np from subprocess import call from config import * from selector import Selector import parse from trace import TraceAttrs class Stat: """ Object that stores values for a parsed statistic across multiple traces. 
""" def __init__(self, name, values=None, filename=None, tracedirs=None): """ name - globally unique name of statistic values - value for each parsed run filename - parsed tracedir file tracedirs - tracedir for each parsed run 'values' and 'tracedirs' must be the same length """ self.name = name self._values = [] if values: self._values.extend(values) self._filename = None if filename: self._filename = filename self._tracedirs = [] if tracedirs: self._tracedirs.extend(tracedirs) assert len(self._values) == len(self._tracedirs) self._clear_cached() def _clear_cached(self): """ clear cached values, should be called when self._values changes """ self._mean = None self._std = None self._empty = None self._max = None def __repr__(self): return "Stat(name=%r, values=%r, tracedirs=%r)" % \ (self.name, self._values, self._tracedirs) def __nonzero__(self): return not self.empty() def num_runs(self): """ return the number of runs parsed """ return len(self._values) def run_value(self, tracedir, *args): """ return the value for the run associated with tracedir """ try: run = self._tracedirs.index(tracedir) except ValueError, e: if args: assert len(args) == 1 return args[0] raise e try: return self._values[run] except IndexError, e: if args: assert len(args) == 1 return args[0] raise e def add_value(self, value, filename, tracedir): """ add a value filename - filename this stat came from - must be the same for all values in the Stat. tracedir - tracedir this stat value came from """ self._clear_cached() self._values.append(value) if not self._filename: self._filename = filename else: assert self._filename == filename self._tracedirs.append(tracedir) def mean(self): """ returns mean of values """ if self._mean == None: self._mean = np.mean(self._values) return self._mean def std(self): """ returns standard deviation of values """ if self._std == None: self._std = np.std(self._values) return self._std def empty(self): """ returns true if empty """ if self._empty == None: self._empty = not any(self._values) return self._empty def max(self): """ returns max of values """ if self._max == None: self._max = max(self._values) return self._max def values(self): """ returns tuple of all values in this Stat """ return tuple(self._values) def filename(self): """ returns filename of file that the values were sourced from """ return self._filename def tracedirs(self): """ returns tuple of tracedirs that the values were sourced from """ return tuple(self._tracedirs) class Bucket: """ A collection of Stat objects that are related - in the same bucket """ def __init__(self, name, stats=None): self.name = name self._stats = [] self._tracedirs = [] self._filename = None self._sum_by_tracedir = {} self._clear_cached() def _clear_cached(self): self._sorted = False self._mean = None self._std = None self._max = None self._empty = None self._num_runs = None def __nonzero__(self): return not self.empty() def _sort(self): if not self._sorted: self._stats.sort(lambda x,y: -1 * cmp(x.mean(), y.mean())) self._sorted = True def foreach(self): self._sort() for s in self._stats: yield s def num_runs(self): return len(self._tracedirs) def run_total(self, tracedir): return self._sum_by_tracedir[tracedir] def mean(self): if self._mean == None: self._mean = np.mean(self._sum_by_tracedir.values()) return self._mean def std(self): if self._std == None: self._std = np.std(self._sum_by_tracedir.values()) return self._std def max(self): if self._max == None: self._max = max(self._sum_by_tracedir.values()) return self._max def 
filename(self): return self._filename def tracedirs(self): return self._tracedirs def empty(self): if not self._empty: self._empty = all([ x.empty() for x in self._stats]) return self._empty def add_stat_to_bucket(self, stat): self._clear_cached() self._stats.append(stat) if not self._filename: self._filename = stat.filename() else: assert self._filename == stat.filename() vals = stat.values() dirs = stat.tracedirs() assert len(vals) == len(dirs) for i, d in enumerate(dirs): if not d in self._tracedirs: self._tracedirs.append(d) if not self._sum_by_tracedir.has_key(d): self._sum_by_tracedir[d] = 0.0 self._sum_by_tracedir[d] += vals[i] def __repr__(self): return "Bucket(%r, stats=%r)" % (self.name, tuple(self._stats),) class TraceStats: """ a collection of Stat and Bucket objects """ def __init__(self, collection): self.collection = collection self._attrs = {} self._values = {} self._num_runs = None def add_attr(self, name, value): if not self._attrs.has_key(name): self._attrs[name] = set() self._attrs[name].add(value) def get_attr(self, name): return self._attrs[name] def has_attr(self, name): return self._attrs.has_key(name) def merge_attrs(self, new): str_attrs = ['workload_command', 'workload_description'] for name in str_attrs: self.add_attr(name, new[name]) float_attrs = ['starttime', 'stoptime'] for name in float_attrs: self.add_attr(name, float(new[name])) def add_stat(self, key, value, units, key_desc, key_better, bucket_def, filename, tracedir): """ add a value for the key. should be called once on each key for every workload result directory """ if not self._values.has_key(key): self._values[key] = Stat(key) self._values[key].add_value(float(value), filename, tracedir) info = {'units': units, 'descr': key_desc, 'better': key_better} self.collection.set_stat_info(key, info) if bucket_def: if isinstance(bucket_def, (list, tuple)) and \ isinstance(bucket_def[0], (list, tuple)): defs = bucket_def else: defs = [ bucket_def ] for x in defs: d = x[0] bucket_name = x[1] if len(x) > 2: display = x[2] else: display = None d.add_key(bucket_name, key, display) def add_bucket(self, bucket_name, stat, descr): """ add a value for the bucket. 
should be called once on each key for every workload result directory """ assert isinstance(stat, Stat), repr(stat) if not self._values.has_key(bucket_name): self._values[bucket_name] = Bucket(bucket_name) self._values[bucket_name].add_stat_to_bucket(stat) units = self.collection.stat_units(stat.name) better = self.collection.stat_better(stat.name) info = {'units': units, 'descr': descr, 'better': better} self.collection.set_stat_info(self._values[bucket_name].name, info) def get_stat(self, key): return self._values.get(key, None) def num_runs(self): return max([ x.num_runs() for x in self._values.values() ]) class TraceCollection: """ A collection of TraceStats objects """ def __init__(self, resultsdir): assert os.path.isdir(resultsdir) self.resultsdir = resultsdir self._tracestats = {} self._stat_info = {} # map tracedir -> warning messages self._warnings = {} cwd = os.getcwd() os.chdir(self.resultsdir) for ent in os.listdir('.'): try: # old if ent.startswith('test-') and os.path.isdir(ent): self.load_tracedir(ent) # also old elif ent.startswith('nfstest-') and os.path.isdir(ent): self.load_tracedir(ent) # new elif ent.startswith(TRACE_DIR_PREFIX) and os.path.isdir(ent): self.load_tracedir(ent) except IOError, e: self.warn(ent, str(e)) os.chdir(cwd) workloads = set() kernels = set() mountopts = set() detects = set() tags = set() clients = set() servers = set() paths = set() for sel, tracestat in self._tracestats.iteritems(): parse.gather_buckets(self, tracestat) workloads.add(sel.workload) kernels.add(sel.kernel) mountopts.add(sel.mountopt) detects.add(sel.detect) tags.add(sel.tag) clients.add(sel.client) servers.add(sel.server) paths.add(sel.path) # get sorting out of the way now workloads = list(workloads) workloads.sort() kernels = list(kernels) kernels.sort() mountopts = list(mountopts) mountopts.sort() detects = list(detects) detects.sort() tags = list(tags) tags.sort() clients = list(clients) clients.sort() servers = list(servers) servers.sort() paths = list(paths) paths.sort() self.selection = Selector(workloads, kernels, mountopts, detects, tags, clients, servers, paths) def notes_edit(self): notes_file = os.path.join(self.resultsdir, NOTES_FILE) call([posix.environ.get('EDITOR', 'vi'), notes_file]) def notes_get(self): notes_file = os.path.join(self.resultsdir, NOTES_FILE) try: return file(notes_file).readlines() except IOError: return [] def warn(self, tracedir, msg): if not tracedir.endswith('/'): tracedir += '/' if msg.startswith('[Errno '): msg = msg[msg.find(']') + 1:] if not self._warnings.has_key(tracedir): self._warnings[tracedir] = [] self._warnings[tracedir].append(msg.replace(tracedir, '[dir]/')) warn(tracedir + ': ' + msg) def warnings(self): return [ (d, tuple(self._warnings[d])) for d in self._warnings.keys() ] def empty(self): return len(self._tracestats) == 0 def set_stat_info(self, key, info): if not self._stat_info.has_key(key): self._stat_info[key] = info else: assert self._stat_info[key] == info, \ "set_stat_info: info mismatch for %s: %r != %r" % \ (key, self._stat_info[key], info) def stat_units(self, key): u = self._stat_info.get(key, {}).get('units', None) return self._stat_info.get(key, {}).get('units', None) def stat_description(self, key): descr = self._stat_info.get(key, {}).get('descr', None) return descr def stat_better(self, key): b = BETTER_UNKNOWN better = self._stat_info.get(key, {}).get('better', b) return better def get_better_info(self, selection, key): bounds = TEST_BOUND_IO # XXX should come from workload definition if 
selection.workload.startswith('filebench_'): bounds = TEST_BOUND_TIME better = self.stat_better(key) return better_info(bounds, better) def _ref_trace(self, workload, kernel, mountopts, detects, tags, client, server, path): """ return instance to TraceStats keyed by arguments """ sel = Selector(workload, kernel, mountopts, detects, tags, client, server, path) assert sel.is_valid_key(), "Invalid key: %r" % sel if not self._tracestats.has_key(sel): self._tracestats[sel] = TraceStats(self) return self._tracestats[sel] def get_trace(self, selection): return self._tracestats[selection] def has_traces(self, selection): """ return True if this collection has any traces matching 'selection', otherwise returns False """ for x in selection.foreach(): if self._tracestats.has_key(x): return True return False def _load_traceattrs(self, tracedir): """ load attrs from attr file """ attr = {'tracedir': tracedir, } attr_file = os.path.join(tracedir, 'arguments') trace_attrs = TraceAttrs(filename=attr_file).to_dict() for k, v in trace_attrs.iteritems(): attr[k] = v return attr def _check_dmesg(self, tracedir): """ check dmesg of tracedir for lines starting with "NFS:" returns an error message if found returns empty string if nothing is found """ def _check_lines(f): return '\n'.join([ x[2:] for x in file(f).readlines() if x.startswith('>') and x.lower().find('nfs:') >= 0 ]) diff = os.path.join(tracedir, 'dmesg.diff') result = _check_lines(diff) if result: return 'dmesg.start and dmesg.end are different:\n%s' % (result,) return '' def load_tracedir(self, tracedir): """ load a trace directory and all stats contained within """ assert os.path.isdir(tracedir) attrs = self._load_traceattrs(tracedir) warning = self._check_dmesg(tracedir) if warning: self.warn(tracedir, warning) # XXX move to upgrade tracestat = self._ref_trace(attrs['workload'], attrs['kernel'], attrs['mountopts'], attrs['detects'], attrs['tags'], attrs['client'], attrs['server'], attrs['path']) tracestat.merge_attrs(attrs) parse.parse_tracedir(self, tracestat, tracedir, attrs) def get_attr(self, selection, attr_name): """ returns a tuple of unique values for 'attr_name' for traces matching 'selection' """ assert len(selection.workloads) attr = set() for subsel in selection.foreach(): try: tracestat = self.get_trace(subsel) except KeyError: continue if tracestat.has_attr(attr_name): trace_attr = tracestat.get_attr(attr_name) attr = attr.union(trace_attr) attr = list(attr) attr.sort() return tuple(attr) def _get_contents(self, selection): res = [] outer = ('client', 'kernel', 'server', 'path') for sel in selection.foreach(outer): info = {} info['client'] = sel.client info['kernel'] = sel.kernel info['server'] = sel.server info['path'] = sel.path tmpmap = {} map_order = [] for subsel in sel.foreach(): try: tracestat = self.get_trace(subsel) except: continue nruns = tracestat.num_runs() mdt = subsel.mountopt if subsel.detect: mdt += ' ' + subsel.detect if subsel.tag: mdt += ' ' + subsel.tag if not mdt in map_order: map_order.append(mdt) if not tmpmap.has_key(mdt): tmpmap[mdt] = {} if not tmpmap[mdt].has_key(nruns): tmpmap[mdt][nruns] = [] tmpmap[mdt][nruns].append(subsel.workload) wmap = {} worder = [] for mdt in map_order: if not tmpmap.has_key(mdt): continue runs = tmpmap[mdt].keys() runs.sort() for r in runs: workloads = ' '.join(tmpmap[mdt][r]) run_mdt = '%u runs of %s' % (r, mdt) if not workloads in wmap: wmap[workloads] = [] worder.append(workloads) wmap[workloads].append(run_mdt) wlist = [] for w in worder: wlist.append((w, tuple(wmap[w]))) 
info['info'] = wlist res.append(info) return res def show_contents(self, selector=None, pre=''): """ return list of lines showing contents of the collection filtered by 'selector' if present """ if not selector: selector = self.selection res = self._get_contents(selector) out = [] for info in res: out.append("client: %s" % info['client']) out.append("kernel: %s" % info['kernel']) out.append("server: %s" % info['server']) out.append("path: %s" % info['path']) for w, l in info['info']: out.append('workloads: %s' % w) for x in l: out.append(' %s' % x) out.append('') return [ ' %s' % x for x in out ] def gather_data(self, keys, selection): groups = [] vals = {} # XXX order = ['workload', 'client', 'server', 'mountopt', 'detect', 'tag', 'kernel', 'path'] for subsel in selection.foreach(order): assert not vals.has_key(subsel) vals[subsel] = {} try: tracestat = self.get_trace(subsel) except KeyError: continue for k in keys: vals[subsel][k] = tracestat.get_stat(k) groups.append(subsel) return groups, vals nfsometer-1.9/nfsometerlib/config.py0000644000000000000000000002612013125073371017636 0ustar rootroot00000000000000""" Copyright 2012 NetApp, Inc. All Rights Reserved, contribution by Weston Andros Adamson This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. """ import re import os, posix, stat, sys import socket NFSOMETER_VERSION='1.9' NFSOMETER_MANPAGE='nfsometer.1' NFSOMETERLIB_DIR=os.path.split(__file__)[0] NFSOMETER_DIR=os.path.join(posix.environ['HOME'], '.nfsometer') # # Trace # RUNNING_TRACE_DIR='/tmp/nfsometer_trace' PROBE_DIR='/tmp/nfsometer_probe' TRACE_ATTRFILE='arguments' TRACE_DIR_PREFIX='nfsometer_trace' TRACE_DIR_VERSION=10 TRACE_MOUNT_TRIES = 3 TRACE_MOUNT_TRY_DELAY = 1.0 TRACE_UMOUNT_TRIES = 60 TRACE_CLEANUP_UMOUNT_TRIES = 60 TRACE_UMOUNT_TRY_DELAY = 1.0 TRACE_LOADGEN_STAGGER_MAX = 60 # # Locations # MOUNTDIR=os.path.join(RUNNING_TRACE_DIR, 'mnt') WORKLOADFILES_ROOT=os.path.join(NFSOMETER_DIR, 'workload_files') RESULTS_DIR=os.path.join(posix.environ['HOME'], 'nfsometer_results') HOSTNAME=socket.getfqdn() RUNROOT='%s/nfsometer_runroot_%s' % (MOUNTDIR, HOSTNAME) HTML_DIR="%s/html" % NFSOMETERLIB_DIR # # Mtimes # MODULE_PATH=os.path.dirname(__file__) MODULE_CONFIG_PATH=os.path.join(MODULE_PATH, 'config.py') MODULE_CONFIG_MTIME=os.stat(MODULE_CONFIG_PATH)[stat.ST_MTIME] MODULE_GRAPH_PATH=os.path.join(MODULE_PATH, 'graph.py') MODULE_GRAPH_MTIME=os.stat(MODULE_GRAPH_PATH)[stat.ST_MTIME] # # Workload # FILE_COMMAND='command.sh' WORKLOADS_DIR=os.path.join(NFSOMETERLIB_DIR, 'workloads') WORKLOADS_SCRIPT=os.path.join(WORKLOADS_DIR, 'workload.sh') # # Notes # NOTES_FILE="nfsometer-notes.txt" # # Graphs # GRAPH_ERRORBAR_COLOR='#CF2727' GRAPH_ERRORBAR_WIDTH=4 GRAPH_EDGE_COLOR='#000000' COLORS = [ '#FEC44F', # Yellow '#D95F0E', # Orange '#476CDA', # Blue '#336600', # Green '#008B8B', # Turquoise '#303030', # Blackish '#FEE0B6', '#B2ABD2', '#8073AC', '#542788', '#2D004B', '#67001F', '#B2182B', '#D6604D', '#F4A582', '#FDDBC7', '#E0E0E0', '#BABABA', '#878787', '#4D4D4D', '#1A1A1A', ] def color_idx(i): return i % len(COLORS) HATCHES = ['/', '.', 'x', '*', '|', 'o', '-', '+', '\\', 
'O', ]

def hatch_idx(i):
    assert i > 0
    return (i - 1) % len(HATCHES)

def get_hatch(i):
    if i > 0:
        return HATCHES[hatch_idx(i)]
    return ''

#
# Report
#
TEMPLATE_TOC='%s/toc.html' % HTML_DIR
TEMPLATE_TOCNODE='%s/tocnode.html' % HTML_DIR
TEMPLATE_TABLE='%s/table.html' % HTML_DIR
TEMPLATE_DATASET='%s/dataset.html' % HTML_DIR
TEMPLATE_WIDGET='%s/widget.html' % HTML_DIR
TEMPLATE_REPORT='%s/report.html' % HTML_DIR
TEMPLATE_INDEX='%s/index.html' % HTML_DIR
TEMPLATE_REPORTLIST='%s/reportlist.html' % HTML_DIR
TEMPLATE_DATAINFOPANE='%s/data_info_pane.html' % HTML_DIR
TEMPLATE_REPORTINFO='%s/report_info.html' % HTML_DIR

_TEMPLATE_CACHE={}
def html_template(filename):
    global _TEMPLATE_CACHE
    if not _TEMPLATE_CACHE.has_key(filename):
        _TEMPLATE_CACHE[filename] = Template(filename=filename)
    return _TEMPLATE_CACHE[filename]

CSSFILEPATH='%s/style.css' % HTML_DIR
JSFILEPATH='%s/script.js' % HTML_DIR
JQUERY_URL='http://code.jquery.com/jquery-1.7.2.min.js'

HTML_PLUSMINUS = '±'
HTML_NO_DATA='no data'
HTML_COMPARISON_ZERO='zero'

#
# Parser
#
class ParseError(Exception):
    pass

# detects
DETECT_DELEG='deleg'
DETECT_PNFS='pnfs'

# valid nfs versions in normalized form
NFS_VERSIONS = [ 'v2', 'v3', 'v4.0', 'v4.1' ]

# older clients need vers= (minorversion=) syntax
NFS_VERSIONS_OLD_SYNTAX = {
    'v2':   'vers=2',
    'v3':   'vers=3',
    'v4.0': 'vers=4',
    'v4.1': 'vers=4,minorversion=1',
}

# mountopt version parsers
_RE_VERS_NEW = re.compile('^v(\d+)(\.\d+)?$')
_RE_VERS_OLD_MAJOR = re.compile('^vers=(\d+)$')
_RE_VERS_OLD_MINOR = re.compile('^minorversion=(\d+)$')

def _mountopts_splitvers(mountopt):
    """ return normalized string form of NFS protocol version from mountopt """
    opts = mountopt.split(',')
    major, minor = None, None
    other = []

    for o in opts:
        m = _RE_VERS_NEW.match(o)
        if m:
            assert major == None
            assert minor == None
            major = m.group(1)
            if m.group(2):
                minor = m.group(2)[1:]
            continue

        m = _RE_VERS_OLD_MAJOR.match(o)
        if m:
            assert major == None
            major = m.group(1)
            continue

        m = _RE_VERS_OLD_MINOR.match(o)
        if m:
            assert minor == None
            minor = m.group(1)
            continue

        # otherwise something else
        other.append(o)

    if not minor and major != None and int(major) >= 4:
        minor = '0'

    if major and minor:
        return ('v%s.%s' % (major, minor), other)
    elif major:
        return ('v%s' % (major,), other)

    raise ValueError("no version found in mount option '%s'" % (mountopt))

def mountopts_version(mountopt):
    return _mountopts_splitvers(mountopt)[0]

def mountopts_normalize(mountopt):
    vers, other = _mountopts_splitvers(mountopt)
    other.sort()
    if other:
        return '%s,%s' % (vers, ','.join(other))
    return vers

def mountopts_old_syntax(mountopts):
    vers, other = _mountopts_splitvers(mountopts)
    new = NFS_VERSIONS_OLD_SYNTAX.get(vers, vers)
    if other:
        new += ',' + ','.join(other)
    return new

def groups_by_nfsvers(groups):
    gmap = {}
    for g in groups:
        vers = mountopts_version(g.mountopt)
        if not gmap.has_key(vers):
            gmap[vers] = []
        gmap[vers].append(g)
    return gmap

#
# Formatting
#
def pluralize(x, pluralstr='s'):
    if x != 1:
        return pluralstr
    return ''

#
# STATNOTE_* - disclaimers and such
#

# TODO this should be based off of some arg in workload def
#      for fixed-time tests
def statnote_filebench_times(sel):
    has_fb = False
    for w in sel.workloads:
        if w.startswith('filebench_'):
            has_fb = True
            break

    if has_fb:
        return """Filebench tests are run for a set amount of time, so the
                  time_real value is somewhat useless.
               """
    return ''

def statnote_v3_no_lock(sel):
    for mountopt in sel.mountopts:
        if mountopts_version(mountopt) == 'v3':
            return """NFSv3's locking protocol runs on a different service
                      and is not counted.
                   """
    return ''

def statnote_v41_pnfs_no_ds(sel):
    old_kernel = False
    for kernel in sel.kernels:
        # XXX a hack, and not really true since some versions < 3 do have
        #     these stats
        if kernel.startswith('2.'):
            old_kernel = True
            break

    if old_kernel:
        return """ Older linux kernels do not count READ, WRITE and COMMIT
                   operations to pNFS dataservers (unless the DS is also
                   the MDS)."""
    return ''

#
# Unit Scaling
#
SCALE = {
    'T': 1024 * 1024 * 1024 * 1024,
    'G': 1024 * 1024 * 1024,
    'M': 1024 * 1024,
    'K': 1024,
}

def fmt_scale_units(val, units):
    def near(_val, _scale):
        return _val >= (_scale * 0.9)

    scale = 1.0
    if units == 'B':
        if near(val, SCALE['T']):
            scale = SCALE['T']
            units = 'TB'
        elif near(val, SCALE['G']):
            scale = SCALE['G']
            units = 'GB'
        elif near(val, SCALE['M']):
            scale = SCALE['M']
            units = 'MB'
        elif near(val, SCALE['K']):
            scale = SCALE['K']
            units = 'KB'
    elif units == 'KB/s':
        if near(val, SCALE['G']):
            scale = SCALE['G']
            units = 'TB/s'
        elif near(val, SCALE['M']):
            scale = SCALE['M']
            units = 'GB/s'
        elif near(val, SCALE['K']):
            scale = SCALE['K']
            units = 'MB/s'

    return scale, units

#
# Better API
#
TEST_BOUND_UNKNOWN = 0
TEST_BOUND_IO = 1
TEST_BOUND_TIME = 2

BETTER_UNKNOWN = 0
BETTER_ALWAYS_LESS = 1
BETTER_ALWAYS_MORE = 2
BETTER_LESS_IF_IO_BOUND = 3 # but more if time bound
BETTER_MORE_IF_IO_BOUND = 4 # but less if time bound

BETTER_EXTRA_MASK = 0x0f
BETTER_NO_VARIANCE = 0x10

def better_info(bounds, better):
    extra = better & (~BETTER_EXTRA_MASK)
    better = better & BETTER_EXTRA_MASK

    if better == BETTER_ALWAYS_LESS:
        less_is_better = True
    elif better == BETTER_ALWAYS_MORE:
        less_is_better = False
    elif better == BETTER_LESS_IF_IO_BOUND:
        if bounds == TEST_BOUND_IO:
            less_is_better = True
        else:
            less_is_better = False
    elif better == BETTER_MORE_IF_IO_BOUND:
        if bounds == TEST_BOUND_IO:
            less_is_better = False
        else:
            less_is_better = True
    else:
        return ('', '', '')

    more = []
    if extra & BETTER_NO_VARIANCE:
        more.append(' unless workload is time bound')

    if less_is_better:
        return ('↓', 'less is better', more)
    else:
        return ('↑', 'more is better', more)

CONST_TIME_EXCUSE = " as this workload is time constrained"

def find_suffix(search, suffixes):
    """ Split 'search' into (name, suffix)
        suffixes - list of suffixes """
    assert isinstance(suffixes, (list, tuple))
    for s in suffixes:
        if search.endswith('_' + s):
            idx = len(search) - len('_' + s)
            return (search[:idx], search[idx+1:])
    raise KeyError("key %r has invalid suffix in list %r" % (search, suffixes))

#
# Console formatting
#
def inform(msg):
    pre, post = '> ', ''
    for x in msg.split('\n'):
        if x.strip():
            sys.stdout.write("%s%s%s\n" % (pre, x, post))
    sys.stdout.flush()

def warn(msg):
    pre, post = 'WARNING: ', ''
    for x in msg.split('\n'):
        if x.strip():
            sys.stderr.write("%s%s%s\n" % (pre, x, post))
    sys.stderr.flush()

def import_error(m):
    warn(m)
    sys.exit(1)

#
# Import third-party modules
#
try:
    import numpy as np
except:
    import_error("Error importing numpy - Make sure numpy is installed")

try:
    import matplotlib
except:
    import_error("Error importing matplotlib - Make sure matplotlib is installed")

def check_mpl_version():
    vers = matplotlib.__version__
    warning = False
    sv = vers.split('.')
    if int(sv[0]) < 1:
        warning = True
    elif int(sv[0]) == 1 and int(sv[1]) < 1:
        warning = True

    if warning:
        warn("matplotlib version %s < 1.1 - some graph features might not work!"
% vers) try: # Don't require $DISPLAY to be set! matplotlib.use('Agg') import matplotlib.pyplot as plt import matplotlib.font_manager as fm except: import_error("Error importing matplotlib submodules - this is probably an incompatible version of matplotlib") try: from mako.template import Template except: import_error("Error importing mako - Make sure mako is installed") nfsometer-1.9/nfsometerlib/graph.py0000644000000000000000000004200413125073146017471 0ustar rootroot00000000000000""" Copyright 2012 NetApp, Inc. All Rights Reserved, contribution by Weston Andros Adamson This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. """ #!/usr/bin/env python import multiprocessing import cPickle import os, sys, time from collection import * from config import * import selector _GRAPH_COLLECTION = None def get_collection(): global _GRAPH_COLLECTION assert _GRAPH_COLLECTION != None return _GRAPH_COLLECTION def set_collection(collection): global _GRAPH_COLLECTION if _GRAPH_COLLECTION == None: _GRAPH_COLLECTION = collection else: assert _GRAPH_COLLECTION == collection class GraphFactory: def __init__(self, collection, imagedir, serial_gen=False): self.pool = None self.pool_error = None self.gen_count = 0 self.prune_count = 0 self.cached_count = 0 self.serial_gen = serial_gen self.imagedir = imagedir self.num_proc = max(multiprocessing.cpu_count() - 2, 2) self._cache = set() self.collection = collection set_collection(collection) try: os.mkdir(self.imagedir) except OSError, e: assert e.errno == os.errno.EEXIST self._entries = set(os.listdir(imagedir)) def _cache_hit(self, src): cache_val = os.path.split(src)[-1] self._cache.add(cache_val) def _cache_seen(self, src): cache_val = os.path.split(src)[-1] res = cache_val in self._cache return res def check_pool(self): if self.pool_error != None: self.pool.terminate() sys.stderr.write("Error generating graphs. 
") sys.stderr.write("Run with --serial-graphs to see error\n\n") sys.exit(1) def error(self, e): self.pool_error = e try: self.pool.terminate() except RuntimeError: pass def pool_done(self, res): if isinstance(res, Exception): self.error(res) elif res: self.gen_count += 1 def make_uniq_str(self, graphtype, attrs): o = [ ('graphtype', graphtype), ('config_mtime', MODULE_CONFIG_MTIME), ('graph_mod_mtime', MODULE_GRAPH_MTIME), ('attrs', hash(repr(attrs))), ] hval = hash(repr(o)) return hval def make_graph(self, graphtype, attrs): classes = attrs.get('classes', None) other_attrs = [] if classes: other_attrs.append('class="%s"' % ' '.join(classes)) if attrs.has_key('groups'): if not attrs.has_key('gmap'): attrs['gmap'] = groups_by_nfsvers(attrs['groups']) gmap = attrs['gmap'] if graphtype == 'bar_and_nfsvers': all_src = self._graph_src('bar', attrs) num = len(attrs['groups']) cur = 0 sub_src = [] for vers in NFS_VERSIONS: if not gmap.has_key(vers): continue assert cur < num newattrs = dict(attrs) newattrs['groups'] = gmap[vers] newattrs['selection'] = selector.merge_selectors(gmap[vers]) newattrs['gmap'] = {vers: gmap[vers],} src = self._graph_src('bar', newattrs) sub_src.append(('vers_' + vers, src)) cur += len(gmap[vers]) selection = attrs['selection'] if len(selection.clients) > 1: for subsel in selection.foreach('client'): newattrs = dict(attrs) newattrs['groups'] = \ selector.filter_groups(attrs['groups'], subsel) newattrs['selection'] = subsel src = self._graph_src('bar', newattrs) sub_src.append(('client_' + subsel.client, src)) if len(selection.servers) > 1: for subsel in selection.foreach('server'): newattrs = dict(attrs) newattrs['groups'] = \ selector.filter_groups(attrs['groups'], subsel) newattrs['selection'] = subsel src = self._graph_src('bar', newattrs) sub_src.append(('server_' + subsel.server, src)) if len(selection.kernels) > 1: for subsel in selection.foreach('kernel'): newattrs = dict(attrs) newattrs['groups'] = \ selector.filter_groups(attrs['groups'], subsel) newattrs['selection'] = subsel src = self._graph_src('bar', newattrs) sub_src.append(('kernel_' + subsel.kernel, src)) if len(selection.paths) > 1: for subsel in selection.foreach('path'): newattrs = dict(attrs) newattrs['groups'] = \ selector.filter_groups(attrs['groups'], subsel) newattrs['selection'] = subsel src = self._graph_src('bar', newattrs) sub_src.append(('path_' + subsel.path, src)) if len(selection.detects) > 1: for subsel in selection.foreach('detect'): newattrs = dict(attrs) newattrs['groups'] = \ selector.filter_groups(attrs['groups'], subsel) newattrs['selection'] = subsel src = self._graph_src('bar', newattrs) sub_src.append(('detect_' + subsel.detect, src)) if len(selection.tags) > 1: for subsel in selection.foreach('tag'): newattrs = dict(attrs) newattrs['groups'] = \ selector.filter_groups(attrs['groups'], subsel) newattrs['selection'] = subsel src = self._graph_src('bar', newattrs) sub_src.append(('tag_' + subsel.tag, src)) def _fmt_hidden(name, value): return '' % \ (name, value) return """

    %s %s
    """ % (all_src, ' '.join(other_attrs), _fmt_hidden('data_graph_all', all_src), '\n'.join([ _fmt_hidden('data_graph_' + x, y) for x, y in sub_src ])) src = self._graph_src(graphtype, attrs) return '' % (src, ' '.join(other_attrs)) def _graph_src(self, graphtype, attrs): hval = self.make_uniq_str(graphtype, attrs) imgfile = '%s_%s.png' % (graphtype, hval) imgpath = os.path.join(self.imagedir, imgfile) src = './images/%s' % (os.path.split(imgpath)[-1],) if graphtype == 'bar': graphfunc = make_bargraph_cb elif graphtype == 'pie': graphfunc = make_pie_cb elif graphtype == 'legend': graphfunc = make_legend_cb else: raise RuntimeError('Unhandled graphtype: %r' % graphtype) # see if the same graph already exists seen = self._cache_seen(src) if not seen and not imgfile in self._entries: args = [imgpath] args.append(attrs) if self.serial_gen: graphfunc(*args) else: if self.pool == None: assert get_collection() != None self.pool = multiprocessing.Pool(processes=self.num_proc) args.insert(0, graphfunc) self.check_pool() self.pool.apply_async(graph_cb_wrapper, args, {}, self.pool_done) elif not seen: self.cached_count += 1 self._cache_hit(src) return src def prune_graphs(self): for dentry in os.listdir(self.imagedir): if self._cache_seen(dentry): continue os.unlink(os.path.join(self.imagedir, dentry)) self.prune_count += 1 def count_images(self): total = 0 for dentry in os.listdir(self.imagedir): if self._cache_seen(dentry): total += 1 return total def wait_for_graphs(self): if self.pool: self.pool.close() last_count = None while True: left = len(self._cache) - self.count_images() if last_count != None and last_count == left: # no progress, just allow join to fix things break last_count = left sys.stdout.write("\rGenerating graphs - (%u to go)......" % (left,)) sys.stdout.flush() time.sleep(1) self.pool.join() self.prune_graphs() inform('\rGraph Summary: ') if self.gen_count: print ' %u images generated' % self.gen_count if self.cached_count: print ' %u cached images' % self.cached_count if self.prune_count: print ' %u files pruned' % self.prune_count def _fmt_data(x, scale): assert not isinstance(x, (list, tuple)) if isinstance(x, Stat): return x.mean() / scale, x.std() / scale # disallow? 
elif isinstance(x, (float, int, long)): return x, 0.0 elif x == None: # when graphing, no data can just be zero return 0.0, 0.0 raise ValueError('Unexpected data type for %r' % (val,)) def _graphize_units(units): if not units: u = '' else: u = units.replace('μ', '$\mu$') return u def graph_cb_wrapper(graph_f, imgfile, attrs): try: graph_f(imgfile, attrs) except KeyboardInterrupt: return False except Exception, e: return e return True def make_bargraph_cb(imgfile, attrs): graph_width = attrs['graph_width'] graph_height = attrs['graph_height'] groups = attrs['groups'] units = attrs['units'] key = attrs['key'] no_ylabel = attrs['no_ylabel'] hatch_map = attrs['hatch_map'] selection = attrs['selection'] color_map = attrs['color_map'] collection = get_collection() _, vals = collection.gather_data([key], selection) all_means = [] for g in groups: v = vals[g][key] if v != None: all_means.append(float(v.mean())) if all_means: maxval = max(all_means) else: maxval = 0.0 scale, units = fmt_scale_units(maxval, units) units = _graphize_units(units) matplotlib.rc('ytick', labelsize=8) matplotlib.rc('xtick', labelsize=8) fig = plt.figure(1) plt.clf() plt.gcf().set_size_inches(graph_width, graph_height) ax1 = fig.add_subplot(111) ax1.set_autoscale_on(True) ax1.autoscale_view(True,True,True) for i in ax1.spines.itervalues(): i.set_linewidth(0.0) # width of bars within a group bar_width_portion = 0.6 space_width_portion = 1.0 - bar_width_portion # bar width width = bar_width_portion / len(groups) # space between bars, two extra - for prespace version_total = 0 last_vers = None for i, g in enumerate(groups): this_vers = mountopts_version(g.mountopt) if not last_vers or last_vers != this_vers: version_total += 1 last_vers = this_vers groupspace = space_width_portion / (len(groups) + version_total) # before each grouping of bars (per key) space_multiplier = groupspace + width vers_space_multiplier = float(space_multiplier) / float(version_total) version_count = 0 last_vers = None for i, g in enumerate(groups): this_vers = mountopts_version(g.mountopt) if not last_vers or last_vers != this_vers: version_count += 1 last_vers = this_vers # both map key -> hidx -> list of values valmap = {} errmap = {} max_hatch_index = 0 valmap[key] = {} errmap[key] = {} val = vals[g].get(key, None) hidx = 0 # default hatch if isinstance(val, Bucket): for s in val.foreach(): x_v, x_s = _fmt_data(s, scale) hidx = hatch_map[s.name] assert not valmap[key].has_key(hidx), \ '%u, %r' % (hidx, val) assert not errmap[key].has_key(hidx), \ '%u, %r' % (hidx, val) valmap[key][hidx] = x_v errmap[key][hidx] = x_s max_hatch_index = max(max_hatch_index, hidx) else: x_v, x_s = _fmt_data(val, scale) valmap[key][hidx] = x_v errmap[key][hidx] = x_s assert max_hatch_index >= 0 assert len(valmap) == len(errmap) ind = np.arange(1) adj = groupspace + (space_multiplier * float(i)) + \ (vers_space_multiplier * float(version_count - 1)) # add to array to account for bars and spacing pos = ind + adj bottom = [0.0] for hidx in range(max_hatch_index + 1): heights = [valmap[key].get(hidx, 0.0)] this_yerr = [errmap[key].get(hidx, 0.0)] # old versions of matplotlib dont support error_kw bar_kws = {'yerr': this_yerr, 'bottom': bottom, 'color': color_map[g], 'edgecolor': '#000000', 'alpha': 0.9, 'hatch': get_hatch(hidx), 'error_kw': dict(elinewidth=GRAPH_ERRORBAR_WIDTH, ecolor=GRAPH_ERRORBAR_COLOR, barsabove=True, capsize=1.0), } try: ax1.bar(pos, heights, width, **bar_kws) except AttributeError: # try without error_kw for older versions of mpl del 
bar_kws['error_kw'] ax1.bar(pos, heights, width, **bar_kws) assert len(bottom) == len(heights) for bx in range(len(bottom)): bottom[bx] += heights[bx] if not no_ylabel and units != None: plt.ylabel(units, size=8) fig.subplots_adjust(right=1.0) else: plt.yticks([]) fig.subplots_adjust(left=0.0, right=1.0) plt.xticks(ind, ['']) plt.xlim((0, 1)) plt.savefig(imgfile, transparent=True, bbox_inches='tight') plt.close(1) def make_legend_cb(imgfile, attr): width = attr['width'] height = attr['height'] color = attr['color'] hatch_idx = attr['hatch_idx'] matplotlib.rc('ytick', labelsize=8) matplotlib.rc('xtick', labelsize=8) fig = plt.figure(1) plt.clf() plt.gcf().set_size_inches(width, height) ax1 = fig.add_subplot(111) ax1.set_autoscale_on(True) ax1.autoscale_view(True,True,True) for i in ax1.spines.itervalues(): i.set_linewidth(0.0) ax1.get_xaxis().set_visible(False) ax1.get_yaxis().set_visible(False) ind = np.arange(1) heights = [height] # old versions of matplotlib dont support error_kw bar_kws = {'color': color, 'alpha': 0.9, 'linewidth': 1, } if color == None: bar_kws['color'] = '#ffffff' if hatch_idx != None: bar_kws['hatch'] = get_hatch(hatch_idx) bar_kws['edgecolor'] = '#000000' ax1.bar(ind, heights, width, **bar_kws) plt.yticks([]) plt.xticks(ind, [''] * len(ind)) plt.savefig(imgfile, transparent=True) plt.close(1) def make_pie_cb(imgfile, attrs): graph_width = attrs['graph_width'] graph_height = attrs['graph_height'] slice_values = attrs['slice_values'] slice_labels = attrs['slice_labels'] slice_explode = attrs['slice_explode'] slice_colors = attrs['slice_colors'] slice_hatches = attrs.get('slice_hatches', None) fig = plt.figure(1) plt.gcf().set_size_inches(graph_width, graph_height) #ax = plt.axes([0.1, 0.1, 0.8, 0.8]) slices = plt.pie(slice_values, explode=slice_explode, labels=slice_labels, autopct='', colors=slice_colors, shadow=True) if slice_hatches: for i in range(len(slices[0])): slices[0][i].set_hatch(slice_hatches[i]) plt.xlim((-1.05, 1.05)) plt.ylim((-1.05, 1.05)) plt.savefig(imgfile, transparent=True) plt.close(1) nfsometer-1.9/nfsometerlib/options.py0000644000000000000000000004745313125073146020100 0ustar rootroot00000000000000""" Copyright 2012 NetApp, Inc. All Rights Reserved, contribution by Weston Andros Adamson This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
""" import os, posix, sys import getopt import re from config import * _progname = sys.argv[0] if _progname.startswith('/'): _progname = os.path.split(_progname)[-1] # options class for parsing command line OTYPE_BOOL=1 OTYPE_ARG=2 OTYPE_LIST=3 OTYPE_HELP=4 # no arguments expected OMODES_ARG_NONE=0 # modes that need and may have OMODES_ARG_SERVERPATH=1 # modes that need and one OMODES_ARG_SERVERPATH_AND_ONE_WORKLOAD=2 # modes that need and may have OMODES_ARG_WORKLOAD=3 OMODES = { 'all': OMODES_ARG_SERVERPATH, # included in all 'fetch': OMODES_ARG_WORKLOAD, 'trace': OMODES_ARG_SERVERPATH, 'report': OMODES_ARG_NONE, # not included in all 'workloads': OMODES_ARG_NONE, 'list': OMODES_ARG_NONE, 'notes': OMODES_ARG_NONE, 'loadgen': OMODES_ARG_SERVERPATH_AND_ONE_WORKLOAD, 'examples': OMODES_ARG_NONE, 'help': OMODES_ARG_NONE, } OMODE_DEFAULT='all' class Options: # opts mode = None resultdir = RESULTS_DIR num_runs = 1 options = [] serial_graph_gen = False always_options = None randomize_traces = False tags = [] serverpath = None workloads_requested = [] _man_name = "nfsometer - NFS performance measurement tool" _man_description = """nfsometer is a performance measurement framework for running workloads and reporting results across NFS protocol versions, NFS options and Linux NFS client implementations """ _basic_usage_fmt = "%s [mode] [options]" _synopsis_fmt = "%s [options] [mode] [[] [workloads...]]" _modes_description_fmt = """ Basic usage (no mode specified): \\fB%(script)s [workloads...]\\fR This will fetch needed files, run traces, and generate reports, same as running the the 'fetch', 'trace' and 'report' stages. Advanced usage (specify modes): \\fB%(script)s list\\fR List the contents of the results directory. \\fB%(script)s workloads\\fR List available and unavailable workloads. \\fB%(script)s notes\\fR Edit the notes file of the results directory. These notes will be displayed in report headers. \\fB%(script)s loadgen \\fR Run in loadgen mode: don't record any stats, just loop over against . Only one -o option is allowed. Use the -n option to run multuple instances of the loadgen workload. When running more than one instance, the intial start times are staggered. \\fB%(script)s fetch [workloads...]\\fR Fetch all needed files for the specified workload(s). If no workloads are specified, all workloads are fetched. Fetched files are only downloaded once and are cached for future runs. \\fB%(script)s trace [workloads...]\\fR Run traces against . The traces run will be: (options + always options + tags) X (workloads) X (num runs) This will only run traces that don't already exist in the results directory. \\fB%(script)s report\\fR Generate all reports available from the results directory. \\fB%(script)s example\\fR Show examples from man page """ _examples_fmt = """ Example 1: See what workloads are available \\fB$ %(script)s workloads\\fR This command lists available workloads and will tell you why workloads are unavailable (if any exist). Example 2: Compare cthon, averaged over 3 runs, across nfs protocol versions \\fB%(script)s -n 3 server:/export cthon\\fR This example uses the default for -o: "-o v3 -o v4 -o v4.1". To see the results, open results/index.html in a web browser. Example 3: Compare cthon, averaged over 3 runs, between v3 and v4.0 only \\fB%(script)s -n 3 -o v3 -o v4 server:/export cthon\\fR This example specifies v3 and v4 only. To see the results, open results/index.html in a web browser. 
Example 4: Compare two kernels running iozone workload, averaged over 2 runs, across all nfs protocol versions nfsometer can compare two (or more) kernel versions, but has no way of building, installing or booting new kernels. It's up to the user to install new kernels. In order for these kernels to be differentiated, 'uname -a' must be different. 1) boot into kernel #1 2) \\fB%(script)s -n 2 server:/export iozone\\fR 3) boot into kernel #2 4) \\fB%(script)s -n 2 server:/export iozone\\fR 5) open results/index.html in a web browser To see the results, open results/index.html in a web browser. Example 5: Using tags Tags (the -t option) can be used to mark nfsometer runs as occurring with some configuration not captured by mount options or detectable tags, such as different sysctl settings (client side), different server side options, or different network conditions. 1) set server value foo to 2.3 2) \\fB%(script)s -o v4 -o v4.1 -t foo=2.3\\fR 3) set server value foo to 10 4) \\fB%(script)s -o v4 -o v4.1 -t foo=10\\fR What is passed to -t is entirely up to the user - it will not be interpreted or checked by nfsometer at all, so be careful! To see the results, open results/index.html in a web browser. Example 6: Always options The -o flag specifies distinct option sets to run, but sometimes there are options that should be present in each. Instead of writing each one out, you can use the -a option: \\fB%(script)s -o v3 -o v4 -a sec=krb5 server:/export iozone\\fR this is equivalent to: \\fB%(script)s -o v3,sec=krb5 -o v4,sec=krb5 server:/export iozone\\fR Example 7: Using the "custom" workload A main use case of nfsometer is the "custom" workload - it allows the user to specify the command that nfsometer is to run. NOTE: the command's cwd (current working directory) is the runroot created on the server. \\fBexport NFSOMETER_CMD="echo foo > bar"\\fR \\fBexport NFSOMETER_NAME="echo"\\fR \\fBexport NFSOMETER_DESC="Writes 4 bytes to a file"\\fR \\fB%(script)s server:/export custom\\fR This will run 3 traces (v3, v4, v4.1) against server:/export of the command: \\fBecho foo > bar\\fR. Example 8: Using the loadgen mode Loadgen runs several instances of a workload without capturing traces. The idea is that you use several clients to generate load, then another client to measure performance of a loaded server. The "real" run of nfsometer (not loadgen) should mark the traces using the -t option. 1) On client A, run the cthon workload to get a baseline of a server without any load. \\fB%(script)s trace server:/export cthon\\fR 2) When that's done, start loadgen on client B: \\fB%(script)s -n 10 loadgen server:/export dd_100m_1k\\fR This runs 10 instances of dd_100m_1k workload on server:/export. It can take several minutes to start in an attempt to stagger all the workload instances. 3) once all instances are started, run the "real" nfsometer trace on client A. Use the -t option to mark the traces as having run under load conditions: \\fB%(script)s -t "10_dd" trace server:/export cthon\\fR 4) Explain how the tests were set up in the result notes. This should be run on client A (which has the traces: \\fB%(script)s notes\\fR 5) Now generate the reports: \\fB%(script)s report\\fR Example 8: Long running nfsometer trace The nfsometer.py script currently runs in the foreground. As such, it will be killed if the tty gets a hangup or the connection to the client is closed. For the time being, %(script)s should be run in a screen session, or run with nohup and the output redirected to a file. 
1) \\fBscreen -RD\\fR 2) \\fB%(script)s -n 2 server:/export iozone\\fR 3) close terminal window (or ^A^D) ... 4) reattach later with \\fBscreen -RD\\fR 5) once nfsometer.py is done, results will be in results/index.html """ _options_def = [ ('r', 'resultdir', OTYPE_ARG, 'resultdir', ("The directory used to save results.",), "dir"), ('o', 'options', OTYPE_LIST, 'options', ("Mount options to iterate through.", "This option may be used multiple times.", "Each mount option must have a version specified.",), "mount.nfs options"), ('a', 'always-options', OTYPE_ARG, 'always_options', ("Options added to every trace.", "This option may be used multiple times.",), 'mount.nfs options'), ('t', 'tag', OTYPE_LIST, 'tags', ("Tag all new traces with 'tags'.", "This option may be used multiple times.",), 'tags'), ('n', 'num-runs', OTYPE_ARG, 'num_runs', ("Number of runs for each trace of ", " X X ",), "num runs"), (None, 'serial-graphs', OTYPE_BOOL, 'serial_graph_gen', ("Generate graphs inline while generating reports.", "Useful for debugging graphing issues.",), None), (None, 'rand', OTYPE_BOOL, 'randomize_traces', ("Randomize the order of traces",), None), ('h', 'help', OTYPE_HELP, None, ("Show the help message",), None), ] def _getopt_short(self): ret = '' for oshort, olong, otype, oname, ohelp, odesc in self._options_def: if oshort: assert len(oshort) == 1, 'multi character short option!' if otype in (OTYPE_ARG, OTYPE_LIST): ret += oshort + ':' else: ret += oshort return ret def _getopt_long(self): ret = [] for oshort, olong, otype, oname, ohelp, odesc in self._options_def: if olong: if otype in (OTYPE_ARG, OTYPE_LIST): ret.append(olong + '=') else: ret.append(olong) return ret def parse(self): shortstr = self._getopt_short() longlist = self._getopt_long() try: opts, args = getopt.getopt(sys.argv[1:], shortstr, longlist) except getopt.GetoptError, err: self.usage(str(err)) # parse options for o, a in opts: found = False for oshort, olong, otype, oname, ohelp, odesc in self._options_def: if (oshort and o == '-' + oshort) or \ (olong and o == '--' + olong): if otype == OTYPE_BOOL: setattr(self, oname, True) elif otype == OTYPE_ARG: setattr(self, oname, a) elif otype == OTYPE_LIST: getattr(self, oname).append(a) elif otype == OTYPE_HELP: self.usage() else: raise ValueError('Invalid OTYPE: %u' % (otype,)) found = True break if not found: self.error('Invalid option: %s' % (o,)) # parse and validate args # parse mode if len(args) >= 1 and args[0] in OMODES: self.mode = args[0] args = args[1:] else: self.mode = OMODE_DEFAULT mode_arg_type = OMODES[self.mode] if mode_arg_type == OMODES_ARG_SERVERPATH: # [ ... 
] if not len(args): self.error('missing argument') if args[0].find(':') < 0: self.error(" argument expected, " "but no ':' found: %r" % args[0]) self.serverpath = args[0] self.workloads_requested = args[1:] args = [] elif mode_arg_type == OMODES_ARG_SERVERPATH_AND_ONE_WORKLOAD: # if not len(args): self.error('missing argument') if args[0].find(':') < 0: self.error(" argument expected, " "but no ':' found: %r" % args[0]) self.serverpath = args[0] args = args[1:] if not len(args): self.error("expecting workload argument after ") if len(args) > 1: self.error("expecting only one workload argument after" "") self.workloads_requested = args args = [] elif mode_arg_type == OMODES_ARG_WORKLOAD: self.workloads_requested = args args = [] elif mode_arg_type == OMODES_ARG_NONE: if len(args): self.error("unexpected arguments: %s" % (' '.join(args),)) else: raise ValueError("unhandled mode_arg_type %r" % (mode_arg_type,)) # normalize if not self.options and mode_arg_type == OMODES_ARG_SERVERPATH: inform('No options specified. ' 'Using default: -o v3 -o v4 -o v4.1') self.options = ['v3', 'v4', 'v4.1'] elif not self.options and \ mode_arg_type == OMODES_ARG_SERVERPATH_AND_ONE_WORKLOAD: inform('No options specified. ' 'Using %s default: -o v4' % (self.mode,)) self.options = ['v4',] elif self.options and not mode_arg_type in \ (OMODES_ARG_SERVERPATH, OMODES_ARG_SERVERPATH_AND_ONE_WORKLOAD): self.error('options are not allowed for mode %s' % (self.mode,)) mountopts = [] for x in self.options: mountopts.extend(re.split('[ |]', x)) if self.always_options: mountopts = [ x + ',' + self.always_options for x in mountopts ] errors = [] self.mountopts = [] for x in mountopts: try: vers = mountopts_version(x) except ValueError, e: self.usage(str(e)) self.mountopts.append(x) if errors: self.error('\n'.join(errors)) self.num_runs = int(self.num_runs) self.server = None self.path = None if self.serverpath: self.server, self.path = self.serverpath.split(':', 1) self.tags = ','.join(self.tags) if mode_arg_type == OMODES_ARG_SERVERPATH_AND_ONE_WORKLOAD: if not len(self.mountopts) == 1: self.error("mode %s expects only one option", (self.mode,)) if 'custom' in self.workloads_requested: # check for env variables err = False for name in ('NFSOMETER_CMD', 'NFSOMETER_NAME', 'NFSOMETER_DESC',): if not name in posix.environ: print >>sys.stderr, "%s not set" % name err = True if err: self.error("\nCustom workload missing environment variables") def _option_help(self, man=False): lines = [] for oshort, olong, otype, oname, ohelp, odesc in self._options_def: if not odesc: odesc = '' optstrs = [] if oshort: ods = '' if odesc: ods = ' <%s>' % odesc if man: optstrs.append('\\fB-' + oshort + ods + '\\fR') else: optstrs.append('-' + oshort + ods) if olong: ods = '' if odesc: ods = '=<%s>' % odesc if man: optstrs.append('\\fB--' + olong + ods + '\\fR') else: optstrs.append('--' + olong + ods) if man: optstrs = '" %s "' % ', '.join(optstrs) else: optstrs = ', '.join(optstrs) if oname: val = getattr(self, oname) else: val = None if val: defaultstr = 'default: %r' % (val,) else: defaultstr = '' # function to split ohelp to fit on 80 column screen def _fmthelp(chunks, offset, man=False): if man: return '\n'.join([ re.sub(' +', ' ', x.replace('\n', ' ')) for x in chunks]) ret = [] for x in chunks: ret.append(x) rfmt = '\n' + (' ' * offset) return rfmt.join(ret) if man: lines.append('.sp 1') lines.append('.TP 0.5i') lines.append('.BR %s' % (optstrs,)) lines.append(_fmthelp(ohelp, 0, man=True)) if defaultstr: lines.append(defaultstr) else: 
lines.append('%s' % (optstrs,)) lines.append('%-15s%s' % ('', _fmthelp(ohelp, 17))) if defaultstr: lines.append('%-15s%s' % ('', defaultstr)) lines.append('') return lines def error(self, msg=''): print >>sys.stderr, msg print >>sys.stderr, \ '\nrun "%s --help" and "%s examples" for more info' % \ (_progname, _progname) sys.stderr.flush() sys.exit(1) def _modes_description(self, script, man=False): kwargs = {'script': script} fmt = self._modes_description_fmt % kwargs if not man: # strip man formatting return re.sub('\\\\f\S', '', fmt) return fmt def _examples(self, man=False): if not man: # strip man formatting return re.sub('\\\\f\S', '', self._examples_fmt % {'script': _progname}) return self._examples_fmt % {'script': 'nfsometer'} def _synopsis(self, script): return self._synopsis_fmt % script def examples(self): print >>sys.stdout, self._examples() def usage(self, msg=''): print >>sys.stderr, "usage: %s" % self._synopsis(_progname) print >>sys.stderr, self._modes_description(_progname) print >>sys.stderr print >>sys.stderr, "Options:" print >>sys.stderr, ' %s' % '\n '.join(self._option_help()) if msg: print >>sys.stderr print >>sys.stderr, "Error: " + msg sys.exit(1) def generate_manpage(self, output_path): o = [] o.append('.\" Manual for nfsometer') o.append('.TH man 1 "%s" "nfsometer"' % NFSOMETER_VERSION) o.append('.SH NAME') o.append(self._man_name) o.append('.SH SYNOPSIS') o.append(self._synopsis('nfsometer')) o.append('.SH DESCRIPTION') o.append(re.sub(' +', ' ', self._man_description.replace('\n', ' '))) o.append('.SH MODES') o.append(self._modes_description('nfsometer', man=True)) o.append('.SH OPTIONS') o.append('\n'.join(self._option_help(man=True))) o.append('.SH EXAMPLES') o.append(self._examples(man=True)) o.append('.SH SEE ALSO') o.append('mountstats, nfsstats') o.append('.SH BUGS') o.append('No known bugs.') o.append('.SH AUTHOR') o.append('Weston Andros Adamson (dros@netapp.com)') for i in range(len(o)): o[i] = o[i].strip().replace('-', '\\-') file(output_path, 'w+').write('\n'.join(o)) nfsometer-1.9/nfsometerlib/parse.py0000644000000000000000000011016613125073146017507 0ustar rootroot00000000000000""" Copyright 2012 NetApp, Inc. All Rights Reserved, contribution by Weston Andros Adamson This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
""" import os import re from config import * # # Regular Expressions section # these are precompiled so they are only compiled once # def _re(regex): """ short-hand wrapper for regex compilation """ return re.compile(regex) RE = { 'time_real': _re('^real\s+([\d]+)m([\d.]+)s'), 'time_user': _re('^user\s+([\d]+)m([\d.]+)s'), 'time_sys': _re('^sys\s+([\d]+)m([\d.]+)s'), 'ms_mount_opts': _re('^\s+NFS mount options:\s+(.*)$'), 'ms_read_norm': _re('^\s+applications read (\d+) bytes via read'), 'ms_write_norm': _re('^\s+applications wrote (\d+) bytes via write'), 'ms_read_odir': _re('^\s+applications read (\d+) bytes via O_DIRECT'), 'ms_write_odir': _re('^\s+applications wrote (\d+) bytes via O_DIRECT'), 'ms_read_nfs': _re('^\s+client read (\d+) bytes via NFS READ'), 'ms_write_nfs': _re('^\s+client wrote (\d+) bytes via NFS WRITE'), 'ms_rpc_line': _re('^\s+(\d+) RPC requests sent, (\d+) RPC ' \ + 'replies received \((\d)+ XIDs not found\)'), 'ms_rpc_backlog': _re('^\s+average backlog queue length: (\d)'), 'ms_ops_header': _re('^(\S+):$'), 'ms_ops_line1': _re('^\s+(\d+) ops \((\d+)%\)\s+(-?\d+) retrans ' + '\((-?\d+)%\)\s+(\d+) major timeouts'), 'ms_ops_line2': _re('^\s+avg bytes sent per op:\s+(\d+)\s+avg bytes received per op:\s+(\d+)'), 'ms_ops_line3': _re('^\s+backlog wait:\s+(\d+\.\d+)\s+RTT:\s+(\d+\.\d+)\s+total execute time:\s+(\d+\.\d+)\s+'), 'pms_xprt_tcp': _re('^\s+xprt:\s+tcp\s+(.*)'), 'pms_xprt_udp': _re('^\s+xprt:\s+udp\s+(.*)'), 'nio_infoline': _re('^.* mounted on (\S+):'), 'nio_readhdr': _re('^read:\s+'), 'nio_writehdr': _re('^write:\s+'), 'nio_numbers': _re('^\s+([\d.]+)\s+([\d.]+)\s+([\d.]+)\s+(-?\d+)\s+\((-?[\d.]+)%\)\s+([\d.]+)\s+([\d.]+)'), 'filebench_stats': _re('^.*IO Summary:\s+(\d+)\s+ops,\s+([\d.]+)\s+ops/s,\s+\((\d+)/(\d+)\s+r/w\),\s+([\d.]+)mb/s,\s+(\d+)us\s+cpu/op,\s+([\d.]+)ms\s+latency'), 'ns_count_title': _re('^(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)'), 'ns_count_data': _re('^(\d+)\s+\d+%\s+(\d+)\s+\d+%\s+(\d+)\s+\d+%\s+(\d+)\s+\d+%\s+(\d+)\s+\d+%\s+(\d+)\s+\d+%'), 'ns_count_newsection': _re('^Client nfs'), 'ns_rpc_title': _re('^Client rpc stats:'), 'ns_rpc_data': _re('^(\d+)\s+(\d+)\s+(\d+)'), # proc_mounstats # events - 27 values 'pms_events': _re('^\s+events:\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)'), # iozone output #'iozone_report_hdr': _re('^"(.*) report"'), } ## # Bucket Definitions ## BUCKET_OTHER='Other' class BucketDef: """ Used to define buckets """ def __init__(self): self._key2bucket = {} self._key2display = {} self._other_keys = set() def key_to_bucket(self, key): b = self._key2bucket.get(key, None) if not b: return BUCKET_OTHER return b def bucket_names(self): """ return all buckets """ r = list(set([ x for x in self._key2bucket.values()])) r.sort() other = BUCKET_OTHER try: r.remove(other) except ValueError: pass r.append(other) return r def add_key(self, bucket_name, key, display): if self._key2bucket.has_key(key) or key in self._other_keys: return if display: self._key2display[key] = display if bucket_name: self._key2bucket[key] = bucket_name else: self._other_keys.add(key) def keys(self): r = set(self._key2bucket.keys()) r = r.union(self._other_keys) return tuple(r) def key_display(self, key): return self._key2display.get(key, key) # global bucket definitions wall_times_bucket_def = BucketDef() exec_times_bucket_def = BucketDef() nfsstat_bucket_def = BucketDef() 
mountstat_exec_time_bucket_def = BucketDef() mountstat_rtt_bucket_def = BucketDef() mountstat_bytes_sent_bucket_def = BucketDef() mountstat_bytes_received_bucket_def = BucketDef() iozone_bucket_def = BucketDef() nfsstat_op_map_def = { 'Creation and Deletion': ('create', 'open', 'open_conf', 'open_dgrd', 'open_noat', 'mkdir', 'rmdir', 'remove', 'close', 'mknod', ), 'File Metadata': ('access', 'lookup', 'lookup_root', 'rename', 'link', 'readlink', 'symlink', ), 'Readdir': ('readdir', 'readdirplus', ), 'Getattr and Setattr': ('getattr', 'setattr', ), 'FS Metadata': ('fsstat', 'fsinfo', 'statfs', ), 'Locks and Delegations': ('lock', 'lockt', 'locku', 'rel_lkowner', 'delegreturn', 'get_lease_t', ), 'Write': ('write', 'commit', 'ds_write', ), 'Read': ('read',), 'PNFS': ('getdevinfo', 'getdevlist', 'layoutget', 'layoutcommit', 'layoutreturn', ), 'Getacl and Setacl': ('getacl', 'setacl', ), 'Session': ('create_ses', 'destroy_ses', 'exchange_id', ), } nfsstat_op_map = {} for b, ops in nfsstat_op_map_def.iteritems(): for o in ops: nfsstat_op_map[o] = b mountstat_op_map_def = { 'Creation and Deletion': ('CREATE', 'OPEN', 'MKDIR', 'RMDIR', 'REMOVE', 'CLOSE', 'OPEN_CONFIRM', 'OPEN_DOWNGRADE', ), 'File Metadata': ('ACCESS', 'LOOKUP', 'LOOKUP_ROOT', 'RENAME', 'LINK', 'READLINK', 'SYMLINK', ), 'Readdir': ('READDIR', 'READDIRPLUS', ), 'Getattr and Setattr': ('GETATTR', 'SETATTR', ), 'FS Metadata': ('FSSTAT', 'FSINFO', 'STATFS', ), 'Locks and Delegations': ('LOCK', 'LOCKT', 'LOCKU', 'RELEASE_LOCKOWNER', 'DELEGRETURN', 'GET_LEASE_TIME', ), 'Write': ('WRITE', 'COMMIT', ), 'Read': ('READ', ), 'PNFS': ('GETDEVICEINFO', 'GETDEVICELIST', 'LAYOUTGET', 'LAYOUTCOMMIT', 'LAYOUTRETURN', ), 'Getacl and Setacl': ('GETACL', 'SETACL', ), 'Session': ('CREATE_SESSION', 'DESTROY_SESSION', 'EXCHANGE_ID', ), } mountstat_op_map = {} for b, ops in mountstat_op_map_def.iteritems(): for o in ops: mountstat_op_map[o] = b def gather_buckets(collection, tracestat): gather_bucket(collection, tracestat, wall_times_bucket_def, 'Average wall-clock time of workload') gather_bucket(collection, tracestat, exec_times_bucket_def, 'Exececution times of workload') gather_bucket(collection, tracestat, nfsstat_bucket_def, 'Count of NFS operations for group %(bucket)s') gather_bucket(collection, tracestat, mountstat_exec_time_bucket_def, 'Average operation execution time for group %(bucket)s') gather_bucket(collection, tracestat, mountstat_rtt_bucket_def, 'Operation round trip time for group %(bucket)s') gather_bucket(collection, tracestat, mountstat_bytes_sent_bucket_def, 'Average bytes sent per operation for group %(bucket)s') gather_bucket(collection, tracestat, mountstat_bytes_received_bucket_def, 'Average bytes received per operation for group %(bucket)s') #gather_bucket(collection, tracestat, iozone_bucket_def, # 'Average KB/s for iozone %(bucket)s') def gather_bucket(collection, tracestat, bucket_def, descr): keys = [ x for x in bucket_def.keys() ] for k in keys: stat = tracestat.get_stat(k) if stat == None: continue b = bucket_def.key_to_bucket(k) if b != None: fmtmap = {'bucket': b} descr_fmt = descr % fmtmap tracestat.add_bucket(b, stat, descr_fmt) def parse_tracedir(collection, tracestat, tracedir, attrs): parsers = ( (parse_time, True), (parse_mountstats, True), (parse_nfsiostat, True), (parse_nfsstats, True), (parse_proc_mountstats, (int(attrs['orig_tracedir_version']) > 4)), #(parse_iozone, (attrs['workload'].startswith('iozone'))), (parse_filebench, (attrs['workload'].startswith('filebench_'))), ) for p, cond in parsers: if not 
cond: continue try: p(tracestat, tracedir, attrs) except Exception, e: collection.warn(tracedir, str(e)) def parse_time(tracestat, tracedir, attrs): prefix = 'times:' stat_desc = 'output of time(1)' filename = 'test.time' path = os.path.join(tracedir, filename) lines = [ x.strip() for x in file(path) if x.strip() ] assert len(lines) == 3 def _parse_time(minutes, seconds): return (float(minutes) * 60.0) + float(seconds) WALLTIME_BUCKET='Wall Times Time' EXECTIME_BUCKET='Exec Times Time' m = RE['time_real'].match(lines[0]) time_real = _parse_time(m.group(1), m.group(2)) tracestat.add_stat(prefix + 'Real Time', time_real, 's', 'Wall-clock time of workload execution', BETTER_ALWAYS_LESS, (wall_times_bucket_def, WALLTIME_BUCKET), filename, tracedir) tracetime = float(attrs['stoptime']) - float(attrs['starttime']) tracestat.add_stat(prefix + 'Trace Time', tracetime, 's', 'Wall-clock time of mount, workload execution, unmount and ' 'flushing of dirty data', BETTER_ALWAYS_LESS, None, filename, tracedir) diff = tracetime - float(time_real) tracestat.add_stat(prefix + 'Sync Time', diff, 's', 'Wall-clock time of mount, workload execution, unmount and ' 'flushing of dirty data', BETTER_ALWAYS_LESS, (wall_times_bucket_def, WALLTIME_BUCKET), filename, tracedir) m = RE['time_user'].match(lines[1]) time_user = _parse_time(m.group(1), m.group(2)) tracestat.add_stat(prefix + 'User Time', time_user, 's', 'Time spent executing the workload in the user context', BETTER_ALWAYS_LESS, (exec_times_bucket_def, EXECTIME_BUCKET), filename, tracedir) m = RE['time_sys'].match(lines[2]) time_sys = _parse_time(m.group(1), m.group(2)) tracestat.add_stat(prefix + 'Sys Time', time_sys, 's', 'Time spent executing the workload in the kernel context', BETTER_ALWAYS_LESS, (exec_times_bucket_def, EXECTIME_BUCKET), filename, tracedir) def parse_mountstats(tracestat, tracedir, attrs): prefix = 'mountstats:' stat_desc = 'output of mountstats(1)' filename = 'mountstats' path = os.path.join(tracedir, filename) f = file(path) for line in f: found = False m = RE['ms_mount_opts'].match(line) if m: tracestat.add_attr('mount_options', m.group(1)) continue m = RE['ms_read_norm'].match(line) if m: val = long(m.group(1)) tracestat.add_stat(prefix + 'read_normal', val, 'B', 'Bytes read through the read() syscall', BETTER_LESS_IF_IO_BOUND, None, filename, tracedir) continue m = RE['ms_write_norm'].match(line) if m: val = long(m.group(1)) tracestat.add_stat(prefix + 'write_normal', val, 'B', 'Bytes written through write() syscall', BETTER_LESS_IF_IO_BOUND, None, filename, tracedir) continue m = RE['ms_read_odir'].match(line) if m: val = long(m.group(1)) tracestat.add_stat(prefix + 'read_odirect', val, 'B', 'Bytes read through read(O_DIRECT) syscall', BETTER_LESS_IF_IO_BOUND, None, filename, tracedir) continue m = RE['ms_write_odir'].match(line) if m: val = long(m.group(1)) tracestat.add_stat(prefix + 'write_odirect', val, 'B', 'Bytes written through write(O_DIRECT) syscall', BETTER_LESS_IF_IO_BOUND, None, filename, tracedir) continue m = RE['ms_read_nfs'].match(line) if m: val = long(m.group(1)) tracestat.add_stat(prefix + 'read_nfs', val, 'B', 'Bytes read via NFS RPCs', BETTER_LESS_IF_IO_BOUND, None, filename, tracedir) continue m = RE['ms_write_nfs'].match(line) if m: val = long(m.group(1)) tracestat.add_stat(prefix + 'write_nfs', val, 'B', 'Bytes written via NFS RPCs', BETTER_LESS_IF_IO_BOUND, None, filename, tracedir) continue m = RE['ms_rpc_line'].match(line) if m: tracestat.add_stat(prefix + 'rpc_requests', long(m.group(1)), 'RPCs', 
'Count of RPC requests', BETTER_LESS_IF_IO_BOUND, None, filename, tracedir) tracestat.add_stat(prefix + 'rpc_replies', long(m.group(2)), 'RPCs', 'Count of RPC replies', BETTER_LESS_IF_IO_BOUND, None, filename, tracedir) tracestat.add_stat(prefix + 'xid_not_found', long(m.group(3)), 'RPCs', 'Count of RPC replies that couldn\'t be matched ' + 'with a request', BETTER_ALWAYS_LESS, None, filename, tracedir) continue m = RE['ms_rpc_backlog'].match(line) if m: tracestat.add_stat(prefix + 'backlog_queue_avg', long(m.group(1)), 'RPCs', 'Average number of outgoing requests on the backlog ' + 'queue', BETTER_ALWAYS_LESS, None, filename, tracedir) break # now read nfs ops op = None oplineno = 0 for line in f: m = RE['ms_ops_header'].match(line.strip()) if m: assert op == None op = m.group(1) op_bucket = mountstat_op_map.get(op, BUCKET_OTHER) oplineno = 1 continue if oplineno == 1: m = RE['ms_ops_line1'].match(line) if m: assert op != None oplineno += 1 continue elif oplineno == 2: m = RE['ms_ops_line2'].match(line) if m: tracestat.add_stat(prefix + op + ' Bytes Sent', m.group(1), 'B', 'Average bytes sent for %s operations' % op, BETTER_ALWAYS_MORE, (mountstat_bytes_sent_bucket_def, op_bucket + ' Bytes Sent', op), filename, tracedir) tracestat.add_stat(prefix + op + ' Bytes Received', m.group(2), 'B', 'Average bytes received for %s operations' % op, BETTER_ALWAYS_MORE, (mountstat_bytes_received_bucket_def, op_bucket + ' Bytes Received', op), filename, tracedir) oplineno += 1 elif oplineno == 3: m = RE['ms_ops_line3'].match(line) if m: tracestat.add_stat(prefix + op + ' RTT', m.group(2), 'ms', 'Average round trip time of %s operations' % op, BETTER_ALWAYS_LESS, (mountstat_rtt_bucket_def, op_bucket + ' RTT', op), filename, tracedir) tracestat.add_stat(prefix + op + ' Exec Time', m.group(3), 'μs', 'Average execution time of %s operations' % op, BETTER_ALWAYS_LESS, (mountstat_exec_time_bucket_def, op_bucket + ' Exec Time', op), filename, tracedir) op = None oplineno = 0 continue elif op: raise ParseError("Didn't match line: %s" % line) def parse_nfsiostat(tracestat, tracedir, attrs): prefix = 'nfsiostat:' stat_desc = 'output of nfsiostat(1)' filename = 'nfsiostat' path = os.path.join(tracedir, filename) lines = file(path).readlines() # skip until we find our mount name=None found_mnt = False warn = True got_read = False got_write = False for line in lines: if not found_mnt: m = RE['nio_infoline'].match(line) if m and m.group(1) == attrs['localpath']: found_mnt = True elif warn and line.strip(): tracestat.collection.warn(tracedir, "More than one NFS mount found, " "this will skew global stats like nfsstats") warn = False continue if got_read and got_write: break m = RE['nio_readhdr'].match(line) if m: name='read' continue m = RE['nio_writehdr'].match(line) if m: name='write' continue if name: m = RE['nio_numbers'].match(line) assert m, "Cant match line: %s" % line # name is 'read' or 'write' plural = name + 's' tracestat.add_stat(prefix + '%s_ops_per_sec' % name, m.group(1), 'ops/s', 'Operations per second of of NFS %s' % plural, BETTER_ALWAYS_MORE, None, filename, tracedir) tracestat.add_stat(prefix + '%s_kb_per_sec' % name, m.group(2), 'KB/s', 'KB per second of NFS %s' % plural, BETTER_ALWAYS_MORE, None, filename, tracedir) tracestat.add_stat(prefix + '%s_kb_per_op' % name, m.group(3), 'KB/op', 'KB per operation of NFS %s' % plural, BETTER_ALWAYS_MORE, None, filename, tracedir) tracestat.add_stat(prefix + '%s_avg_rtt_ms' % name, m.group(6), 'ms', 'Average round trip time of NFS %s' % plural, 
BETTER_ALWAYS_LESS, None, filename, tracedir) if name == "read": got_read = True elif name == "write": got_write = True name=None continue def parse_nfsstats(tracestat, tracedir, attrs): prefix = 'nfsstats:' stat_desc = 'output of nfsstats(1)' filename = 'nfsstats' path = os.path.join(tracedir, filename) lines = file(path).readlines() m = RE['ns_rpc_title'].match(lines[0]) if m: parse_idx = 4 m = RE['ns_rpc_data'].match(lines[2]) else: parse_idx = 8 m = RE['ns_rpc_title'].match(lines[4]) if m: m = RE['ns_rpc_data'].match(lines[6]) if not m: raise ParseError("Can't find RPC call count") tracestat.add_stat(prefix + 'rpc_calls', long(m.group(1)), 'Calls', 'Count of RPC calls', BETTER_LESS_IF_IO_BOUND, None, filename, tracedir) op_counts = {} titles = None # handle bug in nfsstats not clearing v4 stats... :-/ sections = 0 for line in lines[parse_idx:]: m = RE['ns_count_newsection'].match(line) if m: sections += 1 if sections > 1: break else: continue if not titles: m = RE['ns_count_title'].match(line) if m: titles = m.groups()[0:] else: m = RE['ns_count_data'].match(line) if m: for i, t in enumerate(titles): assert not op_counts.has_key(t), "dup op count %s" % t op_counts[t] = long(m.group(i+1)) titles = None for op, count in op_counts.iteritems(): if count: op_bucket = nfsstat_op_map.get(op, BUCKET_OTHER) tracestat.add_stat(prefix + op.upper() + ' Count', count, 'Calls', 'Count of %s operations' % op.upper(), BETTER_LESS_IF_IO_BOUND, (nfsstat_bucket_def, op_bucket + ' Count', op), filename, tracedir) def parse_filebench(tracestat, tracedir, attrs): prefix = 'filebench:' stat_desc = 'output of the filebench test suite' filename = 'test.log' path = os.path.join(tracedir, filename) # NOTE: BETTER_* based on fact that filebench output is only ever time bound found = False for line in file(path): m = RE['filebench_stats'].match(line) if m: tracestat.add_stat(prefix + 'op_count', m.group(1), 'fbops', 'Count of filebench operations', BETTER_ALWAYS_MORE, None, filename, tracedir) tracestat.add_stat(prefix + 'ops_per_second', m.group(2), 'fbops/s', 'Filebench operations per second', BETTER_ALWAYS_MORE, None, filename, tracedir) tracestat.add_stat(prefix + 'mb_per_second', m.group(5), 'MB/s', 'MB per second throughput', BETTER_ALWAYS_MORE, None, filename, tracedir) tracestat.add_stat(prefix + 'cpu_per_op', m.group(6), 'CPU/FBop', 'CPU usage per filebench operation', BETTER_ALWAYS_LESS, None, filename, tracedir) tracestat.add_stat(prefix + 'latency_ms', m.group(7), 'ms', 'Filebench measured latency', BETTER_ALWAYS_LESS, None, filename, tracedir) found = True break assert found, "Couldn't match filebench line: %s" % path def parse_proc_mountstats(tracestat, tracedir, attrs): prefix = 'proc_mountstats:' stat_desc = '/proc/self/mountstats after the test run' filename = 'proc_mountstats.stop' path = os.path.join(tracedir, filename) f = file(path) found = False for line in f: m = RE['pms_events'].match(line) if m: found = True tracestat.add_stat(prefix + 'inode_revalidate', long(m.group(1)), 'events', 'Count of inode_revalidate events', BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None, filename, tracedir) tracestat.add_stat(prefix + 'dentry_revalidate', long(m.group(2)), 'events', 'Count of dentry_revalidate events', BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None, filename, tracedir) tracestat.add_stat(prefix + 'data_invalidate', long(m.group(3)), 'events', 'Count of data_invalidate events', BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None, filename, tracedir) tracestat.add_stat(prefix + 'attr_invalidate', 
long(m.group(4)), 'events',
                'Count of attr_invalidate events',
                BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None,
                filename, tracedir)
            tracestat.add_stat(prefix + 'vfs_open', long(m.group(5)),
                'events', 'Count of file and directory opens',
                BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None,
                filename, tracedir)
            tracestat.add_stat(prefix + 'vfs_lookup', long(m.group(6)),
                'events', 'Count of lookups',
                BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None,
                filename, tracedir)
            tracestat.add_stat(prefix + 'vfs_access', long(m.group(7)),
                'events', 'Count of access calls',
                BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None,
                filename, tracedir)
            tracestat.add_stat(prefix + 'vfs_updatepage', long(m.group(8)),
                'events', 'Count of updatepage calls',
                BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None,
                filename, tracedir)
            tracestat.add_stat(prefix + 'vfs_readpage', long(m.group(9)),
                'events', 'Count of readpage calls',
                BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None,
                filename, tracedir)
            tracestat.add_stat(prefix + 'vfs_readpages', long(m.group(10)),
                'events', 'Count of readpages calls',
                BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None,
                filename, tracedir)
            tracestat.add_stat(prefix + 'vfs_writepage', long(m.group(11)),
                'events', 'Count of writepage calls',
                BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None,
                filename, tracedir)
            tracestat.add_stat(prefix + 'vfs_writepages', long(m.group(12)),
                'events', 'Count of writepages calls',
                BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None,
                filename, tracedir)
            tracestat.add_stat(prefix + 'vfs_getdents', long(m.group(13)),
                'events', 'Count of getdents calls',
                BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None,
                filename, tracedir)
            tracestat.add_stat(prefix + 'vfs_setattr', long(m.group(14)),
                'events', 'Count of setattr calls',
                BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None,
                filename, tracedir)
            tracestat.add_stat(prefix + 'vfs_flush', long(m.group(15)),
                'events', 'Count of flush calls',
                BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None,
                filename, tracedir)
            tracestat.add_stat(prefix + 'vfs_fsync', long(m.group(16)),
                'events', 'Count of fsync calls',
                BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None,
                filename, tracedir)
            tracestat.add_stat(prefix + 'vfs_lock', long(m.group(17)),
                'events', 'Count of lock calls',
                BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None,
                filename, tracedir)
            tracestat.add_stat(prefix + 'vfs_release', long(m.group(18)),
                'events', 'Count of release calls',
                BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None,
                filename, tracedir)
            tracestat.add_stat(prefix + 'congestion_wait', long(m.group(19)),
                'events', 'Count of congestion_wait',
                BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None,
                filename, tracedir)
            tracestat.add_stat(prefix + 'setattr_trunc', long(m.group(20)),
                'events', 'Count of setattr_trunc',
                BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None,
                filename, tracedir)
            tracestat.add_stat(prefix + 'extend_write', long(m.group(21)),
                'events', 'Count of extend_write',
                BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None,
                filename, tracedir)
            tracestat.add_stat(prefix + 'silly_rename', long(m.group(22)),
                'events', 'Count of silly_rename',
                BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None,
                filename, tracedir)
            tracestat.add_stat(prefix + 'short_read', long(m.group(23)),
                'events', 'Count of short_read',
                BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None,
                filename, tracedir)
            tracestat.add_stat(prefix + 'short_write', long(m.group(24)),
                'events', 'Count of short_write',
                BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None,
                filename, tracedir)
            tracestat.add_stat(prefix + 'delay', long(m.group(25)),
                'events',
                'Count of delays (v3: JUKEBOX, v4: ERR_DELAY, grace '
                'period, key expired)',
                BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None,
                filename, tracedir)
            tracestat.add_stat(prefix + 'pnfs_read', long(m.group(26)),
                'events', 'Count of pnfs_read calls',
                BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None,
                filename, tracedir)
            tracestat.add_stat(prefix + 'pnfs_write', long(m.group(27)),
                'events', 'Count of pnfs_write calls',
                BETTER_ALWAYS_LESS | BETTER_NO_VARIANCE, None,
                filename, tracedir)
            continue

        m = RE['pms_xprt_tcp'].match(line)
        if m:
            values = [ x for x in m.group(1).split(' ') if x ]
            if len(values) > 10:
                # older mountstats don't have so many values
                tracestat.add_stat(prefix + 'xprt_max_slots',
                    long(values[10]), 'slots',
                    'Max slots used by rpc transport',
                    BETTER_ALWAYS_LESS, None, filename, tracedir)
            continue

        m = RE['pms_xprt_udp'].match(line)
        if m:
            values = [ x for x in m.group(1).split(' ') if x ]
            if len(values) > 8:
                # older mountstats don't have so many values
                tracestat.add_stat(prefix + 'xprt_max_slots',
                    long(values[8]), 'slots',
                    'Max slots used by rpc transport',
                    BETTER_ALWAYS_LESS, None, filename, tracedir)
            continue

    assert found

def parse_iozone(tracestat, tracedir, attrs):
    prefix = 'iozone:'
    stat_desc = 'output of the iozone test suite'
    filename = 'test.log'
    path = os.path.join(tracedir, filename)
    f = file(path)

    rpt_name = None
    rpt_col_hdr = []

    # maps name -> (%u_%u) -> value
    newkeys = []

    for line in f:
        line = line.strip()

        if rpt_name:
            if not line:
                # pop report
                rpt_name = None
                rpt_col_hdr = []
                continue

            if not rpt_col_hdr:
                rpt_col_hdr = []
                for x in line.split(' '):
                    if x.strip():
                        assert x.startswith('"') and x.endswith('"')
                        rpt_col_hdr.append(x[1:-1])
            else:
                newrow = [ x for x in line.split(' ') if x.strip() ]
                row_hdr = newrow.pop(0)
                assert row_hdr.startswith('"') and row_hdr.endswith('"')
                row_hdr = row_hdr[1:-1]

                for i, val in enumerate(newrow):
                    key = '%s_%u_%u' % (rpt_name, int(row_hdr),
                                        int(rpt_col_hdr[i]))
                    newkeys.append((key.lower(), val))
        else:
            m = RE['iozone_report_hdr'].match(line)
            if m:
                rpt_name = m.group(1)
                continue

    for key, value in newkeys:
        skey = key.split('_')
        report = '_'.join(skey[:-2])
        x = int(skey[-2])
        y = int(skey[-1])

        tracestat.add_stat(prefix + key + ' iozone', long(value), 'KB/s',
            '%s: size kb: %u, reclen: %u' % (report, x, y),
            BETTER_ALWAYS_MORE,
            (iozone_bucket_def, report + ' iozone'),
            filename, tracedir)
nfsometer-1.9/nfsometerlib/report.py0000644000000000000000000013543113125073146017710 0ustar rootroot00000000000000"""
Copyright 2012 NetApp, Inc. All Rights Reserved,
contribution by Weston Andros Adamson

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
""" import os from math import sqrt, pow import time import graph from collection import * from selector import Selector, SELECTOR_ORDER from config import * from workloads import * ENABLE_PIE_GRAPHS=False def strip_key_prefix(k): s = k.split(':', 1) if len(s) == 2: return s[1] return k # handle: has data, not reference point def pct_f(x, y): if y == 0.0: return 0.0 pct = (float(x) / float(y)) * 100.0 return pct def get_legend_html(r, color_idx, hatch_idx, classes): color = None if color_idx != None: color_idx = color_idx % len(COLORS) color = COLORS[color_idx] graph_attrs = { 'width': 0.18, 'height': 0.18, 'color': color, 'hatch_idx': hatch_idx, 'classes': classes, } return r.graphs.make_graph('legend', graph_attrs) def fmt_float(f, precision=4): if f != None: w_fmt = "%%.%uf" % precision w_fmt = w_fmt % f seen_dot = False while len(w_fmt): if not seen_dot and w_fmt[-1] == '0': w_fmt = w_fmt[:-1] elif w_fmt[-1] == '.': w_fmt = w_fmt[:-1] seen_dot = True else: break return w_fmt or '0' return f def html_fmt_group(g, report_selector): assert isinstance(g, Selector) def _html_selector_thing(sel, thing): return '%s' % (thing, getattr(sel, thing)) # always display mountopt descr = _html_selector_thing(g, 'mountopt') if g.detect: descr += _html_selector_thing(g, 'detect') if g.tag: descr += _html_selector_thing(g, 'tag') if (len(report_selector.kernels) > 1 or len(report_selector.clients) > 1 or len(report_selector.servers) > 1 or len(report_selector.paths) > 1): descr += '
    ' # only display kernel, server, etc if there are more than one in this # report's view if len(report_selector.kernels) > 1: descr += _html_selector_thing(g, 'kernel') if len(report_selector.clients) > 1: descr += _html_selector_thing(g, 'client') if len(report_selector.servers) > 1: descr += _html_selector_thing(g, 'server') if len(report_selector.paths) > 1: descr += _html_selector_thing(g, 'path') return """
    %s
    """ % \ (descr, g.html()) def html_fmt_value(mean, std, units=None): if units: scale, units = fmt_scale_units(mean, units) mean = mean / scale std = std / scale fmt_mean = fmt_float(mean, 2) fmt_std = fmt_float(std, 2) if fmt_std != '0': fmt_mean += ' %s%s' % \ (HTML_PLUSMINUS, fmt_std) if units: fmt_mean += ' %s' % (units,) return fmt_mean def html_stat_info_id(sel, key=None): r = [sel,] if key != None: r.append(key) r = repr(tuple(r)).replace(',', '_').replace("'", '') r = r.replace('(', '').replace(')', '') r = r.replace(' ', '') return r class Table: """ Basic Table """ def __init__(self, report, values, groups, keys, units, nolegend=False, fmt_key=None, fmt_group=None, fmt_cell=None, noheader=False, nolabels=False, index_offset=0, classes=None): assert isinstance(values, dict) self.report = report self.values = values def _empty(x): if isinstance(x, str): if x == HTML_NO_DATA or x == HTML_COMPARISON_ZERO: return True if x: return False return True self.seen_data = False for group in groups: for k in keys: if not _empty(values[group].get(k, None)): self.seen_data = True break self.groups = groups self.keys = keys self.units = units self.nolegend = nolegend self.fmt_key = fmt_key self.fmt_cell = fmt_cell self.fmt_group = fmt_group self.noheader = noheader self.nolabels = nolabels self.index_offset = index_offset self._classes = [] if classes: self._classes.extend(classes) formatted_keys = {} for k in self.keys: formatted_keys[k] = self.html_key(k) self.formatted_keys = formatted_keys formatted_groups = {} for g in self.groups: formatted_groups[g] = self.html_group(g) self.formatted_groups = formatted_groups formatted_cells = {} for g in self.groups: for k in self.keys: formatted_cells[(g, k)] = self.html_cell(g, k) self.formatted_cells = formatted_cells def classes(self): return ' '.join(self._classes) def html_key(self, k): """ return key formatted for html """ if self.fmt_key: return self.fmt_key(k) return k def html_group(self, g): """ return group formatted for html """ # add legend legend = '' % \ (html_stat_info_id(g),) if not self.nolegend: group_idx = self.groups.index(g) cidx = color_idx(group_idx) + self.index_offset classes = ('color_box', 'color_box_link') legend += get_legend_html(self.report, cidx, None, classes) # format group text if self.fmt_group: g = self.fmt_group(g, self.report.selection) return '
    %s%s
    ' % (legend, str(g)) def html_cell(self, g, k): cell = None no_data = False val = self.values.get(g, {}).get(k, None) if isinstance(val, str): cell = val elif isinstance(val, (Stat, Bucket)): cell = html_fmt_value(val.mean(), val.std(), units=self.units) else: assert val == None, "Not a string, Stat or Bucket: %r\ng = %s, k = %s" % (val, g, k) if cell == None: cell = HTML_NO_DATA if cell in (HTML_NO_DATA, HTML_COMPARISON_ZERO): no_data = True if self.fmt_cell: cell = self.fmt_cell(g, k, self.values, cell) other = '' if no_data: other = 'class="no_data"' return '%s' % (other, cell) def empty(self): return not self.seen_data def html(self): template = html_template(TEMPLATE_TABLE) return template.render(table=self) class WideTable: """ A collection of tables, where groups are split out by nfs proto version """ def __init__(self, report, values, groups, gmap, keys, units, nolegend=False, fmt_key=None, fmt_group=None, fmt_cell=None): self.tables = [] num = len(groups) cur = 0 for vers in NFS_VERSIONS: if not gmap.has_key(vers): continue assert cur < num new = Table(report, values, gmap[vers], keys, units, nolegend=nolegend, fmt_key=fmt_key, fmt_group=fmt_group, fmt_cell=fmt_cell, index_offset=cur, classes=['data', 'data_table_%s' % vers]) cur += len(gmap[vers]) self.tables.append(new) assert cur == num def html(self): r = ''.join([ x.html() for x in self.tables]) r = """
    %s
    """ % r return r def empty(self): for t in self.tables: if not t.empty(): return False return True class TocNode: """ Table of Contents """ def __init__(self, text, section, parent): self.text = text self.section = section self.parent = parent self.children = [] def title_list(self): tl = [] node = self while node: if node.text: tl.insert(0, node.text) node = node.parent return tl def title(self): tl = self.title_list() return ' : '.join(tl) def anchor(self): return '' % self.section def num(self): return '%s' % self.section def add(self, text): if self.section: section = '%s.%s' % (self.section, len(self.children) + 1) else: section = '%s' % (len(self.children) + 1,) new_node = TocNode(text, section, self) self.children.append(new_node) return new_node def unlink(self): if self.parent: self.parent.children.remove(self) def html(self): if not self.text: template = html_template(TEMPLATE_TOC) else: template = html_template(TEMPLATE_TOCNODE) return template.render(node=self) class Dataset: """ Dataset - title, description, image, tables """ def __init__(self, selection, widget, title, units, groups, key, vals, toc, report, no_graph=False, no_title=False, tall_cell=False, subtitle='', anchor=None, fmt_key=None, fmt_cell=None, fmt_group=None, bucket_def=None): """ generate the "dataset" - a graph and a table """ self.key = key self.subtitle = subtitle self.anchor = anchor self.selection = selection self.report = report self.fmt_key = fmt_key self.fmt_cell = fmt_cell self.fmt_group = fmt_group self.bucket_def = bucket_def if not self.fmt_key: self.fmt_key = lambda x: x self.hatch_map, bucket_to_value, total_value = \ self.make_hatch_map(vals, groups, key) # XXX ugly self.all_buckets = None value_map = {} for i, g in enumerate(groups): value_map[g] = {} v = vals.get(g, {}).get(key, None) if v != None: if isinstance(v, Bucket): self.all_buckets = True for stat in v.foreach(): value_map[g][stat.name] = stat.mean() else: self.all_buckets = False value_map[g][key] = v.mean() break self.bucket_legend = '' self.bucket_pie = '' # does ordering matter here? 
bk_order = [ (k,v) for k, v in self.hatch_map.iteritems() ] bk_order.sort(lambda x,y: cmp(x[1], y[1])) table_values = {} bucket_names = [] for bucket_name, hatch_idx in bk_order: display_key = bucket_name if self.bucket_def: display_key = self.bucket_def.key_display(display_key) display_key = self.fmt_key(display_key) table_values[bucket_name] = { 'description': self.report.collection.stat_description(bucket_name), 'legend': get_legend_html(self.report, None, hatch_idx, ('cmp_ref',)), 'pct': '%0.1f' % pct_f(bucket_to_value[bucket_name], total_value), 'display_key': display_key, } bucket_names.append(bucket_name) tbl = Table(self.report, table_values, bucket_names, ('legend', 'display_key', 'description', 'pct'), '', nolegend=True, noheader=True, nolabels=True) self.bucket_legend = tbl.html() if ENABLE_PIE_GRAPHS and len(bk_order) > 1: pie_values = [] pie_colors = [] pie_hatches = [] for bucket_name, hatch_idx in bk_order: total = 0.0 for g in groups: total += value_map[g].get(bucket_name, 0.0) pie_values.append(total) pie_colors.append('#ffffff') pie_hatches.append(get_hatch(hatch_idx)) pie_scale = 0.3 * float(len(pie_values)) pie_attrs = { 'graph_width': pie_scale, 'graph_height': pie_scale, 'slice_labels': [''] * len(pie_values), 'slice_colors': pie_colors, 'slice_values': pie_values, 'slice_explode': [0.0] * len(pie_values), 'slice_hatches': pie_hatches, 'classes': ('legend_pie',), } self.bucket_pie = report.graphs.make_graph('pie', pie_attrs) self.toc = toc.add(title) no_ylabel = False # make comparison values self.comparison_vals_map = {} select_order = ('mountopt', 'detect', 'tag') self.comparison_vals_map['config'] = \ self.make_comparison_vals(vals, key, groups, select_order) select_order = ('mountopt',) self.comparison_vals_map['mountopt'] = \ self.make_comparison_vals(vals, key, groups, select_order) select_order = ('detect',) self.comparison_vals_map['detect'] = \ self.make_comparison_vals(vals, key, groups, select_order) select_order = ('tag',) self.comparison_vals_map['tag'] = \ self.make_comparison_vals(vals, key, groups, select_order) self.gmap = groups_by_nfsvers(groups) self.nfs_versions = [ v for v in NFS_VERSIONS if self.gmap.has_key(v) ] # ensure the order of groups is in nfs_version order groups = [] for v in self.nfs_versions: groups.extend(self.gmap[v]) self.color_map = {} for i, g in enumerate(groups): self.color_map[g] = COLORS[color_idx(i)] self.tab = WideTable(report, vals, groups, self.gmap, [key], units, fmt_key=fmt_key, fmt_cell=self.fmt_cell_modes, fmt_group=fmt_group) graph_height = 2.0 graph_width = 8.0 # Graph section self.graph_html = '' if not self.empty() and not no_graph: graph_attrs = { 'units': units, 'key': key, 'groups': groups, 'gmap': self.gmap, 'no_ylabel': no_ylabel, 'graph_width': graph_width, 'graph_height': graph_height, 'classes': ('data_graph',), 'selection': selection, 'hatch_map': self.hatch_map, 'color_map': self.color_map, } self.graph_html = report.graphs.make_graph('bar_and_nfsvers', graph_attrs) binfo = self.report.collection.get_better_info(selection, key) self.better_sym = binfo[0] self.better_str = binfo[1] self.better_more = binfo[2] self.description = self.report.collection.stat_description(key) if not self.description: self.description = '' def fmt_cell_modes(self, g, k, v, c): if self.fmt_cell: c = self.fmt_cell(g, k, v, c) hits = self.fmt_cell_hits(v[g].get(k, None)) c = '
    %s
    ' % (c,) if hits: c += '' % (hits,) for compare, compvals in self.comparison_vals_map.iteritems(): if compvals: c += '
    ' \ '
    %s
    ' % (compare, compvals[g][k]) stat = v[g].get(k, None) info_html = '' if stat: table_hdrs = [] table_rows = [] color_idx = COLORS.index(self.color_map[g]) if isinstance(stat, Bucket): table_hdrs.append('run') for x in stat.foreach(): hidx = self.hatch_map[x.name] hdr = get_legend_html(self.report, color_idx, hidx, classes=('data_info_hatch',)) hdr += '
    %s' % self.fmt_key(self.bucket_def.key_display(x.name)) table_hdrs.append(hdr) table_hdrs.append('total') else: table_hdrs.append('run') hdr = get_legend_html(self.report, color_idx, 0, classes=('data_info_hatch',)) hdr += '
    ' + stat.name table_hdrs.append(hdr) for run, tracedir in enumerate(stat.tracedirs()): row = [] row.append('%s' % (tracedir, run)) if isinstance(stat, Bucket): for x in stat.foreach(): row.append('%s' % (tracedir, stat.filename(), fmt_float(x.run_value(tracedir, None)))) row.append(fmt_float(stat.run_total(tracedir))) else: row.append('%s' % (tracedir, stat.filename(), fmt_float(stat.run_value(tracedir, None)))) table_rows.append(row) info_html = html_template(TEMPLATE_DATAINFOPANE).render( table_hdrs=table_hdrs, table_rows=table_rows, avg=fmt_float(stat.mean()), std=fmt_float(stat.std())) c += '' % info_html return c def empty(self): return self.tab.empty() def make_hatch_map(self, values, groups, key): # calc magnitude of each key across ALL groups for ordering key2val = {} total_val = 0.0 for g in groups: stat = values[g].get(key, None) if stat == None: continue if isinstance(stat, Bucket): for sub in stat.foreach(): if not key2val.has_key(sub.name): key2val[sub.name] = 0.0 key2val[sub.name] += sub.mean() total_val += sub.mean() else: # a basic Stat - makes hatch map with one entry if not key2val.has_key(stat.name): key2val[stat.name] = 0.0 key2val[stat.name] += stat.mean() total_val += stat.mean() ordered = [ (k, v) for k, v in key2val.iteritems() ] ordered.sort(lambda x,y: cmp(x[1], y[1])) ordered.reverse() k2h = {} for i, kv in enumerate(ordered): assert not k2h.has_key(kv[0]) k2h[kv[0]] = i return k2h, key2val, total_val def html(self): template = html_template(TEMPLATE_DATASET) return template.render(dataset=self) def make_comparison_vals(self, vals, key, groups, select_order): newvals = {} #select_order = ('mountopt', 'detect', 'tag') compare_groups = [] for g in groups: idx = None for i, cg in enumerate(compare_groups): if g.compare_order(cg[0], select_order) == 0: # found a group! idx = i break if idx != None: compare_groups[idx].append(g) # new group else: compare_groups.append([g,]) for cg in compare_groups: ref_val = None ref_g = None for g in cg: if not newvals.has_key(g): newvals[g] = {} # handle no data v = vals[g].get(key, None) if v == None: newvals[g][key] = HTML_NO_DATA continue # handle zero if v.empty(): newvals[g][key] = HTML_COMPARISON_ZERO continue # find reference point if not ref_val: ref_val = v ref_g = g vstr = get_legend_html(self.report, groups.index(g), None, ('cmp_ref',)) newvals[g][key] = vstr continue diff_mean = pct_f(v.mean() - ref_val.mean(), ref_val.mean()) diff_std = sqrt((pow(ref_val.std(),2) + pow(v.std(),2))/2.0) diff_std = pct_f(diff_std, ref_val.mean()) operator = '-' if diff_mean >= 0.0: operator = '+' diff_val_str = '%0.2f' % abs(diff_mean) diff_std_str = '%0.2f' % abs(diff_std) ref_idx = groups.index(ref_g) if diff_val_str == '0.00' and diff_std_str == '0.00': operator = '=' vstr = '
    %s
    %s' % \ (operator, get_legend_html(self.report, ref_idx, None, ('cmp_ref',))) else: cell = html_fmt_value(abs(diff_mean), abs(diff_std)) vstr = """%s
    %s
    %s%%
    """ % \ (get_legend_html(self.report, ref_idx, None, ('cmp_ref',)), operator, cell) newvals[g][key] = vstr return newvals def fmt_cell_hits(self, value): classes = ('hatch_hit',) if isinstance(value, Bucket): stat_list = [ x for x in value.foreach() ] stat_list.sort(lambda x,y: -1 * cmp(x.mean(), y.mean())) units = self.report.collection.stat_units(value.name) pie_html = '' if ENABLE_PIE_GRAPHS and len(stat_list) > 1: pie_values = [ x.mean() for x in stat_list ] pie_attrs = { 'graph_width': 0.8, 'graph_height': 0.8, 'slice_labels': [''] * len(stat_list), 'slice_colors': ['#ffffff'] * len(stat_list), 'slice_values': pie_values, 'slice_explode': [0.0] * len(stat_list), 'slice_hatches': [ get_hatch(self.hatch_map[x.name]) for x in stat_list ], 'classes': ('breakdown_pie',), } pie_html = self.report.graphs.make_graph('pie', pie_attrs) out = [pie_html + ''] total = sum([ x.mean() for x in stat_list ]) for stat in stat_list: legend = get_legend_html(self.report, None, self.hatch_map[stat.name], classes) fmt = html_fmt_value(stat.mean(), stat.std(), units=units) if value.mean(): pct = (stat.mean() / total) * 100.0 else: pct = 0.0 out.append('' % \ (pct, legend, fmt)) fmt = html_fmt_value(total, 0, units=units) out.append('' % \ (fmt,)) out.append('
    %0.2f%%%s%s
    total%s
    ') return '
    %s
' % ('\n'.join(out))

        return ''

#
# WIDGETS
#
class Widget:
    def __init__(self, collection, selection, report, toc):
        assert self.widget
        assert self.desc

        self.collection = collection
        self.selection = selection
        self.report = report
        self.toc = toc.add(self.widget)
        self.datasets = []

        self.statnote_mesgs = [ f(selection)
                                for f in getattr(self, 'statnotes', []) ]
        self.statnote_mesgs = [ m for m in self.statnote_mesgs if m ]

        self.setup()

    def setup(self):
        raise NotImplementedError

    def new_dataset(self, selection, title, groups, key, vals, **kwargs):
        collection = self.report.collection
        units = kwargs.get('units', collection.stat_units(key))
        try:
            del kwargs['units']
        except:
            pass

        if not 'fmt_group' in kwargs:
            kwargs['fmt_group'] = html_fmt_group
        kwargs['fmt_key'] = strip_key_prefix

        new = Dataset(selection, self.widget, title, units, groups, key,
                      vals, self.toc, self.report, **kwargs)

        if not new.empty():
            self.datasets.append(new)
        else:
            new.toc.unlink()

    def empty(self):
        return len(self.datasets) == 0

    def html(self):
        template = html_template(TEMPLATE_WIDGET)
        return template.render(widget=self, dataset_class=Dataset)

class SimpleWidget(Widget):
    ds_info = None
    bucket_table_html = ''
    bucket_pie_html = ''

    def __init__(self, collection, selection, report, toc):
        assert len(self.ds_info)

        self.keys = set()
        for t, key in self.ds_info:
            if key in self.keys:
                raise ValueError("key %s already used in ds_info" % key)
            self.keys.add(key)
        self.keys = tuple(self.keys)

        Widget.__init__(self, collection, selection, report, toc)

    def setup(self):
        groups, vals = self.collection.gather_data(self.keys, self.selection)

        for t, key in self.ds_info:
            self.new_dataset(self.selection, t, groups, key, vals)

class Widget_RunTimes(SimpleWidget):
    widget = 'Times'
    desc = 'Run times of workload as measured by time(1)'
    statnotes = (statnote_filebench_times,)

    ds_info = (
        ('Trace Time', 'times:time_trace'),
        ('Other Time', 'times:time_other'),
        ('Real Time', 'times:time_real'),
        ('Sys Time', 'times:time_sys'),
        ('User Time', 'times:time_user'),
    )

class Widget_Filebench(SimpleWidget):
    widget = 'Filebench'
    desc = 'Stats collected from the Filebench test suite output.'

    ds_info = (
        ('Operation count', 'filebench:op_count'),
        ('Operations/second', 'filebench:ops_per_second'),
        ('MB/second', 'filebench:mb_per_second'),
        ('CPU/Operation', 'filebench:cpu_per_op'),
        ('Latency', 'filebench:latency_ms'),
    )

class Widget_NfsBytes(SimpleWidget):
    widget = 'Bytes'
    desc = 'Bytes read and written by syscalls (normal and O_DIRECT) ' \
           'and NFS operations.'

    ds_info = (
        ('Read Syscalls', 'mountstats:read_normal'),
        ('Write Syscalls', 'mountstats:write_normal'),
        ('O_DIRECT Read Syscalls', 'mountstats:read_odirect'),
        ('O_DIRECT Write Syscalls', 'mountstats:write_odirect'),
        ('Read NFS calls', 'mountstats:read_nfs'),
        ('Write NFS calls', 'mountstats:write_nfs'),
    )

class Widget_RpcCounts(SimpleWidget):
    widget = 'RPC'
    desc = 'RPC message counts'

    ds_info = (
        ('Calls', 'nfsstats:rpc_calls'),
    )

# XXX not used?
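# Adding a new widget to reports is a matter of subclassing SimpleWidget and
# listing (title, stat key) pairs in ds_info. A minimal sketch (the class and
# its title strings are hypothetical; the key reuses an existing nfsstats
# stat):
#
#   class Widget_Example(SimpleWidget):
#       widget = 'Example'
#       desc = 'Example widget built from already-parsed stats'
#
#       ds_info = (
#           ('RPC Call Count', 'nfsstats:rpc_calls'),
#       )
#
# Each ds_info key must be unique and must match a stat name produced by the
# parse layer; SimpleWidget.setup() gathers the matching data and builds one
# Dataset per entry. To appear in generated reports, the class would also
# need to be listed in _WIDGET_ORDER below.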
class Widget_RpcStats(SimpleWidget): widget = 'RPC' desc = 'RPC message counts' statnotes = (statnote_v3_no_lock, statnote_v41_pnfs_no_ds) #'xid_not_found', #'backlog_queue_avg', ds_info = ( ('RPC Requests', 'mountstats:rpc_requests'), ('RPC Replies', 'mountstats:rpc_requests'), ) class Widget_MaxSlots(SimpleWidget): widget = 'Max Slots' desc = 'Max slots used for rpc transport' ds_info = ( ('Max Slots', 'proc_mountstats:xprt_max_slots'), ) class Widget_Nfsiostat(SimpleWidget): widget = 'Throughput' desc = 'Throughput statistics' statnotes = [statnote_v41_pnfs_no_ds] ds_info = ( ('Read KB/s', 'nfsiostat:read_kb_per_sec'), ('Write KB/s', 'nfsiostat:write_kb_per_sec'), ('Read Operations/s', 'nfsiostat:read_ops_per_sec'), ('Write operations/s', 'nfsiostat:write_ops_per_sec'), ('Read Average KB per Operation', 'nfsiostat:read_kb_per_op'), ('Write Average KB per Operation', 'nfsiostat:write_kb_per_op'), ('Read Average RTT', 'nfsiostat:read_avg_rtt_ms'), ('Write Average RTT', 'nfsiostat:write_avg_rtt_ms'), ) class Widget_VfsEvents(SimpleWidget): widget = 'VFS Events' desc = 'Event counters from the VFS layer' ds_info = ( ('Open', 'proc_mountstats:vfs_open'), ('Lookup', 'proc_mountstats:vfs_lookup'), ('Access', 'proc_mountstats:vfs_access'), ('Updatepage', 'proc_mountstats:vfs_updatepage'), ('Readpage', 'proc_mountstats:vfs_readpage'), ('Readpages', 'proc_mountstats:vfs_readpages'), ('Writepage', 'proc_mountstats:vfs_writepage'), ('Writepages', 'proc_mountstats:vfs_writepages'), ('Getdents', 'proc_mountstats:vfs_getdents'), ('Setattr', 'proc_mountstats:vfs_setattr'), ('Flush', 'proc_mountstats:vfs_flush'), ('Fsync', 'proc_mountstats:vfs_fsync'), ('Lock', 'proc_mountstats:vfs_lock'), ('Release', 'proc_mountstats:vfs_release'), ) class Widget_InvalidateEvents(SimpleWidget): widget = 'Validation Events' desc = 'Counters for validation events' ds_info = ( ('Inode Revalidate', 'proc_mountstats:inode_revalidate'), ('Dentry Revalidate', 'proc_mountstats:dentry_revalidate'), ('Data Invalidate', 'proc_mountstats:data_invalidate'), ('Attr Invalidate', 'proc_mountstats:attr_invalidate'), ) class Widget_PnfsReadWrite(SimpleWidget): widget = 'pNFS Events' # XXX counts or bytes?? desc = 'Counters for pNFS reads and writes' ds_info = ( ('Reads', 'proc_mountstats:pnfs_read'), ('Writes', 'proc_mountstats:pnfs_write'), ) class Widget_OtherEvents(SimpleWidget): widget = 'NFS Events' desc = 'Counters for NFS events' ds_info = ( ('Short Read', 'proc_mountstats:short_read'), ('Short Write', 'proc_mountstats:short_write'), ('Congestion Wait', 'proc_mountstats:congestion_wait'), ('Extend Write', 'proc_mountstats:extend_write'), ('Setattr Truncate', 'proc_mountstats:setattr_trunc'), ('Delay', 'proc_mountstats:delay'), ('Silly Rename', 'proc_mountstats:silly_rename'), ) class BucketWidget(Widget): bucket_def = None bucket_table_html = '' bucket_pie_html = '' def __init__(self, collection, selection, report, toc): assert self.bucket_def assert self.desc Widget.__init__(self, collection, selection, report, toc) def setup(self): bucket_names = self.bucket_def.bucket_names() groups, vals = self.collection.gather_data(bucket_names, self.selection) bucket_totals = {} bucket_hits = {} total = 0.0 for bucket_name in bucket_names: bucket_totals[bucket_name] = 0.0 bucket_hits[bucket_name] = set() # find all Buckets across all groups this_bucket = [] # TODO: move unit gathering to Bucket? 
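            # Every stat that falls into the same bucket is expected to share
            # a single unit (e.g. all 'ms' or all 'KB/s'); the assert below
            # enforces that, and the unit for the whole dataset is taken from
            # the first stat seen.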
units = None for g in groups: bucket = vals[g].get(bucket_name, None) if bucket != None: this_bucket.append(bucket) for stat in bucket.foreach(): m = stat.mean() total += m bucket_totals[bucket_name] += m bucket_hits[bucket_name].add(stat.name) u = self.collection.stat_units(stat.name) if not units: units = u else: assert u == units # skip empty datasets if not this_bucket or all([ x.empty() for x in this_bucket]): continue self.new_dataset(self.selection, bucket_name, groups, bucket_name, vals, tall_cell=True, bucket_def=self.bucket_def, units=units) bucket_info = [ (k, v) for k, v in bucket_totals.iteritems() ] bucket_info.sort(lambda x, y: cmp(x[1], y[1]) * -1) bucket_info = [ x for x in bucket_info if x[1] ] vals = {} for name, btotal in bucket_info: pct = (btotal / total) * 100.0 hits = list(bucket_hits[name]) hits = [ self.bucket_def.key_display(h) for h in bucket_hits[name] ] hits.sort() vals[name] = {'pct': "%.01f%%" % pct, 'hits': ', '.join(hits)} if len(bucket_info) > 1: tbl = Table(self.report, vals, [ x[0] for x in bucket_info if x[1] ], ['pct', 'hits'], '', noheader=True) self.bucket_table_html = tbl.html() if ENABLE_PIE_GRAPHS: pie_size = float(len(bucket_info)) / 2.0 pie_attrs = { 'graph_width': pie_size, 'graph_height': pie_size, 'slice_labels': [''] * len(bucket_info), 'slice_colors': COLORS, 'slice_values': [ x[1] for x in bucket_info ], 'slice_explode': [0.0] * len(bucket_info), 'classes': ('bucket_pie',), } self.bucket_pie_html = \ self.report.graphs.make_graph('pie', pie_attrs) class Widget_Iozone(BucketWidget): widget = 'Iozone' desc = 'Iozone Averages' bucket_def = parse.iozone_bucket_def class Widget_WallTimes(BucketWidget): widget = 'Wall Times' desc = 'Wall-clock times of workloads' bucket_def = parse.wall_times_bucket_def class Widget_ExecTimes(BucketWidget): widget = 'Exec Times' desc = 'Execution times of workloads' bucket_def = parse.exec_times_bucket_def class Widget_NfsOpsCount(BucketWidget): widget = 'NFS Operations' desc = 'Count of NFS operations' statnotes = (statnote_v3_no_lock,) bucket_def = parse.nfsstat_bucket_def class Widget_NfsOpsExec(BucketWidget): widget = 'Exec Time' desc = 'Execution time of NFS operations' statnotes = (statnote_v3_no_lock, statnote_v41_pnfs_no_ds) bucket_def = parse.mountstat_exec_time_bucket_def class Widget_NfsOpsRtt(BucketWidget): widget = 'RTT by Operation Group' desc = 'Round trip time of NFS operations' statnotes = (statnote_v3_no_lock, statnote_v41_pnfs_no_ds) bucket_def = parse.mountstat_rtt_bucket_def class Widget_NfsOpsBytesSent(BucketWidget): widget = 'Bytes Sent' desc = 'Average bytes sent for NFS operations' statnotes = (statnote_v3_no_lock, statnote_v41_pnfs_no_ds) bucket_def = parse.mountstat_bytes_sent_bucket_def class Widget_NfsOpsBytesReceived(BucketWidget): widget = 'Bytes Received' desc = 'Average bytes received for NFS operations' statnotes = (statnote_v3_no_lock, statnote_v41_pnfs_no_ds) bucket_def = parse.mountstat_bytes_received_bucket_def class Info: def __init__(self, collection, selection, report, toc): if toc: self.toc = toc.add('Info') else: self.toc = None self.collection = collection self.selection = selection self.report = report def html(self): self.selector_infos = [] def _join_lists(x): if isinstance(x, (list, tuple)): return ', '.join(x) return x def _htmlize_list(x): if isinstance(x, (list, tuple)): return '\n
    '.join([ str(y).replace(',', ' ') for y in x]) return x last_info = None total_runs = 0 start_min = None stop_max = None seen_mdts = [] mount_options = {} workload_info = [] if isinstance(self.report, Report): topsel = self.report.selection else: topsel = self.collection.selection for wsel in topsel.foreach('workload'): workload_name = wsel.fmt('workload') # XXX 0? workload_command = \ self.collection.get_attr(wsel, 'workload_command')[0] workload_description = \ self.collection.get_attr(wsel, 'workload_description')[0] wdesc = '%s' \ '%s' % \ (workload_name, workload_description) command = _htmlize_list(workload_command.split('\n')) if isinstance(self.report, ReportSet): title = _make_report_title(self.collection, wsel) path = _make_report_path(title) rpts = '%s' % (path, title) else: rpts = '' workload_info.append((wdesc, command, rpts)) for sel in wsel.foreach(): if self.collection.has_traces(sel): trace = self.collection.get_trace(sel) mdt = sel.mountopt if sel.detect: mdt += ' ' + _join_lists(sel.detect) if sel.tags: mdt += ' ' + _join_lists(sel.tags) real_info = { 'workload': sel.workload, 'kernel': sel.kernel, 'mdt': mdt, 'mountopt': sel.mountopt, 'detect': sel.detect, 'tags': sel.tags, 'client': sel.client, 'server': sel.server, 'path': sel.path, 'runs': trace.num_runs(), 'starttime': min(trace.get_attr('starttime')), 'stoptime': max(trace.get_attr('stoptime')), 'mount_options': trace.get_attr('mount_options'), } total_runs += real_info['runs'] if not start_min: start_min = real_info['starttime'] else: start_min = min(start_min, real_info['starttime']) if not stop_max: stop_max = real_info['stoptime'] else: stop_max = max(stop_max, real_info['stoptime']) if not mdt in seen_mdts: seen_mdts.append(mdt) if not mount_options.has_key(mdt): mount_options[mdt] = set() mount_options[mdt] = \ mount_options[mdt].union(real_info['mount_options']) # lowlite (opposite of hilite) values same as prev row. info = {} ignore = ('runs',) for k in real_info.keys(): if not k in ignore and last_info and \ real_info[k] == last_info[k]: info[k] = '%s' % \ (real_info[k],) else: info[k] = real_info[k] # recompute mdt info['mdt'] = info['mountopt'] if info['detect']: info['mdt'] += ' ' + _join_lists(info['detect']) if info['tags']: info['mdt'] += ' ' + _join_lists(info['tags']) self.selector_infos.append(info) last_info = real_info for mdt in seen_mdts: tmp = list(mount_options[mdt]) tmp.sort() mount_options[mdt] = [ x.replace(',', ', ') for x in tmp ] self.total_runs = total_runs self.mount_options = mount_options self.seen_mdts = seen_mdts self.times = 'ran between %s and %s' % \ (time.ctime(start_min), time.ctime(stop_max)) if isinstance(self.report, Report): self.workload = workload_info[0][0] self.command = workload_info[0][1] else: self.workload = None self.command = None # gather warnings self.warnings = self.collection.warnings() if self.warnings: self.warnings = \ [ '%s:
    • %s
    ' % (d, '
  • '.join(w)) for d, w in self.warnings ] self.warnings = '
  • %s
  • ' % '
  • '.join(self.warnings) # user notes self.usernotes = self.collection.notes_get() self.more = [] if isinstance(self.report, ReportSet): self.more = [ """ %s
    command:
    %s
    reports:
    %s """ % x for x in workload_info ] template = html_template(TEMPLATE_REPORTINFO) return template.render(info=self, fmt_class=self._fmt_class) def _fmt_class(self, key): """ format class based on 'key' """ k = key.replace(' ', '_') # get rid of trailing 's' so plural and singular can share css if k.endswith('s'): k = k[:-1] return k _WIDGET_ORDER = ( #Widget_Iozone, Widget_Filebench, Widget_WallTimes, Widget_ExecTimes, Widget_NfsBytes, Widget_Nfsiostat, Widget_RpcCounts, Widget_MaxSlots, Widget_NfsOpsCount, Widget_NfsOpsExec, Widget_NfsOpsRtt, Widget_NfsOpsBytesSent, Widget_NfsOpsBytesReceived, Widget_VfsEvents, Widget_InvalidateEvents, Widget_PnfsReadWrite, Widget_OtherEvents, ) def _make_report_path(title): path = title path = path.replace(' ', '_') # erase certain chars for c in ['/', ':', ',']: path = path.replace(c, '') path = path.lower() + '.html' return path.replace('_report', '') def _make_report_title(collection, selection): title = "Report" info = selection.display_info(collection.selection) if info: out = [] for x in info: if x[0].startswith('workload'): out.append(str(x[1])) else: out.append('%s: %s' % x) title += ': ' + ', '.join(out) return title class Report: show_zeros = False widget_classes = _WIDGET_ORDER def __init__(self, rptset, collection, selection, reportdir, graphs, cssfile): self.rptset = rptset self.collection = collection self.selection = selection self.reportdir = reportdir self.graphs = graphs self.cssfile = cssfile self.toc = TocNode(None, None, None) self.title = _make_report_title(collection, selection) self.path = _make_report_path(self.title) self.report_info = Info(collection, selection, self, self.toc) self.widgets = [] for widget_class in self.widget_classes: w = widget_class(self.collection, selection, self, self.toc) if not w.empty(): self.widgets.append(w) else: w.toc.unlink() def empty(self): return len(self.widgets) == 0 def html(self): template = html_template(TEMPLATE_REPORT) return template.render(report=self) class ReportSet: def __init__(self, collection, serial_graph_gen): self.collection = collection self.reportdir = collection.resultsdir self.imagedir = os.path.join(self.reportdir, 'images') self.graphs = graph.GraphFactory(self.collection, self.imagedir, serial_gen=serial_graph_gen) self.cssfilepath = CSSFILEPATH self.cssfile = os.path.split(self.cssfilepath)[-1] self.jqueryurl = JQUERY_URL self.jsfilepath = JSFILEPATH self.jsfile = os.path.split(self.jsfilepath)[-1] self._clear_files() self._write_extrafiles() self.reportset_info = Info(collection, collection.selection, self, None) def _clear_files(self): os.system("rm %s/*.html 2> /dev/null" % self.reportdir) os.system("rm %s/*.css 2> /dev/null" % self.reportdir) # add legend to styles: def _write_extrafiles(self): os.system('cp %s %s' % (self.jsfilepath, self.reportdir)) os.system('cp %s %s' % (self.cssfilepath, self.reportdir)) def _write_report(self, r): abs_path = os.path.join(self.reportdir, r.path) file(abs_path, 'w+').write(r.html()) print " %s" % r.path def _write_index(self): path = 'index.html' abs_path = os.path.join(self.reportdir, path) file(abs_path, 'w+').write(self.html_index()) print " %s" % path def _step_through_reports(self, cb_f): for x in self.collection.selection.foreach('workload'): cb_f(x) def generate_report(self, selection): if not self.collection.has_traces(selection): return r = Report(self, self.collection, selection, self.reportdir, self.graphs, self.cssfile) self._write_report(r) def generate_reports(self): check_mpl_version() print 
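        # Report generation flow (sketch): write the top-level index first,
        # then one HTML report per workload via generate_report(), and
        # finally block until the GraphFactory workers have rendered all
        # queued images. A typical driver (hypothetical variable names):
        #
        #   rs = ReportSet(collection, serial_graph_gen=False)
        #   rs.generate_reports()  # index.html plus one report per workload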
        inform("Generating Reports")
        print

        self._write_index()
        self._step_through_reports(self.generate_report)

        print
        self.graphs.wait_for_graphs()

    def html_index(self):
        """ generate an index file """
        template = html_template(TEMPLATE_INDEX)
        return template.render(index=self)

    def html_toc(self, current_title=''):
        """ generate a
    tag with the index in it """ template = html_template(TEMPLATE_REPORTLIST) return template.render(index=self, current_title=current_title) nfsometer-1.9/nfsometerlib/selector.py0000644000000000000000000001364113125073146020215 0ustar rootroot00000000000000""" Copyright 2012 NetApp, Inc. All Rights Reserved, contribution by Weston Andros Adamson This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. """ from config import * SELECTOR_ORDER=( 'workload', 'kernel', 'mountopt', 'detect', 'tag', 'client', 'server', 'path', ) _valid_things = set(SELECTOR_ORDER) def _fmt(name, x, default=None, short=True, sep=' '): if isinstance(x, (list, tuple)): return sep.join([ _fmt(name, y) for y in x ]) return x class Selector(object): """ This class is used to specify selection of the current query """ def __init__(self, *args): things = [] if len(args) == 0: # make all empty args args = [ [] for i in range(len(SELECTOR_ORDER)) ] assert len(args) == len(SELECTOR_ORDER) for idx, name in enumerate(SELECTOR_ORDER): obj = args[idx] if not isinstance(obj, (list, tuple)): obj = [obj] setattr(self, name + 's', tuple(obj)) def __str__(self): out = [] for name in SELECTOR_ORDER: obj = getattr(self, name + 's') out.append('%s%s: %s' % (name, pluralize(len(obj)), ', '.join(obj))) return ', '.join(out) def __hash__(self): args = [] for name in SELECTOR_ORDER: obj = getattr(self, name + 's') assert len(obj) == 1, \ "Can't hash selector with %s length != 1 - %r" % (name, obj) args.append(obj) return hash(tuple(args)) def __cmp__(self, other): for name in SELECTOR_ORDER: r = cmp(getattr(self, name + 's'), getattr(other, name + 's')) if r: return r return 0 def compare_order(self, other, order): for name in order: r = cmp(getattr(self, name + 's'), getattr(other, name + 's')) if r != 0: return r return 0 def __repr__(self): args = [] for name in SELECTOR_ORDER: obj = getattr(self, name + 's') args.append(obj) return 'Selector(%s)' % (', '.join([repr(x) for x in args]),) def __getattr__(self, attr): superself = super(Selector, self) if attr in SELECTOR_ORDER: obj = getattr(self, attr + 's') assert len(obj) == 1, "%s is not singular" % attr return obj[0] elif hasattr(superself, attr): return getattr(superself, attr) else: raise AttributeError, "invalid attribute: %r" % attr def __add__(self, other): new = Selector() for name in SELECTOR_ORDER: vals = set(getattr(self, name + 's')) for x in getattr(other, name + 's'): vals.add(x) setattr(new, name + 's', list(vals)) return new def html(self): out = [] for name in SELECTOR_ORDER: obj = getattr(self, name + 's') out.append('%s%s: %s' % (name, pluralize(len(obj)), ', '.join(obj))) return '
'.join(out)

    def is_valid_key(self):
        for name in SELECTOR_ORDER:
            obj = getattr(self, name + 's')
            if len(obj) != 1:
                return False
        return True

    def _foreach_thing(self, thing):
        if isinstance(thing, (list, tuple)):
            thing = list(thing)
            more_things = thing[1:]
            thing = thing[0]
        else:
            assert thing in _valid_things
            more_things = []

        for x in getattr(self, thing + 's'):
            args = []
            for name in SELECTOR_ORDER:
                if name == thing:
                    obj = x
                else:
                    obj = getattr(self, name + 's')
                args.append(obj)

            sel = Selector(*args)

            if more_things:
                for y in sel._foreach_thing(more_things):
                    yield y
            else:
                yield sel

    def foreach(self, thing=None):
        if thing == None:
            thing = SELECTOR_ORDER
        for x in self._foreach_thing(thing):
            yield x

    def fmt(self, thing, short=True, title=False):
        x = getattr(self, thing + 's')
        return _fmt(thing, x, default = lambda x : ' '.join(x), short=short)

    def title(self, thing):
        x = getattr(self, thing + 's')
        return "%s%s" % (thing, pluralize(len(x)))

    def display_info(self, all_selector, show_all=False, sep=' ',
                     pre='', post=''):
        display_info = []
        for name in SELECTOR_ORDER:
            obj = getattr(self, name + 's')
            all_obj = getattr(all_selector, name + 's')

            if show_all or obj != all_obj:
                pl = pluralize(len(obj))
                entry = ('%s%s' % (name, pl),
                         pre + str(_fmt(name, obj, sep=sep)) + post)
                display_info.append(entry)

        return display_info

    def contains(self, other):
        for name in SELECTOR_ORDER:
            this = set(getattr(self, name + 's'))
            other_list = getattr(other, name + 's')

            # make sure each element of other is in self
            for o in other_list:
                if not o in this:
                    return False

        return True

def merge_selectors(selectors):
    total = Selector()
    for s in selectors:
        total += s
    return total

def filter_groups(groups, selector):
    return [ g for g in groups if selector.contains(g) ]
nfsometer-1.9/nfsometerlib/trace.py0000644000000000000000000007251613125073146017475 0ustar rootroot00000000000000"""
Copyright 2012 NetApp, Inc. All Rights Reserved,
contribution by Weston Andros Adamson

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
""" import sys import os import errno import time import re import multiprocessing import signal import random import atexit import pwd from cmd import * from config import * from workloads import * import selector _server_path_v4 = re.compile('([^:]+):(\S+)') _server_path_v6 = re.compile('(\[\S+\]):(\S+)') @atexit.register def _exit_cleanup(): signal.signal(signal.SIGINT, signal.SIG_IGN) cleanup() def _get_tracedir(resdir, workload): tracedir_root = '%s-%s-%s' % \ (TRACE_DIR_PREFIX, workload, HOSTNAME) tracedir_root = tracedir_root.replace('>', '') tracedir_root = tracedir_root.replace(' ', '_') tracedir_root = tracedir_root.replace('|', '_') tracedir_root = tracedir_root.replace('"', '_') tracedir_root = tracedir_root.replace("'", '_') for i in range(100): tracedir = '%s-%s' % (tracedir_root, time.time()) if not os.path.exists(os.path.join(resdir, tracedir)): return tracedir raise Exception("can't find unused tracedir!") class TraceAttrs: """ object representing attributes of trace - always mirrored in the trace's 'arguments' file """ def __init__(self, filename=None, temp=False, new=False): self.__attrs = {} if not filename: self.__attrfile = os.path.join(RUNNING_TRACE_DIR, TRACE_ATTRFILE) else: self.__attrfile = filename self.__temp = temp self.__dirname = os.path.dirname(self.__attrfile) if not temp and not new: try: f = file(self.__attrfile) except IOError, e: raise IOError('Attr file not found') for line in f: if line.strip(): name, val = line.strip().split('=',1) self.__attrs[name.strip()] = \ val.strip().replace('\\n', '\n') if not self.__attrs.has_key('tracedir'): self.__attrs['tracedir'] = RUNNING_TRACE_DIR if not self.__attrs.has_key('stoptime'): self.__attrs['stoptime'] = 'ongoing' if not self.__attrs.has_key('tracedir_version'): if new or temp: self.__attrs['tracedir_version'] = TRACE_DIR_VERSION else: self.__attrs['tracedir_version'] = 1 self._upgrade_attrs() def _upgrade_attrs(self): """ based on the tracedir_version, upgrade attrs """ tracedir_vers = int(self.__attrs['tracedir_version']) while tracedir_vers < TRACE_DIR_VERSION: if tracedir_vers == 1: # move tags from separate attrs to one 'tags' attr v1_tags = [ 'delegations_enabled', 'pnfs_enabled', 'remote' ] tag_names = [ x for x in self.__attrs.keys() if x in v1_tags ] for name in tag_names: assert int(self.__attrs[name]) == 1 del self.__attrs[name] self.__attrs['tags'] = ','.join(tag_names) elif tracedir_vers == 2: # 'test' attr -> 'workload' attr self.__attrs['workload'] = self.__attrs['test'] del self.__attrs['test'] elif tracedir_vers == 3: # normalize mountopts new = mountopts_normalize(self.__attrs['mountopts']) self.__attrs['mountopts'] = new elif tracedir_vers == 4: # rename probe tags tags = self.__attrs['tags'].split(',') for i in range(len(tags)): if tags[i] == 'pnfs_enabled': tags[i] = 'pnfs' elif tags[i] == 'delegations_enabled': tags[i] = 'deleg' self.__attrs['tags'] = ','.join(tags) elif tracedir_vers == 5: # make dmesg.diff file from dmesg.start and dmesg.end start = os.path.join(self.__dirname, 'dmesg.start') stop = os.path.join(self.__dirname, 'dmesg.stop') diff = os.path.join(self.__dirname, 'dmesg.diff') if not os.path.exists(diff): cmd("diff %s %s > %s" % (start, stop, diff), raiseerrorcode=False) elif tracedir_vers == 6: # set workload_description and workload_command w = self.__attrs['workload'] desc = workload_description(w) command = workload_command(w, pretty=True) self.__attrs['workload_description'] = desc self.__attrs['workload_command'] = command elif tracedir_vers == 7: # pull out any 
detect tags # (only 'pnfs' and 'deleg' at this point) tags = self.__attrs['tags'].split(',') detects = [ t for t in tags if t in ('pnfs', 'deleg') ] for d in detects: tags.remove(d) self.__attrs['tags'] = ','.join(tags) self.__attrs['detects'] = ','.join(detects) elif tracedir_vers == 8: # detects are now + or - detects = self.__attrs['detects'].split(',') vers = mountopts_version(self.__attrs['mountopts']) new = [] if 'pnfs' in detects: new.append('+pnfs') elif vers == 'v4.1': new.append('-pnfs') if 'deleg' in detects: new.append('+deleg') elif vers in ('v4.0', 'v4.1'): new.append('-deleg') self.__attrs['detects'] = ','.join(new) elif tracedir_vers == 9: # get rid of - detects, just show + detects, but without + detects = self.__attrs['detects'].split(',') new = [] for d in detects: if d.startswith('+'): new.append(d[1:]) self.__attrs['detects'] = ','.join(new) else: raise Exception("Unhandled tracedir_version: %s" % (tracedir_vers,)) tracedir_vers += 1 assert tracedir_vers == TRACE_DIR_VERSION self.__attrs['orig_tracedir_version'] = self.__attrs['tracedir_version'] self.__attrs['tracedir_version'] = TRACE_DIR_VERSION def _sorted_names(self): names = self.__attrs.keys() names.sort() return names def get(self, name, *args): if self.__attrs.has_key(name): return self.__attrs[name] # handle optional default value if args: assert len(args) == 1 return args[0] raise KeyError(name) def set(self, name, value): self.__attrs[name] = value def to_dict(self): return self.__attrs def __str__(self): o = [] o.append('TraceAttrs:') for k in self._sorted_names(): o.append(' %-10s: %s' % (k, self.__attrs[k])) return '\n'.join(o) def write(self): if self.__temp: return f = file(self.__attrfile, 'w+') for k, v in self.__attrs.iteritems(): f.write('%s = %s\n' % (k, str(v).replace('\n', '\\n'))) def _dir_create(): try: os.mkdir(RUNNING_TRACE_DIR) except OSError, e: if e.errno == errno.EEXIST: raise IOError('An NFS trace is already running') raise os.mkdir(MOUNTDIR) def _probe_dir_remove(): cmd('rm -rf %s 2> /dev/null' % (PROBE_DIR,)) def _dir_remove(): cmd('rm -rf %s 2> /dev/null' % (RUNNING_TRACE_DIR,)) def dir_remove_old_asides(): res = cmd('ls %s-* 2> /dev/null' % (RUNNING_TRACE_DIR,), raiseerrorcode=False) if '\n'.join(res[0]).strip(): inform("Removing old error result directories from %s-*" % (RUNNING_TRACE_DIR,)) cmd('rm -rf %s-* 2> /dev/null' % (RUNNING_TRACE_DIR,)) def _dir_aside(): newdir = '%s-error-%u' % (RUNNING_TRACE_DIR, int(time.time())) warn('Moving failed rundir to %s - it will be deleted on next run of this script!' % newdir) cmd('mv "%s" "%s"' % (RUNNING_TRACE_DIR, newdir)) def _mount(attrs, old_syntax=False): mountopts = attrs.get('mountopts') if old_syntax: mountopts = mountopts_old_syntax(mountopts) cmdstr = 'sudo mount -v -t nfs -o "%s" "%s" "%s"' % \ (mountopts, attrs.get('serverpath'), attrs.get('localpath')) cmd(cmdstr, raiseerrorout=False) def _try_mount(attrs, quiet=False): is_probe = attrs.get('is_probe', 0) #if is_probe: # quiet = True if not quiet: sys.stdout.write("Mounting: %s (options: %s)..." 
% (attrs.get('serverpath'), attrs.get('mountopts'))) sys.stdout.flush() err = None for i in range(TRACE_MOUNT_TRIES): for old_syntax in (False, True): try: _mount(attrs, old_syntax=old_syntax) except Exception, e: if not quiet: sys.stdout.write('.') sys.stdout.flush() err = e else: err = None break time.sleep(TRACE_MOUNT_TRY_DELAY) if err == None: break if not quiet: sys.stdout.write('\n') sys.stdout.flush() if err: raise e def _is_mounted(attrs): try: simplecmd("mount | grep ' on %s type nfs'" % attrs.get('localpath')) except CmdErrorCode: return False return True def mounts_exist(): try: simplecmd("mount | grep ' type nfs'") except CmdErrorCode: return False return True def _dir_exists(): return os.path.exists(RUNNING_TRACE_DIR) def _unmount(attrs): cmd('sudo umount %s' % attrs.get('localpath')) def _try_unmount(attrs, quiet=False, cleanup=False): is_probe = attrs.get('is_probe', 0) #if is_probe: #quiet = True if not quiet: sys.stdout.write("Syncing: %s..." % attrs.get('serverpath')) sys.stdout.flush() cmd('sudo sync') if not quiet: sys.stdout.write('.\n') sys.stdout.write("Unmounting: %s..." % attrs.get('serverpath')) sys.stdout.flush() err = None tries = TRACE_UMOUNT_TRIES if cleanup: tries = TRACE_CLEANUP_UMOUNT_TRIES for i in range(tries): try: _unmount(attrs) except Exception, e: if not quiet: sys.stdout.write('.') sys.stdout.flush() err = e else: err = None break time.sleep(TRACE_UMOUNT_TRY_DELAY) if not quiet: sys.stdout.write('\n') if err: raise e # TODO this should be bound to the parsing stuff!! def _save_start_stats(attrs): commands = [ {'cmd': 'nfsstat', 'file': 'nfsstats.start' }, {'cmd': 'dmesg', 'file': 'dmesg.start' }, {'cmd': 'sudo sysctl -a 2>/dev/null | grep nfs', 'file': 'nfs_sysctls.start' }, {'cmd': 'cat /proc/self/mountstats', 'file': 'proc_mountstats.start' }, {'cmd': 'sudo klist -ke /etc/krb5.keytab 2> /dev/null || echo', 'file': 'klist_mach.start' }, {'cmd': 'klist 2> /dev/null || echo', 'file': 'klist_user.start' }, ] _collect_stats(commands) def _save_stop_stats(attrs): commands = [ {'cmd': 'nfsstat', 'file': 'nfsstats.stop' }, {'cmd': 'nfsstat -S %s/nfsstats.start' % RUNNING_TRACE_DIR, 'file': 'nfsstats' }, {'cmd': 'dmesg', 'file': 'dmesg.stop' }, {'cmd': 'diff %s/dmesg.start %s/dmesg.stop || echo' % \ (RUNNING_TRACE_DIR, RUNNING_TRACE_DIR), 'file': 'dmesg.diff' }, {'cmd': 'mountstats %s' % attrs.get('localpath'), 'file': 'mountstats' }, {'cmd': 'cat /proc/self/mountstats', 'file': 'proc_mountstats.stop' }, {'cmd': 'nfsiostat', 'file': 'nfsiostat' }, {'cmd': 'sudo sysctl -a 2>/dev/null | grep nfs', 'file': 'nfs_sysctls.stop' }, {'cmd': 'sudo klist -ke /etc/krb5.keytab 2> /dev/null || echo', 'file': 'klist_mach.stop' }, {'cmd': 'klist 2> /dev/null || echo', 'file': 'klist_user.stop' }, ] _collect_stats(commands) def _collect_stats(commands): stats = [] for c in commands: stats.append(c['file']) out = cmd(c['cmd']) f = file(os.path.join(RUNNING_TRACE_DIR, c['file']), 'w+') f.write('\n'.join(out[0])) def probe_detect(probe_trace_dir, mountopt): lines = [ x.strip() for x in file(os.path.join(probe_trace_dir, 'proc_mountstats.stop')) ] # find this mountpoint # ie device server:/path mounted on /mnt with fstype nfs4 statvers=1.1 start = -1 end = -1 for i, line in enumerate(lines): mounted_on = ' mounted on %s with ' % MOUNTDIR if line.find(mounted_on) >= 0: assert start == -1 start = i elif start >= 0 and line.startswith('device '): assert end == -1 end = i if end < 0: end = len(lines) present_ops = {} if start >= 0: lines = lines[start:end] else: warn("detect> 
can't find mount section in proc_mounstats, lines=\n%s" % '\n'.join(lines)) return skip = True for line in lines: # skip until per-op statistics if line == 'per-op statistics': skip = False continue if skip or not line.strip(): continue try: op, data = line.split(':', 1) except: warn("detect> can't parse mountstats line: %s" % (line)) continue data_list = [ int(x) for x in data.split(' ') if x ] present_ops[op] = tuple(data_list) def _ops_have_data(op_list): for op in op_list: if sum(present_ops.get(op, tuple())) != 0: return True return False detect = [] pnfs_ops = ['LAYOUTRETURN', 'GETDEVICEINFO'] if _ops_have_data(pnfs_ops): assert mountopts_version(mountopt) in ('v4.1', 'v4.2'), \ "expected v4.{1,2} for tag pnfs, but mountopt = %r" % (mountopt,) detect.append(DETECT_PNFS) deleg_ops = ['DELEGRETURN'] if _ops_have_data(deleg_ops): assert mountopts_version(mountopt) in ('v4.0', 'v4.1', 'v4.2'), \ "expected v4.x for tag deleg, but mountopt = %r" % (mountopt,) detect.append(DETECT_DELEG) detect = ','.join(detect) return detect def _is_auth_gss(): lines = [ x.strip() for x in file('/proc/self/mountstats') ] mounted_on = ' mounted on %s with ' % MOUNTDIR start = -1 end = -1 for i, line in enumerate(lines): if line.find(mounted_on) >= 0: assert start == -1 start = i elif start >= 0 and line.startswith('device '): assert end == -1 end = i if end < 0: end = len(lines) if start >= 0: lines = lines[start:end] else: return False for line in lines: if line.startswith('sec:'): label, data = line.split(':') data = data.strip() if data.startswith('flavor=6'): return True return False def _has_creds(): return os.stat(os.path.join(RUNNING_TRACE_DIR, 'klist_user.start')).st_size != 1 def _has_tkt(server): princ = re.compile('nfs/' + server + '\S+$') lines = [ x.strip() for x in file(os.path.join(RUNNING_TRACE_DIR, 'klist_user.start')) ] for i, line in enumerate(lines): if re.search(princ, line): return True return False def _eperm_helper(opts): server, path = opts.serverpath.split(':') if _is_auth_gss(): if _has_creds(): if _has_tkt(server): info = str.format( ' Check {:s} on {:s} and ensure user {:s} has the correct' ' permission.', path, server, pwd.getpwuid(os.getuid())[0]) else: info = str.format( ' No nfs service ticket for {:s} in user {:s}\'s' ' credential cache.', server, pwd.getpwuid(os.getuid())[0]) else: info = str.format( ' User {:s} has no kerberos credentials.', pwd.getpwuid(os.getuid())[0]) elif os.getuid() == 0: info = str.format( ' Check for root squashing in the export options for {:s} on' ' {:s}.', path, server) else: info = str.format( ' Check {:s} on {:s} and ensure user {:s} has the correct' ' permission.', path, server, pwd.getpwuid(os.getuid())[0]) return info # # public api commands # def get_current_hostname(): return simplecmd('hostname') def get_current_kernel(): return simplecmd('uname -r') def start(mountopts, serverpath, workload, detects, tags, is_setup=False, is_probe=False): # gather any additional arguments hostname = get_current_hostname() kernel = get_current_kernel() m = _server_path_v6.match(serverpath) if not m: m = _server_path_v4.match(serverpath) if not m: raise ValueError("Cannot parse server, path from '%s'" % serverpath) server = m.group(1) path = m.group(2) _dir_create() attrs = TraceAttrs(new=True) attrs.set('mountopts', mountopts) attrs.set('serverpath', serverpath) attrs.set('server', server) attrs.set('path', path) attrs.set('localpath', MOUNTDIR) attrs.set('starttime', long(time.time())) attrs.set('workload', workload) attrs.set('workload_command', 
workload_command(workload, pretty=True)) attrs.set('workload_description', workload_description(workload)) attrs.set('kernel', get_current_kernel()) attrs.set('client', get_current_hostname()) attrs.set('tags', tags) attrs.set('detects', detects) if is_setup: attrs.set('is_setup', 1) if is_probe: attrs.set('is_probe', 1) attrs.write() _try_mount(attrs) if not is_setup: _save_start_stats(attrs) def stop(resdir=None): attrs = TraceAttrs(filename=os.path.join(RUNNING_TRACE_DIR, TRACE_ATTRFILE)) if resdir != None and os.path.isdir(resdir): raise IOError("Result directory '%s' already exists" % resdir) attrs.set('stoptime', time.time()) attrs.write() is_setup = long(attrs.get('is_setup', 0)) is_probe = long(attrs.get('is_probe', 0)) if not is_setup: _save_stop_stats(attrs) if _is_mounted(attrs): _try_unmount(attrs) idle_check() if resdir != None: cmd('mv %s %s' % (RUNNING_TRACE_DIR, resdir)) if not is_probe: print 'Results copied to: %s' % (os.path.split(resdir)[-1],) else: cmd('rm -rf %s' % (RUNNING_TRACE_DIR)) if not is_setup: print 'Results thrown away' def find_mounted_serverpath(mountdir): try: res = cmd('mount | grep " on %s "' % mountdir) except: return '' out = [ x.strip() for x in res[0] if x ] assert len(out) == 1, "res = %r" % (res,) idx = out[0].find(' on ') return out[0][:idx] def cleanup(): _probe_dir_remove() serverpath = find_mounted_serverpath(MOUNTDIR) if not serverpath: _dir_remove() return attrs = TraceAttrs(temp=True) attrs.set('localpath', MOUNTDIR) attrs.set('serverpath', serverpath) if _is_mounted(attrs): _try_unmount(attrs, cleanup=True) if _dir_exists(): _dir_aside() def get_trace_attr(name): attrs = TraceAttrs(filename=os.path.join(RUNNING_TRACE_DIR, TRACE_ATTRFILE)) return attrs.get(name) def get_trace_list(collection, resultdir, workloads_requested, mountopts_detects_tags, num_runs, server, path): workloads = {} if workloads_requested: new = [] for w in workloads_requested: try: obj = WORKLOADS[w] name = obj.name() workloads[name] = obj new.append(name) except KeyError: print warn('Invalid workload: "%s"' % w) print print "Available workloads:" print " %s" % '\n '.join(available_workloads()) sys.exit(2) workloads_requested = new else: for w, workload_obj in WORKLOADS.iteritems(): if not workload_obj.check(): name = workload_obj.name() workloads[name] = workload_obj trace_list = [] total = 0 skipped = 0 requested = 0 current_kernel = get_current_kernel() client = get_current_hostname() for w, workload_obj in workloads.iteritems(): for mountopt, detects, tags in mountopts_detects_tags: sel = selector.Selector(w, current_kernel, mountopt, detects, tags, client, server, path) if collection.has_traces(sel): tracestat = collection.get_trace(sel) already = tracestat.num_runs() else: already = 0 assert already >= 0 need = num_runs - already if need < 0: need = 0 if need > 0: trace_list.append((workload_obj, mountopt, detects, tags, need)) total += need requested += num_runs skipped += min(already, num_runs) return trace_list, workloads, total, requested, skipped def _idle_check(): # make sure there are no servers res = cmd('cat /proc/fs/nfsfs/servers 2>/dev/null | grep -v "^NV SERVER"', raiseerrorcode=False) res = '\n'.join(res[0]).strip() if res: raise Exception("NFS client not idle: %s" % res) def idle_check(wait=True): IDLE_MAX=120 if wait: for i in range(IDLE_MAX): try: _idle_check() except: time.sleep(1) continue else: return _idle_check() def probe_mounts(opts): """ Probe mounts for any detectable tags arguments: - opts - an Options class instance result: a dict() 
mapping mountopt -> detected tags """ detect_by_mountopt = {} for m in opts.mountopts: inform("Probing %s: %s" % (opts.serverpath, m)) # even if there is no tag to probe for, it makes sense to attempt a # mount to make sure the mount works and is writable start(m, opts.serverpath, '__nfsometer-probe', [], [], is_probe=True) fpath = os.path.join(RUNROOT, '__nfsometer-probe') try: cmd('mkdir -p "%s"' % RUNROOT) except CmdErrorCode, e: msg = str.format('"mkdir -p {:s}" failed.', RUNROOT) # try to hint why it failed if e.code == errno.EPERM: msg += _eperm_helper(opts) else: msg += e.errstr warn(msg) # and bail out right now sys.exit(1) f = file(fpath, 'w+') f.write('nfsometer probe to determine server features: %s' % m) f.close() # force delegation if supported fd1 = os.open(fpath, os.O_RDWR) fd2 = os.open(fpath, os.O_RDWR) os.close(fd2) os.close(fd1) cmd('rm -f %s 2> /dev/null' % (fpath,)) stop(PROBE_DIR) detect_by_mountopt[m] = probe_detect(PROBE_DIR, m) if detect_by_mountopt[m]: inform("%s %s has tags: %s" % (opts.serverpath, m, detect_by_mountopt[m])) _probe_dir_remove() return detect_by_mountopt def run_traces(collection, opts, fetch_only=False): # cancel any ongoing trace cleanup() detect_by_mountopt = probe_mounts(opts) mountopts_detects_tags = [ (m, detect_by_mountopt.get(m, ''), opts.tags) for m in opts.mountopts ] trace_list, workloads, total, requested, skipped = \ get_trace_list(collection, opts.resultdir, opts.workloads_requested, mountopts_detects_tags, opts.num_runs, opts.server, opts.path) for w, workload_obj in workloads.iteritems(): workload_obj.fetch() if fetch_only: return # check each workload to make sure we'll be able to run it for w, workload_obj in workloads.iteritems(): check_mesg = workload_obj.check() if check_mesg: raise ValueError("Workload %s is unavailable: %s" % (w, check_mesg)) this_trace = 0 print print "Requested: %u workloads X %u options X %u runs = %u traces" % \ (len(workloads), len(mountopts_detects_tags), int(opts.num_runs), requested) if skipped: print "Results directory already has %u matching traces" % (skipped,) print "Need to run %u of %u requested traces" % (total, requested) for workload_obj, mountopt, detects, tags, nruns in trace_list: mdt = mountopt if detects: mdt += ' ' + detects if tags: mdt += ' ' + tags print " %s - needs %u runs of %s" % (workload_obj.name(), nruns, mdt) print dir_remove_old_asides() if opts.randomize_traces: inform("randomizing traces") for i in range(5): random.shuffle(trace_list) for workload_obj, mountopt, detects, tags, nruns in trace_list: if nruns <= 0: continue this_serverpath = opts.serverpath for run in range(nruns): this_trace += 1 print mdt = mountopt if detects: mdt += ' ' + detects if tags: mdt += ' ' + tags inform("Trace %u/%u: %u of %u for %s: %s" % (this_trace, total, run+1, nruns, workload_obj.name(), mdt)) print sys.stdout.write("< SETUP WORKLOAD >\n") sys.stdout.flush() start(mountopt, this_serverpath, workload_obj.name(), detects, tags, is_setup=True) workload_obj.setup() stop() print sys.stdout.write("< RUN WORKLOAD >\n") sys.stdout.flush() start(mountopt, this_serverpath, workload_obj.name(), detects, tags) workload_obj.run() tracedir = _get_tracedir(opts.resultdir, workload_obj.name()) stop(os.path.join(opts.resultdir, tracedir)) if this_trace == 0: inform('No traces were needed!') else: inform('Successfully ran %u traces!' 
% (this_trace,)) def _loadgen_pool_init(): pass def _loadgen_pool_f(workload, num): curr_proc=multiprocessing.current_process() curr_proc.daemon=True sys.stdout.flush() wobj = Workload(workload, 'loadgen_%u' % (num,)) stagger_time = random.randrange(0, TRACE_LOADGEN_STAGGER_MAX) inform("loadgen %u: %s stagger (sleep %d)" % (num, workload, stagger_time)) time.sleep(stagger_time) stop = False while not stop: try: inform("loadgen %u: %s setup" % (num, workload)) wobj.loadgen_setup() inform("loadgen %u: %s run" % (num, workload)) wobj.run_no_tracedir() except KeyboardInterrupt: inform("loadgen %u: %s stop" % (num, workload)) stop = True except Exception, e: warn("loadgen %u: %s error:\n%s" % (num, workload, e)) time.sleep(1.0) def loadgen(opts): mountattrs = {'serverpath': opts.serverpath, 'mountopts': opts.mountopts[0], 'localpath': MOUNTDIR, } _dir_create() workload = opts.workloads_requested[0] mainobj = WORKLOADS[workload] checkmesg = mainobj.check() if checkmesg: raise Exception("can't run workload %s: %s" % (workload, checkmesg)) mainobj.fetch() workpool = multiprocessing.Pool(opts.num_runs, _loadgen_pool_init) _try_mount(mountattrs) mainobj.setup() inform("Starting %u loadgen processes of workload: %s" % (opts.num_runs, workload)) for num in range(opts.num_runs): workpool.apply_async(_loadgen_pool_f, (workload, num)) inform("Waiting on loadgen threads of workload: %s" % (workload)) # busy loop to catch KeyboardInterrupt try: while True: time.sleep(1) except KeyboardInterrupt: inform("Loadgen cancelled by user. cleaning up") workpool.terminate() workpool.join() except Exception, e: workpool.terminate() workpool.join() raise e finally: _try_unmount(mountattrs) _dir_remove() nfsometer-1.9/nfsometerlib/workloads.py0000644000000000000000000002054613125073146020404 0ustar rootroot00000000000000""" Copyright 2012 NetApp, Inc. All Rights Reserved, contribution by Weston Andros Adamson This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
""" import os import errno import re from cmd import * from config import * _re_which = re.compile('[\s\S]*which: no (\S+) in \([\s\S]*') def _mkdir_quiet(path): try: os.mkdir(path) except OSError, e: if e.errno != errno.EEXIST: raise e # Base Class for NFS workloads class Workload: def __init__(self, defname, rundir_suffix=''): self.defname = defname self.env = {} self.env['PATH'] = '.:/bin:/sbin:/usr/bin:/usr/sbin/:/usr/local/bin:/usr/local/sbin/' self.rundir = os.path.join(RUNROOT, defname.replace(' ', '_')) if rundir_suffix: self.rundir += '_' + rundir_suffix.replace(' ', '_') self.localdir = os.path.join(WORKLOADFILES_ROOT, defname.replace(' ', '_')) self.script = """RUNDIR="%s" LOCALDIR="%s" %s""" % \ (self.rundir, self.localdir, WORKLOADS_SCRIPT) self._cache = {} self.create_localdir() def create_rundir(self): _mkdir_quiet(RUNROOT) _mkdir_quiet(self.rundir) def create_localdir(self): _mkdir_quiet(NFSOMETER_DIR) _mkdir_quiet(WORKLOADFILES_ROOT) _mkdir_quiet(self.localdir) def remove_rundir(self): sys.stdout.write('removing run directory...') sys.stdout.flush() cmd('rm -rf %s' % self.rundir) sys.stdout.write('done\n') sys.stdout.flush() def loadgen_setup(self): self.remove_rundir() self.create_rundir() def setup(self): self.remove_rundir() self.create_rundir() oldcwd = os.getcwd() os.chdir(self.localdir) sys.stdout.flush() cmd('%s setup %s' % (self.script, self.defname)) os.chdir(oldcwd) def fetch(self): url = self.url() url_out = self.url_out() if url and url_out: assert not '/' in url_out oldcwd = os.getcwd() os.chdir(self.localdir) if not os.path.exists(url_out): if url.startswith('git://'): print "Fetching git: %s" % url fetch_cmd = 'git clone "%s" "%s"' % (url, url_out) else: print "Fetching url: %s" % url fetch_cmd = 'wget -O "%s" "%s"' % (url_out, url) try: cmd(fetch_cmd, pass_output=True, raiseerrorout=True) except Exception, e: cmd('rm -rf "%s"' % url_out) finally: if not os.path.exists(url_out): warn("Error error fetching '%s'" % url) sys.exit(1) os.chdir(oldcwd) else: assert not url and not url_out def check(self): if not self._cache.has_key('check'): res = cmd('%s check %s' % (self.script, self.defname)) res = ', '.join([ x.strip() for x in res[0]]).strip() self._cache['check'] = res return self._cache['check'] def command(self): if not self._cache.has_key('command'): res = cmd('%s command %s' % (self.script, self.defname)) res = '\n'.join(res[0]).strip() assert not '\n' in res self._cache['command'] = res return self._cache['command'] def description(self): if not self._cache.has_key('description'): res = cmd('%s description %s' % (self.script, self.defname)) res = '\n'.join(res[0]).strip() assert not '\n' in res self._cache['description'] = res return self._cache['description'] def name(self): if not self._cache.has_key('name'): res = cmd('%s name %s' % (self.script, self.defname)) res = '\n'.join(res[0]).strip() assert not '\n' in res self._cache['name'] = res return self._cache['name'] def url(self): if not self._cache.has_key('url'): res = cmd('%s url %s' % (self.script, self.defname)) res = '\n'.join(res[0]).strip() assert not '\n' in res self._cache['url'] = res return self._cache['url'] def url_out(self): if not self._cache.has_key('url_out'): res = cmd('%s url_out %s' % (self.script, self.defname)) res = '\n'.join(res[0]).strip() assert not '\n' in res self._cache['url_out'] = res return self._cache['url_out'] def run(self): logfile = os.path.join(RUNNING_TRACE_DIR, 'test.log') timefile = os.path.join(RUNNING_TRACE_DIR, 'test.time') cmdfile = 
os.path.join(RUNNING_TRACE_DIR, 'command.sh') command = self.command() print "Running command: %s" % command sys.stdout.flush() oldcwd = os.getcwd() os.chdir(self.rundir) # write command to file file(cmdfile, 'w+').write(command) sh_cmd = "sh %s > %s 2>&1" % (cmdfile, logfile) wrapped_cmd = '( time ( %s ) ) 2> %s' % (sh_cmd, timefile) try: cmd(wrapped_cmd, env=self.env, pass_output=True, raiseerrorout=False) except KeyboardInterrupt: os.chdir(oldcwd) # re-raise raise KeyboardInterrupt except Exception, e: os.chdir(oldcwd) # re-raise raise e else: os.chdir(oldcwd) def run_no_tracedir(self): # we're not tracing, so just store these files in NFSland logfile = os.path.join(self.rundir, 'test.log') cmdfile = os.path.join(self.rundir, 'command.sh') command = self.command() print "Running command without trace: %s" % command sys.stdout.flush() oldcwd = os.getcwd() os.chdir(self.rundir) # write command to file file(cmdfile, 'w+').write(command) sh_cmd = "sh %s > %s 2>&1" % (cmdfile, logfile) try: cmd(sh_cmd, env=self.env, pass_output=True, raiseerrorout=False) except KeyboardInterrupt: os.chdir(oldcwd) # re-raise raise KeyboardInterrupt except Exception, e: os.chdir(oldcwd) # re-raise raise e else: os.chdir(oldcwd) WORKLOADS = {} res = cmd('%s list' % WORKLOADS_SCRIPT) workloads = '\n'.join(res[0]).strip().split(' ') for w in workloads: WORKLOADS[w] = Workload(w) def workload_command(workload, pretty=False): if workload == posix.environ.get('NFSOMETER_NAME', None): workload = 'custom' try: obj = WORKLOADS[workload] except: return '# (unknown)' cmdstr = obj.command() if pretty: cmdstr = cmdstr.replace(' && ', '\n') cmdstr = cmdstr.replace(os.path.join(WORKLOADFILES_ROOT, workload), '${workload_dir}') cmdstr = cmdstr.replace(os.path.join(RUNROOT, workload), '${run_dir}') return cmdstr def workload_description(workload): if workload == posix.environ.get('NFSOMETER_NAME', None): workload = 'custom' try: obj = WORKLOADS[workload] except: return '# (unknown)' return obj.description() def available_workloads(): o = [] defnames = WORKLOADS.keys() defnames.sort() for defname in defnames: check_mesg = WORKLOADS[defname].check() if not check_mesg: o.append('%s' % (defname,)) return o def unavailable_workloads(): """ return a list of strings, one per unavailable workload, describing why each workload is unavailable """ o = [] defnames = WORKLOADS.keys() defnames.sort() for defname in defnames: check_mesg = WORKLOADS[defname].check() if check_mesg: o.append('%-20s - %s' % (defname, check_mesg)) return o nfsometer-1.9/COPYING0000644000000000000000000004325413125073146014370 0ustar rootroot00000000000000 GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Lesser General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price.
Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. 
You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. 
For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. 
Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 
END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. <one line to give the program's name and a brief idea of what it does.> Copyright (C) <year> <name of author> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. <signature of Ty Coon>, 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. nfsometer-1.9/DESIGN0000644000000000000000000000123213125073146014217 0ustar rootroot00000000000000 NOTE: A lot is missing from this file because the NFSometer.py script is in the process of being completely replaced with a unified web interface. DATA COLLECTION Trace data is collected in a collection of flat files. Each trace gets its own directory. Each trace result directory contains an "arguments" file that has a simple "<name> = <value>" format. o always collect the raw, unadulterated source data.
this way, nfsometer bugs can be fixed, and the report can be rebuilt instead of running all the tests again o ANY changes to the tracedir format need to bump TRACEDIR_VERSION o Every new TRACEDIR_VERSION MUST make an upgrade step nfsometer-1.9/MANIFEST.in0000644000000000000000000000046013125073146015063 0ustar rootroot00000000000000include README include COPYING include DESIGN include howto-contribute.txt include nfsometer.1 include nfsometerlib/html/*.js include nfsometerlib/html/*.html include nfsometerlib/html/*.css include nfsometerlib/workloads/*.nfsometer include nfsometerlib/workloads/*.sh include nfsometerlib/scripts/*.sh nfsometer-1.9/README0000644000000000000000000000342713125073146014213 0ustar rootroot00000000000000nfsometer - A framework for running workloads and reporting their performance characteristics across NFS protocol versions, options and Linux kernels. About ================== author: Weston Andros Adamson website: http://wiki.linux-nfs.org/wiki/index.php/NFSometer Dependencies ================== nfsometer depends on several third-party packages and will not run without them installed. - matplotlib - used for graph plotting - numpy - used by matplotlib, stats functions - mako - used for HTML templating of reports - nfs-utils - mount.nfs, mountstats, etc - time - /bin/time On a Fedora system the following command will install these packages: # sudo yum install python-matplotlib numpy python-mako nfs-utils time Other distros will have a similar command. Installation ================ nfsometer is designed to be able to run without installation, i.e.: # ./nfsometer.py --help nfsometer can also be installed to the standard Python site-packages and executable directories (must be run as root): # python setup.py install # nfsometer --help Using nfsometer ================= See the nfsometer manpage: man nfsometer Or use the help: ./nfsometer.py --help And see examples (the same as in the manpage): ./nfsometer.py examples Adding a workload ====================== Adding a workload is designed to be very simple. Copy the 'nfsometer-workload-template' file to a file in the workloads directory that ends with ".nfsometer", then edit this file following the instructions contained within. Most of the time it's probably easier to just use a custom workload - see the manpage for more info on the custom workload. Contributing back ================== See howto-contribute.txt nfsometer-1.9/howto-contribute.txt0000644000000000000000000000411113125073146017377 0ustar rootroot00000000000000== Developer's Certificate of Origin == NFSometer uses the Linux kernel model of using git not only as a source repository, but also as a way to track contributions and copyrights. Each submitted patch must have a "Signed-off-by" line. Patches without this line will not be accepted. The sign-off is a simple line at the end of the explanation for the patch, which certifies that you wrote it or otherwise have the right to pass it on as an open-source patch.
The rules are pretty simple: if you can certify the below: Developer's Certificate of Origin 1.1 By making a contribution to this project, I certify that: (a) The contribution was created in whole or in part by me and I have the right to submit it under the open source license indicated in the file; or (b) The contribution is based upon previous work that, to the best of my knowledge, is covered under an appropriate open source license and I have the right under that license to submit that work with modifications, whether created in whole or in part by me, under the same open source license (unless I am permitted to submit under a different license), as indicated in the file; or (c) The contribution was provided directly to me by some other person who certified (a), (b) or (c) and I have not modified it. (d) I understand and agree that this project and the contribution are public and that a record of the contribution (including all personal information I submit with it, including my sign-off) is maintained indefinitely and may be redistributed consistent with this project or the open source license(s) involved. then you just add a line saying Signed-off-by: Random J Developer <random@developer.example.org> using your real name (sorry, no pseudonyms or anonymous contributions.) == Sending patches == Please send git-formatted patches (git format-patch) to dros@monkey.org and cc the Linux Kernel NFS mailing list: linux-nfs@vger.kernel.org. nfsometer-1.9/nfsometer.10000644000000000000000000001715213125073406015416 0ustar rootroot00000000000000.\" Manual for nfsometer .TH man 1 "1.9" "nfsometer" .SH NAME nfsometer \- NFS performance measurement tool .SH SYNOPSIS nfsometer [options] [mode] [[<server:path>] [workloads...]] .SH DESCRIPTION nfsometer is a performance measurement framework for running workloads and reporting results across NFS protocol versions, NFS options and Linux NFS client implementations. .SH MODES Basic usage (no mode specified): \fBnfsometer <server:path> [workloads...]\fR This will fetch needed files, run traces, and generate reports, same as running the 'fetch', 'trace' and 'report' stages. Advanced usage (specify modes): \fBnfsometer list\fR List the contents of the results directory. \fBnfsometer workloads\fR List available and unavailable workloads. \fBnfsometer notes\fR Edit the notes file of the results directory. These notes will be displayed in report headers. \fBnfsometer loadgen <server:path> <workload>\fR Run in loadgen mode: don't record any stats, just loop over <workload> against <server:path>. Only one \-o option is allowed. Use the \-n option to run multiple instances of the loadgen workload. When running more than one instance, the initial start times are staggered. \fBnfsometer fetch [workloads...]\fR Fetch all needed files for the specified workload(s). If no workloads are specified, all workloads are fetched. Fetched files are only downloaded once and are cached for future runs. \fBnfsometer trace <server:path> [workloads...]\fR Run traces against <server:path>. The traces run will be: (options + always options + tags) X (workloads) X (num runs) This will only run traces that don't already exist in the results directory. \fBnfsometer report\fR Generate all reports available from the results directory. \fBnfsometer examples\fR Show examples from the man page. .SH OPTIONS .sp 1 .TP 0.5i .BR " \fB\-r <resultdir>\fR, \fB\-\-resultdir=<resultdir>\fR " The directory used to save results. default: '/root/nfsometer_results' .sp 1 .TP 0.5i .BR " \fB\-o <options>\fR, \fB\-\-options=<options>\fR " Mount options to iterate through. This option may be used multiple times. Each mount option must have a version specified.
.sp 1 .TP 0.5i .BR " \fB\-a \fR, \fB\-\-always\-options=\fR " Options added to every trace. This option may be used multiple times. .sp 1 .TP 0.5i .BR " \fB\-t \fR, \fB\-\-tag=\fR " Tag all new traces with 'tags'. This option may be used multiple times. .sp 1 .TP 0.5i .BR " \fB\-n \fR, \fB\-\-num\-runs=\fR " Number of runs for each trace of X X default: 1 .sp 1 .TP 0.5i .BR " \fB\-\-serial\-graphs\fR " Generate graphs inline while generating reports. Useful for debugging graphing issues. .sp 1 .TP 0.5i .BR " \fB\-\-rand\fR " Randomize the order of traces .sp 1 .TP 0.5i .BR " \fB\-h\fR, \fB\-\-help\fR " Show the help message .SH EXAMPLES Example 1: See what workloads are available \fB$ nfsometer workloads\fR This command lists available workloads and will tell you why workloads are unavailable (if any exist). Example 2: Compare cthon, averaged over 3 runs, across nfs protocol versions \fBnfsometer \-n 3 server:/export cthon\fR This example uses the default for \-o: "\-o v3 \-o v4 \-o v4.1". To see the results, open results/index.html in a web browser. Example 3: Compare cthon, averaged over 3 runs, between v3 and v4.0 only \fBnfsometer \-n 3 \-o v3 \-o v4 server:/export cthon\fR This example specifies v3 and v4 only. To see the results, open results/index.html in a web browser. Example 4: Compare two kernels running iozone workload, averaged over 2 runs, across all nfs protocol versions nfsometer can compare two (or more) kernel versions, but has no way of building, installing or booting new kernels. It's up to the user to install new kernels. In order for these kernels to be differentiated, 'uname \-a' must be different. 1) boot into kernel #1 2) \fBnfsometer \-n 2 server:/export iozone\fR 3) boot into kernel #2 4) \fBnfsometer \-n 2 server:/export iozone\fR 5) open results/index.html in a web browser To see the results, open results/index.html in a web browser. Example 5: Using tags Tags (the \-t option) can be used to mark nfsometer runs as occurring with some configuration not captured by mount options or detectable tags, such as different sysctl settings (client side), different server side options, or different network conditions. 1) set server value foo to 2.3 2) \fBnfsometer \-o v4 \-o v4.1 \-t foo=2.3\fR 3) set server value foo to 10 4) \fBnfsometer \-o v4 \-o v4.1 \-t foo=10\fR What is passed to \-t is entirely up to the user \- it will not be interpreted or checked by nfsometer at all, so be careful! To see the results, open results/index.html in a web browser. Example 6: Always options The \-o flag specifies distinct option sets to run, but sometimes there are options that should be present in each. Instead of writing each one out, you can use the \-a option: \fBnfsometer \-o v3 \-o v4 \-a sec=krb5 server:/export iozone\fR this is equivalent to: \fBnfsometer \-o v3,sec=krb5 \-o v4,sec=krb5 server:/export iozone\fR Example 7: Using the "custom" workload A main use case of nfsometer is the "custom" workload \- it allows the user to specify the command that nfsometer is to run. NOTE: the command's cwd (current working directory) is the runroot created on the server. \fBexport NFSOMETER_CMD="echo foo > bar"\fR \fBexport NFSOMETER_NAME="echo"\fR \fBexport NFSOMETER_DESC="Writes 4 bytes to a file"\fR \fBnfsometer server:/export custom\fR This will run 3 traces (v3, v4, v4.1) against server:/export of the command: \fBecho foo > bar\fR. Example 8: Using the loadgen mode Loadgen runs several instances of a workload without capturing traces. 
The idea is that you use several clients to generate load, then another client to measure performance of a loaded server. The "real" run of nfsometer (not loadgen) should mark the traces using the \-t option. 1) On client A, run the cthon workload to get a baseline of a server without any load. \fBnfsometer trace server:/export cthon\fR 2) When that's done, start loadgen on client B: \fBnfsometer \-n 10 loadgen server:/export dd_100m_1k\fR This runs 10 instances of the dd_100m_1k workload on server:/export. It can take several minutes to start in an attempt to stagger all the workload instances. 3) Once all instances are started, run the "real" nfsometer trace on client A. Use the \-t option to mark the traces as having run under load conditions: \fBnfsometer \-t "10_dd" trace server:/export cthon\fR 4) Explain how the tests were set up in the result notes. This should be run on client A (which has the traces): \fBnfsometer notes\fR 5) Now generate the reports: \fBnfsometer report\fR Example 9: Long-running nfsometer trace The nfsometer.py script currently runs in the foreground. As such, it will be killed if the tty gets a hangup or the connection to the client is closed. For the time being, nfsometer should be run in a screen session, or run with nohup and the output redirected to a file. 1) \fBscreen \-RD\fR 2) \fBnfsometer \-n 2 server:/export iozone\fR 3) close terminal window (or ^A^D) ... 4) reattach later with \fBscreen \-RD\fR 5) once nfsometer.py is done, results will be in results/index.html .SH SEE ALSO mountstats, nfsstat .SH BUGS No known bugs. .SH AUTHOR Weston Andros Adamson (dros@netapp.com)nfsometer-1.9/nfsometer.py0000755000000000000000000000733213125073146015705 0ustar rootroot00000000000000#!/usr/bin/env python """ Copyright 2012 NetApp, Inc. All Rights Reserved, contribution by Weston Andros Adamson This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
""" import posix import sys import os from nfsometerlib import trace from nfsometerlib import options from nfsometerlib.cmd import * from nfsometerlib.config import * from nfsometerlib.workloads import * from nfsometerlib.report import ReportSet from nfsometerlib.collection import TraceCollection def check_idle_before_start(opts): try: trace.idle_check(wait=False) except: res = cmd('mount | grep " type nfs "', raiseerrorout=False, raiseerrorcode=False) mounts = '\n'.join(res[0]) res = cmd('mount | grep " type nfs4 "', raiseerrorout=False, raiseerrorcode=False) mounts += '\n'.join(res[0]) if not mounts.strip(): opts.error("NFS client not idle (check /proc/fs/nfsfs/servers)") else: opts.error("nfsometer cannot run with any active NFS mounts:\n\n%s" % mounts) def mode_notes(opts): collection = TraceCollection(opts.resultdir) collection.notes_edit() print 'Saved notes for results %s' % (opts.resultdir) def mode_list(opts): collection = TraceCollection(opts.resultdir) print 'Result directory \'%s\' contains:\n\n%s' % \ (opts.resultdir, '\n'.join(collection.show_contents(pre=''))) def mode_workloads(opts): print "Available workloads:" print " %s" % '\n '.join(available_workloads()) print "Unavailable workloads:" print " %s" % '\n '.join(unavailable_workloads()) def mode_loadgen(opts): # XXX check idle? trace.loadgen(opts) def mode_help(opts): opts.usage() def mode_examples(opts): opts.examples() def mode_fetch_trace(opts, fetch_only=False): check_idle_before_start(opts) collection = TraceCollection(opts.resultdir) trace.run_traces(collection, opts, fetch_only=fetch_only) print def mode_report(opts): collection = TraceCollection(opts.resultdir) if not collection.empty(): rpt = ReportSet(collection, opts.serial_graph_gen) rpt.generate_reports() else: print "No tracedirs found" def main(): opts = options.Options() opts.parse() if not os.path.isdir(opts.resultdir): try: os.mkdir(opts.resultdir) except: opts.usage("Can't make result directory: %s" % opts.resultdir) inform("Using results directory: %s" % opts.resultdir) if opts.mode == 'list': mode_list(opts) elif opts.mode == 'workloads': mode_workloads(opts) elif opts.mode == 'notes': mode_notes(opts) elif opts.mode == 'loadgen': mode_loadgen(opts) elif opts.mode == 'help': mode_help(opts) elif opts.mode == 'examples': mode_examples(opts) elif opts.mode in ('all', 'fetch', 'trace', 'report'): if opts.mode in ('all', 'trace', 'fetch'): fetch_only = False if opts.mode == 'fetch': fetch_only = True mode_fetch_trace(opts, fetch_only=fetch_only) if opts.mode in ('all', 'report'): mode_report(opts) if __name__ == '__main__': try: main() except KeyboardInterrupt: print >>sys.stderr, "\nCancelled by user...\n" nfsometer-1.9/setup.py0000644000000000000000000000562113125073146015043 0ustar rootroot00000000000000#!/usr/bin/env python """ Copyright 2012 NetApp, Inc. All Rights Reserved, contribution by Weston Andros Adamson This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
""" from distutils.core import setup from setuptools.command.install import install as _install from setuptools.command.sdist import sdist as _sdist import os from nfsometerlib.config import NFSOMETER_VERSION, NFSOMETER_MANPAGE import nfsometerlib.options class sdist(_sdist): def run(self): if not self.dry_run: print "generating manpage %s" % NFSOMETER_MANPAGE o = nfsometerlib.options.Options() o.generate_manpage(NFSOMETER_MANPAGE) # distutils uses old-style classes, so no super() _sdist.run(self) class install(_install): def install_manpage(self, manpage): manpath = os.path.join(self.prefix, 'share', 'man', 'man1') gzpath = os.path.join(manpath, '%s.gz' % manpage) if self.root: manpath = self.root + manpath gzpath = self.root + gzpath print "gzipping manpage %s" % (gzpath,) os.system('mkdir -p %s' % manpath) os.system('gzip -f --stdout "%s" > "%s"' % (manpage, gzpath)) def fix_script(self, scriptname): if not scriptname.endswith('.py'): return old = os.path.join(self.prefix, 'bin', scriptname) new = os.path.join(self.prefix, 'bin', scriptname[:-3]) if self.root: old = self.root + old new = self.root + new print "stripping .py from script %s" % (old,) os.rename(old, new) def run(self): _install.run(self) self.fix_script('nfsometer.py') self.install_manpage(NFSOMETER_MANPAGE) setup(name='nfsometer', version=NFSOMETER_VERSION, description='NFS performance measurement tool', author='Weston Andros Adamson', author_email='dros@monkey.org', license='GPLv2', url='http://wiki.linux-nfs.org/wiki/index.php/NFSometer', cmdclass={'sdist': sdist, 'install': install}, scripts=['nfsometer.py'], packages=['nfsometerlib'], package_dir={'nfsometerlib': 'nfsometerlib'}, package_data={'nfsometerlib': ['html/*.js', 'html/*.html', 'html/*.css', 'workloads/*.nfsometer', 'workloads/*.sh', 'scripts/*.sh'],}, ) nfsometer-1.9/PKG-INFO0000644000000000000000000000041513125073406014421 0ustar rootroot00000000000000Metadata-Version: 1.0 Name: nfsometer Version: 1.9 Summary: NFS performance measurement tool Home-page: http://wiki.linux-nfs.org/wiki/index.php/NFSometer Author: Weston Andros Adamson Author-email: dros@monkey.org License: GPLv2 Description: UNKNOWN Platform: UNKNOWN nfsometer-1.9/setup.cfg0000644000000000000000000000007313125073406015145 0ustar rootroot00000000000000[egg_info] tag_build = tag_date = 0 tag_svn_revision = 0