diff --git a/multimechanize/graph.py b/multimechanize/graph.py
index 8f80130..efb3956 100644
--- a/multimechanize/graph.py
+++ b/multimechanize/graph.py
@@ -19,8 +19,8 @@
# response time graph for raw data
-def resp_graph_raw(nested_resp_list, image_name, dir='./'):
- fig = figure(figsize=(8, 3.3)) # image dimensions
+def resp_graph_raw(nested_resp_list, image_name, graph_width, graph_height, dir='./'):
+ fig = figure(figsize=(graph_width, graph_height)) # image dimensions
ax = fig.add_subplot(111)
ax.set_xlabel('Elapsed Time In Test (secs)', size='x-small')
ax.set_ylabel('Response Time (secs)' , size='x-small')
@@ -38,8 +38,8 @@ def resp_graph_raw(nested_resp_list, image_name, dir='./'):
# response time graph for bucketed data
-def resp_graph(avg_resptime_points_dict, percentile_80_resptime_points_dict, percentile_90_resptime_points_dict, image_name, dir='./'):
- fig = figure(figsize=(8, 3.3)) # image dimensions
+def resp_graph(avg_resptime_points_dict, percentile_80_resptime_points_dict, percentile_90_resptime_points_dict, image_name, graph_width, graph_height, dir='./'):
+ fig = figure(figsize=(graph_width, graph_height)) # image dimensions
ax = fig.add_subplot(111)
ax.set_xlabel('Elapsed Time In Test (secs)', size='x-small')
ax.set_ylabel('Response Time (secs)' , size='x-small')
@@ -82,8 +82,8 @@ def resp_graph(avg_resptime_points_dict, percentile_80_resptime_points_dict, per
# throughput graph
-def tp_graph(throughputs_dict, image_name, dir='./'):
- fig = figure(figsize=(8, 3.3)) # image dimensions
+def tp_graph(throughputs_dict, image_name, graph_width, graph_height, dir='./'):
+ fig = figure(figsize=(graph_width, graph_height)) # image dimensions
ax = fig.add_subplot(111)
ax.set_xlabel('Elapsed Time In Test (secs)', size='x-small')
ax.set_ylabel('Transactions Per Second (count)' , size='x-small')
diff --git a/multimechanize/results.py b/multimechanize/results.py
index dd95693..2601224 100644
--- a/multimechanize/results.py
+++ b/multimechanize/results.py
@@ -15,7 +15,7 @@
-def output_results(results_dir, results_file, run_time, rampup, ts_interval, user_group_configs=None, xml_reports=False):
+def output_results(results_dir, results_file, run_time, rampup, ts_interval, graph_width, graph_height, user_group_configs=None, xml_reports=False):
results = Results(results_dir + results_file, run_time)
report = reportwriter.Report(results_dir)
@@ -62,7 +62,7 @@ def output_results(results_dir, results_file, run_time, rampup, ts_interval, use
t = (resp_stats.elapsed_time, resp_stats.trans_time)
trans_timer_points.append(t)
trans_timer_vals.append(resp_stats.trans_time)
- graph.resp_graph_raw(trans_timer_points, 'All_Transactions_response_times.png', results_dir)
+ graph.resp_graph_raw(trans_timer_points, 'All_Transactions_response_times.png', graph_width, graph_height, results_dir)
    report.write_line('<h2>Transaction Response Summary (secs)</h2>')
report.write_line('')
@@ -111,7 +111,7 @@ def output_results(results_dir, results_file, run_time, rampup, ts_interval, use
percentile_90_resptime_points[interval_start] = pct_90
    report.write_line('</table>')
- graph.resp_graph(avg_resptime_points, percentile_80_resptime_points, percentile_90_resptime_points, 'All_Transactions_response_times_intervals.png', results_dir)
+ graph.resp_graph(avg_resptime_points, percentile_80_resptime_points, percentile_90_resptime_points, 'All_Transactions_response_times_intervals.png', graph_width, graph_height, results_dir)
    report.write_line('<h2>Graphs</h2>')
@@ -130,7 +130,7 @@ def output_results(results_dir, results_file, run_time, rampup, ts_interval, use
splat_series = split_series(trans_timer_points, interval_secs)
for i, bucket in enumerate(splat_series):
throughput_points[int((i + 1) * interval_secs)] = (len(bucket) / interval_secs)
- graph.tp_graph(throughput_points, 'All_Transactions_throughput.png', results_dir)
+ graph.tp_graph(throughput_points, 'All_Transactions_throughput.png', graph_width, graph_height, results_dir)
@@ -145,14 +145,14 @@ def output_results(results_dir, results_file, run_time, rampup, ts_interval, use
custom_timer_vals.append(val)
except KeyError:
pass
- graph.resp_graph_raw(custom_timer_points, timer_name + '_response_times.png', results_dir)
+ graph.resp_graph_raw(custom_timer_points, timer_name + '_response_times.png', graph_width, graph_height, results_dir)
throughput_points = {} # {intervalnumber: numberofrequests}
interval_secs = ts_interval
splat_series = split_series(custom_timer_points, interval_secs)
for i, bucket in enumerate(splat_series):
throughput_points[int((i + 1) * interval_secs)] = (len(bucket) / interval_secs)
- graph.tp_graph(throughput_points, timer_name + '_throughput.png', results_dir)
+ graph.tp_graph(throughput_points, timer_name + '_throughput.png', graph_width, graph_height, results_dir)
    report.write_line('<hr />')
    report.write_line('<h2>Custom Timer: %s</h2>' % timer_name)
@@ -205,7 +205,7 @@ def output_results(results_dir, results_file, run_time, rampup, ts_interval, use
percentile_80_resptime_points[interval_start] = pct_80
percentile_90_resptime_points[interval_start] = pct_90
report.write_line('')
- graph.resp_graph(avg_resptime_points, percentile_80_resptime_points, percentile_90_resptime_points, timer_name + '_response_times_intervals.png', results_dir)
+ graph.resp_graph(avg_resptime_points, percentile_80_resptime_points, percentile_90_resptime_points, timer_name + '_response_times_intervals.png', graph_width, graph_height, results_dir)
    report.write_line('<h2>Graphs</h2>')
@@ -350,4 +350,4 @@ def percentile(seq, percentile):
if __name__ == '__main__':
- output_results('./', 'results.csv', 60, 30, 10)
+ output_results('./', 'results.csv', 60, 30, 10, 8, 3.3)
diff --git a/multimechanize/utilities/newproject.py b/multimechanize/utilities/newproject.py
index 66b9aaa..778b1c8 100755
--- a/multimechanize/utilities/newproject.py
+++ b/multimechanize/utilities/newproject.py
@@ -25,6 +25,10 @@
console_logging = off
xml_report = off
+[reporting]
+graph_width = 8
+graph_height = 3.3
+
[user_group-1]
threads = 3
diff --git a/multimechanize/utilities/run.py b/multimechanize/utilities/run.py
index f667414..cb9ffcf 100755
--- a/multimechanize/utilities/run.py
+++ b/multimechanize/utilities/run.py
@@ -74,7 +74,7 @@ def run_test(project_name, cmd_opts, remote_starter=None):
remote_starter.test_running = True
remote_starter.output_dir = None
- run_time, rampup, results_ts_interval, console_logging, progress_bar, results_database, post_run_script, xml_report, user_group_configs = configure(project_name, cmd_opts)
+ run_time, rampup, results_ts_interval, console_logging, progress_bar, results_database, post_run_script, xml_report, graph_width, graph_height, user_group_configs = configure(project_name, cmd_opts)
run_localtime = time.localtime()
output_dir = '%s/%s/results/results_%s' % (cmd_opts.projects_dir, project_name, time.strftime('%Y.%m.%d_%H.%M.%S/', run_localtime))
@@ -136,7 +136,7 @@ def run_test(project_name, cmd_opts, remote_starter=None):
# all agents are done running at this point
time.sleep(.2) # make sure the writer queue is flushed
print '\n\nanalyzing results...\n'
- results.output_results(output_dir, 'results.csv', run_time, rampup, results_ts_interval, user_group_configs, xml_report)
+ results.output_results(output_dir, 'results.csv', run_time, rampup, results_ts_interval, graph_width, graph_height, user_group_configs, xml_report)
print 'created: %sresults.html\n' % output_dir
if xml_report:
print 'created: %sresults.jtl' % output_dir
@@ -151,7 +151,7 @@ def run_test(project_name, cmd_opts, remote_starter=None):
print 'loading results into database: %s\n' % results_database
import multimechanize.resultsloader
multimechanize.resultsloader.load_results_database(project_name, run_localtime, output_dir, results_database,
- run_time, rampup, results_ts_interval, user_group_configs)
+ run_time, rampup, results_ts_interval, graph_width, graph_height, user_group_configs)
if post_run_script is not None:
print 'running post_run_script: %s\n' % post_run_script
@@ -170,9 +170,9 @@ def run_test(project_name, cmd_opts, remote_starter=None):
def rerun_results(project_name, cmd_opts, results_dir):
output_dir = '%s/%s/results/%s/' % (cmd_opts.projects_dir, project_name, results_dir)
saved_config = '%s/config.cfg' % output_dir
- run_time, rampup, results_ts_interval, console_logging, progress_bar, results_database, post_run_script, xml_report, user_group_configs = configure(project_name, cmd_opts, config_file=saved_config)
+ run_time, rampup, results_ts_interval, console_logging, progress_bar, results_database, post_run_script, xml_report, graph_width, graph_height, user_group_configs = configure(project_name, cmd_opts, config_file=saved_config)
print '\n\nanalyzing results...\n'
- results.output_results(output_dir, 'results.csv', run_time, rampup, results_ts_interval, user_group_configs, xml_report)
+ results.output_results(output_dir, 'results.csv', run_time, rampup, results_ts_interval, graph_width, graph_height, user_group_configs, xml_report)
print 'created: %sresults.html\n' % output_dir
if xml_report:
print 'created: %sresults.jtl' % output_dir
@@ -213,6 +213,15 @@ def configure(project_name, cmd_opts, config_file=None):
xml_report = config.getboolean(section, 'xml_report')
except ConfigParser.NoOptionError:
xml_report = False
+ elif section == 'reporting':
+ try:
+ graph_width = config.getfloat(section, 'graph_width')
+ except ConfigParser.NoOptionError:
+ graph_width = 8
+ try:
+ graph_height = config.getfloat(section, 'graph_height')
+ except ConfigParser.NoOptionError:
+ graph_height = 3.3
else:
threads = config.getint(section, 'threads')
script = config.get(section, 'script')
@@ -220,7 +229,7 @@ def configure(project_name, cmd_opts, config_file=None):
ug_config = UserGroupConfig(threads, user_group_name, script)
user_group_configs.append(ug_config)
- return (run_time, rampup, results_ts_interval, console_logging, progress_bar, results_database, post_run_script, xml_report, user_group_configs)
+ return (run_time, rampup, results_ts_interval, console_logging, progress_bar, results_database, post_run_script, xml_report, graph_width, graph_height, user_group_configs)