Commit abb9e1ef authored by Thorsten Simons's avatar Thorsten Simons

1.3.1 - added timestamp of first and last record to xlsx file; added SQL...

1.3.1 - added timestamp of first and last record to xlsx file; added SQL function ``tp(size, latency)`` to calculate the throughput; adopted queries to use ``tp()``
parent 248524db
......@@ -167,7 +167,7 @@ freeze pane : E5
[500_largest]
comment : The records with the 500 largest requests
query : SELECT request, httpcode, node, latency, size,
tp(size,latency) as 'Bytes/sec', clientip, user,
tp(size,latency) as Bytes_sec, clientip, user,
timestamp, timestampstr, path, namespace
FROM (SELECT * FROM logrecs ORDER BY size DESC LIMIT 500)
ORDER BY request, httpcode, node
......@@ -175,10 +175,11 @@ freeze pane : D5
[500_worst_latency]
comment : The records with the 500 worst latencies
query : SELECT request, httpcode, latency, size, clientip, user,
timestamp, timestampstr, path, namespace
FROM (SELECT * FROM logrecs ORDER BY latency DESC LIMIT 500)
ORDER BY request, httpcode
query : SELECT request, httpcode, latency, size, tp(size,latency) as Bytes_sec,
clientip, user, timestamp, timestampstr, path, namespace
FROM (SELECT * FROM logrecs WHERE size > 0
ORDER BY latency DESC LIMIT 500)
ORDER BY request, httpcode
freeze pane : C5
[percentile_req]
......@@ -215,18 +216,20 @@ query : SELECT request, count(*),
freeze pane : C5
[500_highest_throughput]
comment : The 500 records with the highest throughput (Bytes/sec) for objects >= 1 Byte
query : SELECT * from
(select request, node, clientip, httpcode,
tp(size, latency) as 'Bytes/sec', size,
latency from logrecs where size >= 1)
order by 'Bytes/sec' desc limit 500;
freeze pane : E5
comment : The 500 records with the highest throughput (Bytes/sec)
query : SELECT request, httpcode, clientip, tp(size, latency) as Bytes_sec,
latency, size, user, timestamp, timestampstr, path, namespace
FROM logrecs
WHERE size > 0 and latency > 0
ORDER BY Bytes_sec DESC LIMIT 500;
freeze pane : D5
[percentile_throughput_b]
comment : No. of records per request, with percentiles on throughput (Bytes/sec) for objects >= 10MB
query : SELECT request,
count(*), min(size), avg(size), max(size),
[percentile_throughput_128kb]
comment : No. of records per request, with percentiles on throughput (Bytes/sec) for objects >= 128KB
query : SELECT request, count(*),
min(tp(size, latency)) as 'min(B/sec)',
avg(tp(size, latency)) as 'avg(B/sec)',
max(tp(size, latency)) as 'max(B/sec)',
percentile(tp(size, latency), 10) as 'pctl-10 (B/sec)',
percentile(tp(size, latency), 20) as 'pctl-20 (B/sec)',
percentile(tp(size, latency), 30) as 'pctl-30 (B/sec)',
......@@ -239,5 +242,6 @@ query : SELECT request,
percentile(tp(size, latency), 95) as 'pctl-95 (B/sec)',
percentile(tp(size, latency), 99) as 'pctl-99 (B/sec)',
percentile(tp(size, latency), 99.9) as 'pctl-99.9 (B/sec)'
FROM logrecs where size >= 10048576 and latency > 500 GROUP BY request
FROM logrecs where size >= 131072 GROUP BY request
freeze pane : C5
......@@ -129,9 +129,9 @@ class Xlsx(Csv):
# write the comment into the header, plus a link to the CONTENT sheet
self.ws.set_row(0, 20, self.title0)
self.ws.merge_range(0, 0, 0, 9, comment)
self.ws.merge_range(0, 10, 0, 12, '', self.title0)
self.ws.write_url(0, 10, 'internal:CONTENT!B2', self.linkback,
self.ws.merge_range(0, 0, 0, 7, comment)
self.ws.merge_range(0, 8, 0, 10, '', self.title0)
self.ws.write_url(0, 8, 'internal:CONTENT!B2', self.linkback,
'<<< back to CONTENT <<<')
# insert a spacer row
self.ws.set_row(1, 8, self.title0)
......
......@@ -27,8 +27,8 @@ class Gvars:
"""
# version control
s_version = "1.3.0"
s_builddate = '2017-10-03'
s_version = "1.3.1"
s_builddate = '2017-10-05'
s_build = "{}/Sm".format(s_builddate)
s_minPython = "3.4.3"
s_description = "hcprequestanalytics"
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment