Commit e193f063 authored by Thorsten Simons

1.2.3 - more XLSX luxury, more queries

parent 7bb15c56
......@@ -3,7 +3,7 @@ Release History
**1.2.3 2017-09-29**
* some more XLsX luxury
* some more xlsx luxury
* added more queries
**1.2.2 2017-09-26**
......
......@@ -254,7 +254,10 @@ class DB():
first = True
for rec in data:
if first:
_csv.newsheet(mps[fu], list(rec.keys()))
_csv.newsheet(mps[fu], list(rec.keys()),
comment=self.queries.c.get(mps[fu],
'comment',
fallback=''))
first = False
_csv.writerow(row2dict(rec))
......
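The comment= argument added above is read from the per-query configuration shown in the next hunk. A minimal standalone sketch of that pattern, assuming self.queries.c is a configparser.ConfigParser loaded from the queries file (the file name queries.cfg and the loop are illustrative only):

    import configparser

    cfg = configparser.ConfigParser()
    cfg.read('queries.cfg')  # hypothetical name for the queries config shown below

    for section in cfg.sections():
        query = cfg.get(section, 'query')
        # fallback='' mirrors the call added in the hunk above: a section
        # without a 'comment' option yields '' instead of NoOptionError
        comment = cfg.get(section, 'comment', fallback='')
        freeze = cfg.get(section, 'freeze pane', fallback='A2')
        print(section, freeze, comment)

With configparser's default interpolation a literal % has to be doubled, which is presumably why the printf() calls in the queries below read %%s.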
......@@ -24,15 +24,15 @@
[count]
comment : count all records
query : SELECT count(*) FROM logrecs
freeze pane : A3
freeze pane : A5
[count_day]
comment : count-per-day analysis
query : SELECT printf("%%s/%%s", substr(timestampstr, 4, 3),
substr(timestampstr, 1, 2)) AS day,
substr(timestampstr, 1, 2)) AS day,
count(*)
FROM logrecs GROUP BY substr(timestampstr, 0, 7)
freeze pane : A3
freeze pane : A5
[clientip]
comment : per-clientIP analysis
......@@ -41,7 +41,7 @@ query : SELECT clientip, count(*),
min(latency), avg(latency),
max(latency)
FROM logrecs GROUP BY clientip
freeze pane : C3
freeze pane : C5
[clientip_httpcode]
comment : httpcode-per-clientIP analysis
......@@ -50,7 +50,7 @@ query : SELECT clientip, httpcode, count(*),
min(latency), avg(latency),
max(latency)
FROM logrecs GROUP BY clientip, httpcode
freeze pane : D3
freeze pane : D5
[clientip_request_httpcode]
comment : httpcode-per-request-per-clientIP analysis
......@@ -59,7 +59,7 @@ query : SELECT clientip, request, httpcode, count(*),
min(latency), avg(latency),
max(latency)
FROM logrecs GROUP BY clientip, request, httpcode
freeze pane : E3
freeze pane : E5
[req]
comment : per-request analysis
......@@ -68,7 +68,7 @@ query : SELECT request, count(*),
min(latency), avg(latency),
max(latency)
FROM logrecs GROUP BY request
freeze pane : C3
freeze pane : C5
[req_httpcode]
comment : httpcode-per-request analysis
......@@ -77,7 +77,7 @@ query : SELECT request, httpcode, count(*),
min(latency), avg(latency),
max(latency)
FROM logrecs GROUP BY request, httpcode
freeze pane : C3
freeze pane : C5
[req_httpcode_node]
comment : node-per-httpcode-per-request analysis
......@@ -86,7 +86,7 @@ query : SELECT request, httpcode, node, count(*),
min(latency), avg(latency),
max(latency)
FROM logrecs GROUP BY request, httpcode, node
freeze pane : E3
freeze pane : E5
[node]
comment : per-node analysis
......@@ -103,7 +103,7 @@ query : SELECT node, request, count(*),
min(latency), avg(latency),
max(latency)
FROM logrecs GROUP BY node, request
freeze pane : D3
freeze pane : D5
[node_req_httpcode]
comment : node-per-request-per-httpcode analysis
......@@ -112,7 +112,7 @@ query : SELECT node, request, httpcode, count(*),
min(latency), avg(latency),
max(latency)
FROM logrecs GROUP BY node, request, httpcode
freeze pane : E3
freeze pane : E5
[day_req]
comment : request-per-day analysis
......@@ -123,7 +123,7 @@ query : SELECT printf("%%s/%%s", substr(timestampstr, 4, 3),
min(latency), avg(latency),
max(latency)
FROM logrecs GROUP BY substr(timestampstr, 0, 7), request
freeze pane : D3
freeze pane : D5
[day_req_httpcode]
comment : httpcode-per-request-per-day analysis
......@@ -134,15 +134,15 @@ query : SELECT printf("%%s/%%s", substr(timestampstr, 4, 3),
min(latency), avg(latency),
max(latency)
FROM logrecs GROUP BY substr(timestampstr, 0, 7), request, httpcode
freeze pane : E3
freeze pane : E5
[biggest_500]
[size_biggest_500]
comment : the 500 recs with the biggest object size
query : SELECT request, httpcode, node, latency, size, clientip, user,
timestamp, timestampstr, path, namespace
FROM (SELECT * FROM logrecs ORDER BY size DESC LIMIT 500)
ORDER BY request, httpcode, node
freeze pane : C3
freeze pane : C5
[latency_worst_100]
comment : the 100 recs with the worst latency
......@@ -150,26 +150,65 @@ query : SELECT request, httpcode, latency, size, clientip, user,
timestamp, timestampstr, path, namespace
FROM (SELECT * FROM logrecs ORDER BY latency DESC LIMIT 100)
ORDER BY request, httpcode
freeze pane : C3
freeze pane : C5
[percentile_req]
comment : per-request analysis, including percentiles
comment : per-request analysis, including percentiles for size and latency
query : SELECT request, count(*),
min(size), avg(size), max(size),
percentile(size, 10), percentile(size, 20),
percentile(size, 30), percentile(size, 40),
percentile(size, 50), percentile(size, 60),
percentile(size, 70), percentile(size, 80),
percentile(size, 90), percentile(size, 95),
percentile(size, 99), percentile(size, 99.9),
percentile(size, 10) as 'pctl-10 (size)',
percentile(size, 20) as 'pctl-20 (size)',
percentile(size, 30) as 'pctl-30 (size)',
percentile(size, 40) as 'pctl-40 (size)',
percentile(size, 50) as 'pctl-50 (size)',
percentile(size, 60) as 'pctl-60 (size)',
percentile(size, 70) as 'pctl-70 (size)',
percentile(size, 80) as 'pctl-80 (size)',
percentile(size, 90) as 'pctl-90 (size)',
percentile(size, 95) as 'pctl-95 (size)',
percentile(size, 99) as 'pctl-99 (size)',
percentile(size, 99.9) as 'pctl-99.9 (size)',
min(latency), avg(latency),
max(latency),
percentile(latency, 10), percentile(latency, 20),
percentile(latency, 30), percentile(latency, 40),
percentile(latency, 50), percentile(latency, 60),
percentile(latency, 70), percentile(latency, 80),
percentile(latency, 90), percentile(latency, 95),
percentile(latency, 99), percentile(latency, 99.9)
percentile(latency, 10) as 'pctl-10 (latency)',
percentile(latency, 20) as 'pctl-20 (latency)',
percentile(latency, 30) as 'pctl-30 (latency)',
percentile(latency, 40) as 'pctl-40 (latency)',
percentile(latency, 50) as 'pctl-50 (latency)',
percentile(latency, 60) as 'pctl-60 (latency)',
percentile(latency, 70) as 'pctl-70 (latency)',
percentile(latency, 80) as 'pctl-80 (latency)',
percentile(latency, 90) as 'pctl-90 (latency)',
percentile(latency, 95) as 'pctl-95 (latency)',
percentile(latency, 99) as 'pctl-99 (latency)',
percentile(latency, 99.9) as 'pctl-99.9 (latency)'
FROM logrecs GROUP BY request
freeze pane : C3
freeze pane : C5
[throughput_highest_500]
comment : the 500 requests with the highest throughput (KB/sec) for objects >= 1 Byte
query : SELECT * from
(select request, node, clientip, httpcode,
size/(latency/1000)/1024 as 'KB/sec', size,
latency from logrecs where size >= 1)
        order by "KB/sec" desc limit 500;
freeze pane : E5
[percentile_throughput_kb]
comment : per-request analysis, percentiles on throughput (KB/sec) for objects >= 10MB
query : SELECT request,
count(*), min(size), avg(size), max(size),
percentile(size/(latency/1000)/1024, 10) as 'pctl-10 (KB/sec)',
percentile(size/(latency/1000)/1024, 20) as 'pctl-20 (KB/sec)',
percentile(size/(latency/1000)/1024, 30) as 'pctl-30 (KB/sec)',
percentile(size/(latency/1000)/1024, 40) as 'pctl-40 (KB/sec)',
percentile(size/(latency/1000)/1024, 50) as 'pctl-50 (KB/sec)',
percentile(size/(latency/1000)/1024, 60) as 'pctl-60 (KB/sec)',
percentile(size/(latency/1000)/1024, 70) as 'pctl-70 (KB/sec)',
percentile(size/(latency/1000)/1024, 80) as 'pctl-80 (KB/sec)',
percentile(size/(latency/1000)/1024, 90) as 'pctl-90 (KB/sec)',
percentile(size/(latency/1000)/1024, 95) as 'pctl-95 (KB/sec)',
percentile(size/(latency/1000)/1024, 99) as 'pctl-99 (KB/sec)',
percentile(size/(latency/1000)/1024, 99.9) as 'pctl-99.9 (KB/sec)'
         FROM logrecs where size >= 10485760 and latency > 500 GROUP BY request
freeze pane : C5
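The percentile(value, pct) function used in the queries above is not part of Python's built-in sqlite3 module; the project presumably registers it as a user-defined aggregate (or loads SQLite's percentile extension). A hedged sketch of one way to provide it, using a nearest-rank calculation (the database file name is hypothetical, and the real implementation may interpolate between neighbouring values instead):

    import sqlite3

    class Percentile:
        """Aggregate collecting values and returning the pct-th percentile."""
        def __init__(self):
            self.values = []
            self.pct = None

        def step(self, value, pct):
            if value is not None:
                self.values.append(value)
            self.pct = pct              # same pct for every row of one call

        def finalize(self):
            if not self.values:
                return None
            self.values.sort()
            # nearest-rank index; an interpolating variant would blend neighbours
            idx = int(round(self.pct / 100 * (len(self.values) - 1)))
            return self.values[idx]

    conn = sqlite3.connect('hcprequestanalytics.db')   # hypothetical DB file
    conn.create_aggregate('percentile', 2, Percentile)

The throughput expression size/(latency/1000)/1024 used in the last two sections converts a byte count and a latency in milliseconds into KB per second.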
......@@ -94,29 +94,38 @@ class Xlsx(Csv):
'readthedocs.io',
})
self.bold = self.wb.add_format({'bold': True})
self.title0 = self.wb.add_format({'bold': False,
'font_size': 14,
'bg_color': 'yellow'})
self.title = self.wb.add_format({'bold': True,
'font_size': 14,
'bg_color': 'yellow',
'bottom': 5})
self.num = self.wb.add_format({'num_format': '#,##0'})
def newsheet(self, name, fieldnames):
def newsheet(self, name, fieldnames, comment=''):
"""
Create a new worksheet
        :param name: the file's base name
:param fieldnames: a list of field names
:param comment: a comment to be added
"""
self.fieldnames = fieldnames
self.row = 0
self.ws = self.wb.add_worksheet(name=name)
# write the header
self.ws.write_row(self.row, 0, fieldnames, self.title)
# write the comment into the header
self.ws.set_row(0, 20, self.title0)
self.ws.merge_range(0, 0, 0, 9, comment)
# insert a spacer row
self.ws.set_row(1, 8, self.title0)
# write the field names
self.ws.set_row(2, 20, self.title)
self.ws.write_row(2, 0, fieldnames)
# insert a spacer row
self.ws.set_row(1, 8)
self.row += 2
self.ws.set_row(3, 8)
self.row = 4
def writerow(self, row):
"""
......
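A minimal sketch of the sheet layout now produced by newsheet(), assuming only xlsxwriter and the formats defined in __init__ above (the output file name, sheet name and sample rows are illustrative). It also shows why the 'freeze pane' values in the queries file moved from row 3 to row 5: the first four rows now hold the comment banner, a spacer, the field names and another spacer, so the data starts in row 5.

    import xlsxwriter

    wb = xlsxwriter.Workbook('example.xlsx')            # hypothetical file name
    title0 = wb.add_format({'bold': False, 'font_size': 14, 'bg_color': 'yellow'})
    title = wb.add_format({'bold': True, 'font_size': 14, 'bg_color': 'yellow',
                           'bottom': 5})

    ws = wb.add_worksheet('count_day')
    fieldnames = ['day', 'count(*)']                    # illustrative header

    ws.set_row(0, 20, title0)                           # row 1: comment banner
    ws.merge_range(0, 0, 0, 9, 'count-per-day analysis')
    ws.set_row(1, 8, title0)                            # row 2: thin spacer
    ws.set_row(2, 20, title)                            # row 3: field names
    ws.write_row(2, 0, fieldnames)
    ws.set_row(3, 8)                                    # row 4: thin spacer
    ws.freeze_panes('A5')                               # data starts in row 5
    ws.write_row(4, 0, ['Sep/29', 12345])               # first data row
    wb.close()

freeze_panes('A5') keeps the four header rows visible while the data scrolls; the column part of the values in the queries file (A, C, D, E) still controls how many key columns stay pinned.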