Commit e9f1caed authored by Elger Jonker's avatar Elger Jonker

removing legacy code and refactoring out unneeded scanmanager class


Former-commit-id: afd6e44e
parent 2d7fb8b6
......@@ -3,7 +3,7 @@ from argparse import ArgumentTypeError
from django.core.management.base import BaseCommand
from failmap.map.rating import calculate_map_data
from failmap.map.report import calculate_map_data
log = logging.getLogger(__package__)
......
......@@ -3,7 +3,7 @@ from argparse import ArgumentTypeError
from django.core.management.base import BaseCommand
from failmap.map.rating import calculate_vulnerability_statistics
from failmap.map.report import calculate_vulnerability_statistics
log = logging.getLogger(__package__)
......
from django.core.management.base import BaseCommand
from failmap.map.rating import default_organization_rating
from failmap.map.report import default_organization_rating
class Command(BaseCommand):
......
......@@ -2,7 +2,7 @@ import logging
from failmap.app.management.commands._private import TaskCommand
from ...rating import rebuild_organization_ratings
from ...report import rebuild_organization_ratings
log = logging.getLogger(__name__)
......
......@@ -2,7 +2,7 @@ import logging
from django.core.management.base import BaseCommand
from failmap.map.rating import create_timeline, inspect_timeline
from failmap.map.report import create_timeline, inspect_timeline
from failmap.organizations.models import Url
log = logging.getLogger(__package__)
......
......@@ -2,7 +2,7 @@ import logging
from celery import group
from failmap.map.rating import (calculate_map_data, calculate_vulnerability_statistics,
from failmap.map.report import (calculate_map_data, calculate_vulnerability_statistics,
rebuild_organization_ratings, rebuild_url_ratings)
from failmap.organizations.models import Organization, Url
from failmap.scanners.scanner.scanner import q_configurations_to_report
......
......@@ -22,8 +22,9 @@ log = logging.getLogger(__package__)
ENDPOINT_SCAN_TYPES = ['Strict-Transport-Security', 'X-Content-Type-Options', 'X-Frame-Options',
'X-XSS-Protection', 'tls_qualys', 'plain_https', 'ftp', 'tls_qualys_certificate_trusted',
'X-XSS-Protection', 'plain_https', 'ftp', 'tls_qualys_certificate_trusted',
'tls_qualys_encryption_quality']
URL_SCAN_TYPES = ['DNSSEC']
ALL_SCAN_TYPES = URL_SCAN_TYPES + ENDPOINT_SCAN_TYPES
......@@ -704,13 +705,8 @@ def rate_timeline(timeline, url: Url):
these_endpoint_scans = {}
if endpoint.id in endpoint_scans:
for scan in endpoint_scans[endpoint.id]:
if isinstance(scan, TlsQualysScan):
these_endpoint_scans['tls_qualys'] = scan
if isinstance(scan, EndpointGenericScan):
if scan.type in ['Strict-Transport-Security', 'X-Content-Type-Options',
'X-Frame-Options', 'X-XSS-Protection', 'plain_https', 'ftp',
'tls_qualys_certificate_trusted', 'tls_qualys_encryption_quality']:
these_endpoint_scans[scan.type] = scan
if scan.type in ENDPOINT_SCAN_TYPES:
these_endpoint_scans[scan.type] = scan
# enrich the ratings with previous ratings, without overwriting them.
for endpoint_scan_type in ENDPOINT_SCAN_TYPES:
......@@ -844,9 +840,8 @@ def rate_timeline(timeline, url: Url):
if url.id in url_scans:
for scan in url_scans[url.id]:
if isinstance(scan, UrlGenericScan):
if scan.type in ['DNSSEC']:
these_url_scans[scan.type] = scan
if scan.type in ['DNSSEC']:
these_url_scans[scan.type] = scan
# enrich the ratings with previous ratings, which saves queries.
for url_scan_type in url_scan_types:
......
......@@ -17,7 +17,7 @@ from leaflet.admin import LeafletGeoAdminMixin
import failmap.scanners.scanner.http as scanner_http
from failmap import types
from failmap.map.rating import OrganizationRating, UrlRating
from failmap.map.report import OrganizationRating, UrlRating
from failmap.scanners.admin import UrlIp
from failmap.scanners.models import Endpoint, EndpointGenericScan, TlsQualysScan, UrlGenericScan
from failmap.scanners.scanner import dns, dnssec, onboard, plain_http, security_headers, tls_qualys
......@@ -527,7 +527,7 @@ class UrlAdmin(ActionMixin, ImportExportModelAdmin, nested_admin.NestedModelAdmi
actions.append('declare_dead')
def timeline_debug(self, request, queryset):
from failmap.map.rating import create_timeline, inspect_timeline
from failmap.map.report import create_timeline, inspect_timeline
from django.http import HttpResponse
content = "<pre>"
......
import logging
from django.core.management.base import BaseCommand
from failmap.organizations.models import Organization
log = logging.getLogger(__package__)
class Command(BaseCommand):
help = 'Changes country "netherlands" to "NL"'
def handle(self, *args, **options):
countries = Organization.objects.all().filter(country="netherlands")
for country in countries:
country.country = "NL"
country.save()
import logging
from django.core.exceptions import ObjectDoesNotExist
from django.core.management.base import BaseCommand
from failmap.scanners.models import Endpoint, EndpointGenericScan, TlsQualysScan, Url
log = logging.getLogger(__package__)
class Command(BaseCommand):
# todo: url on save, change to lower
help = 'Get some ideas about relationships with uppercase characters (they should be lower case)'
def handle(self, *args, **options):
urls = Url.objects.all().filter(url__regex="[A-Z]")
for url in urls:
print(url)
try:
lowercase_urls = Url.objects.all().filter(url=url.url.lower())
if lowercase_urls:
print("Has lower case variant %s, deleting uppercase." % lowercase_urls)
endpoints = Endpoint.objects.all().filter(url=url)
for endpoint in endpoints:
EndpointGenericScan.objects.all().filter(endpoint=endpoint).delete()
TlsQualysScan.objects.all().filter(endpoint=endpoint).delete()
url.delete() # doesn't this result in inconsistent relations?
except ObjectDoesNotExist:
print("Has NO lower case variant, updating to lowercase.")
url.url = url.url.lower()
url.save()
import datetime
import logging
import pytz
from django.core.management.base import BaseCommand
from failmap.scanners.models import Endpoint
log = logging.getLogger(__package__)
class Command(BaseCommand):
"""
Undoes certain things that happened recently. This is a specialist's tool that is usually a one-shot.
It can fix certain issues that were caused by mass-scanning, for example when the network died and as a result
a lot of urls or endpoints died.
As urls, endpoints and organizations stack over time (being dead etc), some scanners will have already created a
new endpoint to replace the one that died accidentally. For this you can use the "merge" command, which is also
a specialist's tool that requires reading the manual.
Usually run this script after merge:
failmap merge
failmap undo
"""
help = 'Merges similar things that have been dead for a very short while.'
def handle(self, *args, **options):
# a short warning to help avoid running this command by accident.
# in a next commit this command should be empty.
answer = input("Do you want to undo all endpoint deaths on IPv6/4 in the last 4 days?")
if answer == "YES":
http_scanner_undo_endpoint_deaths(in_the_last_n_days=4, ip_version=4)
http_scanner_undo_endpoint_deaths(in_the_last_n_days=4, ip_version=6)
def http_scanner_undo_endpoint_deaths(in_the_last_n_days: int = 1, ip_version: int = 6):
"""
Sets all ipv6 or ipv4 endpoints back to alive that were killed in the past N days.
Run this if you did a scan for ipv6 networks when no ipv6 network was available.
:param in_the_last_n_days: number of days between now and the moment a mistake was made
:param ip_version: 4 or 6
:return:
"""
# the endpoint cannot have a "new" endpoint within this timeframe. If so, you should merge.
dead_endpoints = Endpoint.objects.all().filter(
is_dead=True,
is_dead_since__gte=datetime.datetime.now(pytz.utc) - datetime.timedelta(days=in_the_last_n_days),
ip_version=ip_version,
is_dead_reason="Not found in HTTP Scanner anymore."
)
# can't revive if there is a new endpoint already; those should be merged (as they contain all kinds of related data)
for dead_endpoint in dead_endpoints:
has_similar_alive_endpoints = Endpoint.objects.all().filter(
is_dead=False, # given only one can be alive at any point.
ip_version=dead_endpoint.ip_version,
protocol=dead_endpoint.protocol,
port=dead_endpoint.port,
url=dead_endpoint.url
)
if not has_similar_alive_endpoints:
log.info("Undoing death on %s" % dead_endpoint)
dead_endpoint.is_dead = False
dead_endpoint.is_dead_reason = ""
dead_endpoint.is_dead_since = None
dead_endpoint.save()
else:
log.info("Can't undo death on %s as there is a similar alive. Try and merge." % dead_endpoint)
......@@ -2,7 +2,7 @@ import logging
from django.core.management.base import BaseCommand
from failmap.map.rating import (add_organization_rating, create_timeline, inspect_timeline,
from failmap.map.report import (add_organization_rating, create_timeline, inspect_timeline,
rebuild_url_ratings)
from failmap.organizations.models import Organization, Url
from failmap.scanners.models import Endpoint
......@@ -62,7 +62,7 @@ def test_osaft():
def rebuild_ratings():
from failmap.map.rating import rebuild_organization_ratings
from failmap.map.report import rebuild_organization_ratings
organization = Organization.objects.filter(name="Arnhem").get()
rebuild_url_ratings(list(Url.objects.all().filter(organization=organization)))
......@@ -168,7 +168,7 @@ def develop_determineratings():
# return
from datetime import datetime
import pytz
from failmap.map.rating import relevant_endpoints_at_timepoint
from failmap.map.report import relevant_endpoints_at_timepoint
u = Url.objects.all().filter(url='www.arnhem.nl').get()
relevant_endpoints_at_timepoint(url=u, when=datetime(2016, 12, 31, 0, 0, tzinfo=pytz.utc))
......
import logging
from django.core.management.base import BaseCommand
log = logging.getLogger(__package__)
class Command(BaseCommand):
"""
This serves as documentation for a legacy issue. If the issue pops up again, these queries and documentation
might help solve the problem, though it is doubtful it will ever pop up again.
We got a support request stating that scans were not updating. We confirmed that the website showed outdated data.
Looking at the database on the TLS scans, we found two records on the same endpoint.
12418 IPv4 https/443 | [13708] langparkeren.amsterdam.nl A A Ready 27 oktober 2017 14:35 22 september 2017
12417 IPv4 https/443 | [13708] langparkeren.amsterdam.nl A A Ready 5 mei 2018 20:31 22 september 2017
The second one, with the lower ID, has the TLS scan updates. So scans were performed. The one with the higher ID
is automatically selected in the map and report result set for efficiency reasons.
Weird things:
- Both first ratings have the same creation date.
- The system was not meant to create multiple identical scans on the same endpoint.
This seems to be a bug with endpoints created at the same moment. Amsterdam has another endpoint that is not
updated correctly: avproxy.vga.amsterdam.nl, on ipv4, showing a scan of week 2017/43. Here we see the same pattern:
there are two created at the same moment and the TLS scan is saved to the oldest one.
We can fix this in several ways:
- in TLS only save it to the one with the highest ID. This seems to be a garbage fix as there should not be dupes
in the first place.
- figure out how many there are with the same creation date that point to the same endpoint (and learn from it).
we're choosing this one.
The first check is to gain insight into the scope of the problem, and to see if it still occurs.
It seems it no longer occurs, as the creation dates are well in the past.
It seems that this issue comes from removing the IP from the endpoint table. These urls are known for having
more than one IP (sip, iburgerzaken...) and many of them still have 0 ratings from qualys which we stopped using.
Issue:
Sometimes the rating gets appended to the lower ID, sometimes to the higher. That indicates that this part of
the code is not deterministic. The reason not everyone complained is that the number of urls of amsterdam is
much larger, so they are much more likely to notice when things are off. Thankfully they did and now we can fix it :)
This issue also happened on endpointgenericscan.
IPv4 https/443 | [4832] www.amsterdam.nl X-Content-Type-Options True nosniff 12 november 2017 . 3 oktober 2017 13:49
IPv4 https/443 | [4832] www.amsterdam.nl X-Content-Type-Options True nosniff 8 mei 2018 . 3 oktober 2017 13:49
Two actions:
- We should clean the DB, remove the doubles and retain the one with the most recent scan.
? We should match with the highest ID when the rating_determined_on are the same (and some integrity bug sneaked in)
- Should we be wary and enforce that the same rating_determined_on and endpoint_id cannot occur twice? We can make
a rule for this so we will see exceptions happen.
"""
help = __doc__
def handle(self, *args, **options):
"""
# [X] Verified that these manual queries return the same on MySQL and SQLite.
# Remediation strategy:
# Find all endpoints of a certain double, keep the one with the last scan moment, ditch the others.
:param args:
:param options:
:return:
"""
# Amsterdam has 82 doubles.
# There is a total of 483 doubles. With 1469 endpoints. Removing all doubles: 1469 - 483 = 986 removed.
"""
MYSQL / SQLITE:
SELECT endpoint_id, url, COUNT(rating_determined_on) FROM scanner_tls_qualys
INNER JOIN scanners_endpoint ON (endpoint_id = scanners_endpoint.id)
INNER JOIN url ON (url_id = url.id)
WHERE url LIKE '%%'
group by endpoint_id, rating_determined_on
HAVING COUNT(rating_determined_on) > 1
ORDER BY scan_date DESC
"""
# deletes 181 rows...
# Only saves the newer record.
# This does not work in one step in MySQL. So we need a temp table:
"""
SQLITE:
DELETE FROM scanner_tls_qualys WHERE ID IN (SELECT one.id FROM scanner_tls_qualys AS one
INNER JOIN scanner_tls_qualys AS two ON (one.endpoint_id = two.endpoint_id)
WHERE one.rating_determined_on = two.rating_determined_on
AND one.last_scan_moment < two.last_scan_moment)
"""
"""
MYSQL:
CREATE TEMPORARY TABLE IF NOT EXISTS double_tls_different AS (SELECT one.id FROM scanner_tls_qualys AS one
INNER JOIN scanner_tls_qualys AS two ON (one.endpoint_id = two.endpoint_id)
WHERE one.rating_determined_on = two.rating_determined_on
AND one.last_scan_moment < two.last_scan_moment)
DELETE FROM scanner_tls_qualys WHERE ID IN (SELECT id FROM double_tls_different);
"""
# now delete the ones with the same scan moment AND same rating determined on.
# deletes 805 records. For a total of 986 records removed. Just like predicted.
"""
SQLITE:
DELETE FROM scanner_tls_qualys WHERE ID IN (SELECT one.id FROM scanner_tls_qualys AS one
INNER JOIN scanner_tls_qualys AS two ON (one.endpoint_id = two.endpoint_id)
WHERE one.rating_determined_on = two.rating_determined_on AND
one.last_scan_moment = two.last_scan_moment
AND one.id < two.id)
"""
"""
MYSQL:
CREATE TEMPORARY TABLE IF NOT EXISTS double_tls_same AS (SELECT one.id FROM scanner_tls_qualys AS one
INNER JOIN scanner_tls_qualys AS two ON (one.endpoint_id = two.endpoint_id)
WHERE one.rating_determined_on = two.rating_determined_on AND
one.last_scan_moment = two.last_scan_moment
AND one.id < two.id)
DELETE FROM scanner_tls_qualys WHERE ID IN (SELECT id FROM double_tls_same);
"""
#
# Endpoint genericscans.
# 500 rows, 1301 endpoints (counted in excel). There will be 1301-500 removed = 801
"""
SQLITE / MYSQL:
SELECT COUNT(rating_determined_on) FROM scanners_endpointgenericscan
INNER JOIN scanners_endpoint ON (endpoint_id = scanners_endpoint.id)
INNER JOIN url ON (url_id = url.id)
WHERE url LIKE '%%'
group by type, endpoint_id, rating_determined_on
HAVING COUNT(rating_determined_on) > 1
"""
# deletes 724 rows in endpointgenericscan, only saving the newer ones
"""
SQLITE:
DELETE FROM scanners_endpointgenericscan WHERE ID IN (
SELECT one.id FROM scanners_endpointgenericscan AS one
INNER JOIN scanners_endpointgenericscan AS two
ON (one.endpoint_id = two.endpoint_id AND one.type = two.type)
WHERE one.rating_determined_on = two.rating_determined_on
AND one.last_scan_moment < two.last_scan_moment)
"""
"""
MYSQL:
CREATE TEMPORARY TABLE IF NOT EXISTS double_epg_different AS (
SELECT one.id FROM scanners_endpointgenericscan AS one
INNER JOIN scanners_endpointgenericscan AS two
ON (one.endpoint_id = two.endpoint_id AND one.type = two.type)
WHERE one.rating_determined_on = two.rating_determined_on
AND one.last_scan_moment < two.last_scan_moment
)
DELETE FROM scanners_endpointgenericscan WHERE ID IN (SELECT id FROM double_epg_different);
"""
# deletes double records, only saving the one with the highest ID.
# deletes 77 rows. Totalling for 801 double rows.
"""
SQLITE:
DELETE FROM scanners_endpointgenericscan WHERE ID IN (
SELECT one.id FROM scanners_endpointgenericscan AS one
INNER JOIN scanners_endpointgenericscan AS two
ON (one.endpoint_id = two.endpoint_id
AND one.type = two.type
AND one.rating_determined_on = two.rating_determined_on
AND one.last_scan_moment = two.last_scan_moment)
AND one.id < two.id)
"""
"""
MYSQL
CREATE TEMPORARY TABLE IF NOT EXISTS double_epg_same AS (
SELECT one.id FROM scanners_endpointgenericscan AS one
INNER JOIN scanners_endpointgenericscan AS two
ON (one.endpoint_id = two.endpoint_id
AND one.type = two.type
AND one.rating_determined_on = two.rating_determined_on
AND one.last_scan_moment = two.last_scan_moment)
AND one.id < two.id
)
DELETE FROM scanners_endpointgenericscan WHERE ID IN (SELECT id FROM double_epg_same);
"""
import logging
from django.core.exceptions import ObjectDoesNotExist
from django.core.management.base import BaseCommand
from failmap.scanners.models import Endpoint, Url
log = logging.getLogger(__package__)
class Command(BaseCommand):
help = 'Some endpoints have a different IP every day. In some cases this was built up, while ' \
'the endpoints were never declared dead: so at one point dozens of endpoints are seen ' \
'as dead. This script cleans up large chunks of endpoints that were declared dead at ' \
'the same time. Be very careful of running this.'
"""
Example endpoints
URL                     Discovered on               Is dead since
opendata.arnhem.nl 25 december 2016 22:45 s 28 augustus 2017 17:10 1
opendata.arnhem.nl 25 december 2016 22:45 s 28 augustus 2017 17:10 1
opendata.arnhem.nl 22 december 2016 21:54 28 augustus 2017 17:10 1
opendata.arnhem.nl 22 december 2016 21:54 s 28 augustus 2017 17:10 1
opendata.arnhem.nl 21 december 2016 21:37 s 28 augustus 2017 17:10 1
opendata.arnhem.nl 9 december 2016 18:01 s 28 augustus 2017 17:10 1
opendata.arnhem.nl 9 december 2016 18:01 s 28 augustus 2017 17:10 1
opendata.arnhem.nl 6 december 2016 16:56 s 28 augustus 2017 17:10 1
opendata.arnhem.nl 6 december 2016 16:56 s 28 augustus 2017 17:10 1
opendata.arnhem.nl 4 december 2016 15:50 s 28 augustus 2017 17:10 1
opendata.arnhem.nl 4 december 2016 15:50 s 28 augustus 2017 17:10 1
opendata.arnhem.nl 2 december 2016 15:01 s 28 augustus 2017 17:10 1
opendata.arnhem.nl 2 december 2016 15:01 28 augustus 2017 17:10 1
opendata.arnhem.nl 28 november 2016 13:42 28 augustus 2017 17:10 1
To
opendata.arnhem.nl 22 december 2016 21:54 25 december 2016 22:45 1
opendata.arnhem.nl 22 december 2016 21:54 s 22 december 2016 21:54 1
opendata.arnhem.nl 21 december 2016 21:37 s 22 december 2016 21:54 1
opendata.arnhem.nl 9 december 2016 18:01 s 21 december 2016 21:37 1
opendata.arnhem.nl 9 december 2016 18:01 s 21 december 2016 21:37 1
opendata.arnhem.nl 6 december 2016 16:56 s 9 december 2016 18:01 1
opendata.arnhem.nl 6 december 2016 16:56 s 9 december 2016 18:01 1
opendata.arnhem.nl 4 december 2016 15:50 s 6 december 2016 16:56 1
opendata.arnhem.nl 4 december 2016 15:50 s 6 december 2016 16:56 1
opendata.arnhem.nl 2 december 2016 15:01 s 4 december 2016 15:50 1
opendata.arnhem.nl 2 december 2016 15:01 4 december 2016 15:50 1
"""
# https://docs.djangoproject.com/en/1.11/howto/custom-management-commands/
def handle(self, *args, **options):
# pyflakes endpoints = Endpoint.objects.all()
# Command.urls_with_ridiculous_number_of_endpoints()
Command.resequence_endpoint_deaths()
@staticmethod
# todo: does not account for ipv6.
def resequence_endpoint_deaths():
"""Changes the is_dead_since dates in above example with a better sequenced one:
The endpoint is dead once a new one has been discovered.
Warning: do this only for the same protocol.
opendata.arnhem.nl
sites.zoetermeer.nl
webmail.zaltbommel.nl
webmail.gemeentemolenwaard.nl
sites.almelo.nl
pers.alkmaar.nl
"""
url = Url.objects.all().filter(url="mail.rhenen.nl").get()
log.debug("url %s" % url)
endpoints = Endpoint.objects.all().filter(url=url,
protocol="https",
port="443").order_by('discovered_on')
for endpoint in endpoints:
log.debug("endpoint: %s, Discovered on: %s" % (endpoint, endpoint.discovered_on))
if endpoint.is_dead_since:
log.debug('Would replace dead date %s' % endpoint.is_dead_since)
try:
endpoint.is_dead_since = Endpoint.objects.all().filter(
url=url,
discovered_on__gt=endpoint.discovered_on).earliest('discovered_on').\
discovered_on
log.debug('With date: %s' % endpoint.is_dead_since)
endpoint.save()
except ObjectDoesNotExist:
log.warning('Not replaced at all, since there is no endpoint discovered after this one.')
@staticmethod
def urls_with_ridiculous_number_of_endpoints(protocol="https"):
"""
Warning: this is just an indication! This does not have to be true.
Note that most SIP sites have 6 endpoints. Some provider does that.
:param protocol:
:return:
"""
from django.db.models import Count
# can't filter on annotations.
urls = Url.objects.all().filter(endpoint__protocol__exact=protocol).annotate(
count_endpoints=Count('endpoint'))
ridiculous_urls = []
for url in urls:
if url.count_endpoints > 6:
ridiculous_urls.append(url)
for ridiculous_url in ridiculous_urls:
log.debug("Ridiculous amount of endpoints on: %s" % ridiculous_url)
log.debug("You are looking for the ones that have _a lot_ of the same Is dead Since")
endpoints = Endpoint.objects.all().filter(url=ridiculous_url)
for endpoint in endpoints:
log.debug("Is dead since: %s, Endpoint: %s," %
(endpoint.is_dead_since, endpoint))
return ridiculous_urls
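A small aside on the "can't filter on annotations" comment above: Django does allow filtering on annotated values, so the manual counting loop could likely be replaced by a queryset along these lines. This is a sketch only; urls_with_many_endpoints is a hypothetical name, while the models and the Count annotation are the same as above:

from django.db.models import Count

from failmap.scanners.models import Url


def urls_with_many_endpoints(protocol: str = "https", threshold: int = 6):
    # annotate every url with its endpoint count and filter on that annotation directly
    return (Url.objects.all()
            .filter(endpoint__protocol__exact=protocol)
            .annotate(count_endpoints=Count('endpoint'))
            .filter(count_endpoints__gt=threshold))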
import ipaddress
import logging
from django.core.management.base import BaseCommand
from failmap.scanners.models import Endpoint
log = logging.getLogger(__package__)
class Command(BaseCommand):
help = 'Compress IPv6 addresses in the database.'
"""
It should only be necessary to run this script once, when upgrading from very early versions
of faalkaart. You probably don't need to run this anymore...
"""
# https://docs.djangoproject.com/en/1.11/howto/custom-management-commands/
def handle(self, *args, **options):
endpoints = Endpoint.objects.all()
for endpoint in endpoints:
compressed = ipaddress.ip_address(endpoint.ip).compressed
if compressed != endpoint.ip:
logging.debug("Endpoint %s" % endpoint)
logging.debug("Compressed %s to %s. Saving." % (endpoint.ip, compressed))
endpoint.ip = compressed
endpoint.save()
import logging
from django.core.management.base import BaseCommand
from failmap.scanners.models import Endpoint, Url
log = logging.getLogger(__package__)
class Command(BaseCommand):
help = 'Cleans up duiven.n.duiven.nl domains, created due to a bug.'
"""
You probably don't need to run this anymore...
"""
# https://docs.djangoproject.com/en/1.11/howto/custom-management-commands/
def handle(self, *args, **options):
endpoints = Endpoint.objects.all().filter(url__url__regex=".*\.n\..*\..*")
for endpoint in endpoints:
log.debug("Found possible weird endpoint: %s" % endpoint)
# endpoint.delete()
urls = Url.objects.all().filter(url__iregex=".*\.n\..*\..*")
for url in urls:
log.debug("Found possible weird url: %s" % url)
# url.delete()
import logging
from django.core.exceptions import ObjectDoesNotExist
from django.core.management.base import BaseCommand
from failmap.scanners.models import TlsQualysScan, TlsQualysScratchpad, Url
log = logging.getLogger(__package__)
class Command(BaseCommand):
help = 'Cleans up TLS scans that have a 0 rating and no message. Usually that would be an ' \
'unresolvable domain or something like that. We correlate to unresolvable domains.'
"""
You probably don't need to run this anymore...
Non-resolvable, always-0 scans are just nonsense: the domain just doesn't exist and it creates
false scores.
"""