Commit dddb73b1 authored by Elger Jonker

lint, merge issues and overall code fixes

parent 34fc1ec7
......@@ -2,7 +2,6 @@ from django.contrib import admin
from jet.admin import CompactInline
from failmap_admin.map.determineratings import rate_url
from failmap_admin.scanners.scanner_tls_qualys import scan
from .models import (Endpoint, EndpointGenericScan, EndpointGenericScanScratchpad, Screenshot,
......
......@@ -7,7 +7,7 @@ from failmap_admin.map.determineratings import (rate_organization_efficient,
timeline)
from failmap_admin.organizations.models import Organization, Url
from failmap_admin.scanners.models import Endpoint
from failmap_admin.scanners.scanner_security_headers import scan_all_urls_celery, scan_headers
from failmap_admin.scanners.scanner_security_headers import scan as scan_headers
logger = logging.getLogger(__package__)
......@@ -85,10 +85,6 @@ def test_sslscan_real():
test_real('johnkr.com', 443)
def develop_celery_test_async_tasks():
scan_all_urls_celery()
def develop_celery_advanced():
url = Url.objects.all().filter(url='www.ibdgemeenten.nl').get()
http_endpoints = Endpoint.objects.all().filter(url=url, is_dead=False, protocol='http')
......@@ -103,11 +99,6 @@ def develop_celery_advanced():
# dispatch_scan_security_headers(endpoint)
def develop_celery():
from celery_test import add
add.delay(1, 2)
def develop_security_headers_scanner():
u = Url.objects.all().filter(url='zoeken.haarlemmermeer.nl').get()
u = Url.objects.all().filter(url='www.ibdgemeenten.nl').get()
......@@ -127,8 +118,6 @@ def develop_determineratings():
# when = datetime.now(pytz.utc)
organization = Organization.objects.filter(name="Arnhem").get()
# pyflakes clear_organization_and_urls(organization)
# pyflakes rate_organization_urls_efficient(organization, create_history=True)
# ratings are always different since we now also save last scan date.
# only creates things for near midnight. Should check if today, and then save for now.
rate_organization_efficient(organization, create_history=True)
......
import logging
from django.core.exceptions import ObjectDoesNotExist
from django.core.management.base import BaseCommand
from failmap_admin.organizations.models import Organization, Url
from failmap_admin.scanners.models import Endpoint, UrlIp, \
EndpointGenericScan, TlsQualysScan, Screenshot
from failmap_admin.scanners.scanner_http import scan_url, scan_urls
from .support.arguments import add_discover_verify, add_organization_argument
from failmap_admin.scanners.models import (Endpoint, EndpointGenericScan, Screenshot, TlsQualysScan,
UrlIp)
logger = logging.getLogger(__package__)
......@@ -28,6 +22,7 @@ class Command(BaseCommand):
# scanners should work differently: ip has to be stored separately.
# life-cycle of discovery changes completely.
def move_ip_information():
"""
Step 1
......@@ -61,6 +56,7 @@ def move_ip_information():
for epip in epips:
UrlIp.objects.all().filter(url=epip.url, ip=epip.ip).exclude(id=epip.id).delete()
"""
Going back:
rm db.sqlite3
......
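A minimal sketch of what such a one-off move could look like, assuming the old Endpoint model still carries its ip field and using the UrlIp model added in this branch (an illustration, not code from this changeset):

from failmap_admin.scanners.models import Endpoint, UrlIp

def move_ip_information_sketch():
    # Copy the ip stored on each endpoint into the separate UrlIp table.
    for endpoint in Endpoint.objects.all().exclude(ip=""):
        UrlIp.objects.get_or_create(url=endpoint.url, ip=endpoint.ip)

    # Deduplicate: keep a single UrlIp row per (url, ip) pair, as the docstring above describes.
    seen = set()
    for urlip in UrlIp.objects.all().order_by('id'):
        key = (urlip.url_id, urlip.ip)
        if key in seen:
            urlip.delete()
        else:
            seen.add(key)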
......@@ -69,6 +69,7 @@ def onboard_gather():
if never_onboarded.count() > 0:
cyber = """
................................................................................
.......-:////:.....:-.......::...-///////:-......://////////:..../////////:.....
...../mMMMMMMMN...NMM+.....hMMy..+MMMMMMMMMNy-...dMMMMMMMMMMMN..-MMMMMMMMMMNy...
......
......@@ -3,8 +3,8 @@ import logging
from django.core.management.base import BaseCommand
from failmap_admin.map.determineratings import rate_organization_efficient, rerate_url_with_timeline
from failmap_admin.scanners.scanner_tls_qualys import scan_task, scan
from failmap_admin.scanners.models import Url
from failmap_admin.scanners.scanner_tls_qualys import scan, scan_task
logger = logging.getLogger(__package__)
......
import logging
from datetime import datetime, timedelta
from time import sleep
import pytz
from django.core.management.base import BaseCommand
from failmap_admin.scanners.models import Endpoint
from failmap_admin.scanners.scanner_screenshot import screenshot_endpoint
from failmap_admin.scanners.scanner_screenshot import screenshots_of_new_urls
logger = logging.getLogger(__package__)
# todo: when tls scanner ends, it hangs.
# Only the latest ratings...
class Command(BaseCommand):
help = 'Create a screenshot'
help = 'Create screenshots of urls that don\'t have a screenshot yet'
def handle(self, *args, **options):
try:
while True:
Command.make_new_screenshots()
logger.info("Waiting for more endpoints to create screenshots. "
"Sleeping for 60 seconds.")
screenshots_of_new_urls()
logger.info("No more endpoints to screenshot. Waiting 60 seconds for more.")
sleep(60)
except KeyboardInterrupt:
logger.debug("ALL DONE!")
@staticmethod
def make_new_screenshots():
one_month_ago = datetime.now(pytz.utc) - timedelta(days=31)
# never had a screenshot or only has screenshots older than a month
no_screenshots = Endpoint.objects.all().filter(is_dead=False,
url__not_resolvable=False,
screenshot__isnull=True)
outdated_screenshots = Endpoint.objects.all().filter(
is_dead=False,
url__not_resolvable=False,
screenshot__created_on__lt=one_month_ago)
endpoints = list(no_screenshots) + list(outdated_screenshots)
if len(endpoints):
logger.info("Trying to make %s screenshot!" % len(endpoints))
# Chrome headless, albeit single threaded, is pretty reliable and fast for existing
# domains. This code is also the most updated. Waiting for firefox with screenshot
# support. (they use --screenshot=<path>, so that might work multithreaded)
# when only visiting existing domains (no timeouts) you'll have about 21 screenshots
# per minute, which is pretty ok.
# todo: have a timeout of max N seconds per screenshot. Chrome doesn't have that.
# killing python process might result in a random chrome process staying alive.
# Warning: opening a browser might also mean it wants to play audio automatically(!)
# this can bring some nice surprises :)
for endpoint in endpoints:
try:
screenshot_endpoint(endpoint)
except TimeoutError:
logger.warning('Took too long to make screenshot of: %s' % endpoint)
logger.debug("Stopped. If this was killed when making screenshots: "
"please check if there are still some browsers running.")
......@@ -2,8 +2,8 @@
# Generated by Django 1.11.6 on 2017-10-30 17:03
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
......@@ -18,7 +18,8 @@ class Migration(migrations.Migration):
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ip', models.CharField(help_text='IPv4 or IPv6 Address. Addresses have to be normalized to the compressed representation: removing as many zeros as possible. For example: IPv6: abcd:0000:0000:00fd becomes abcd::fd, or IPv4: 127.000.000.001 = 127.0.0.1', max_length=255)),
('rdns_name', models.CharField(help_text='The reverse name can be a server name, containing a provider or anything else. It might contain the name of a yet undiscovered url or hint at a service.', max_length=255)),
('rdns_name', models.CharField(
help_text='The reverse name can be a server name, containing a provider or anything else. It might contain the name of a yet undiscovered url or hint at a service.', max_length=255)),
('discovered_on', models.DateTimeField(auto_now_add=True, null=True)),
('is_unused', models.IntegerField(default=False, help_text="If the address was used in the past, but not anymore. It's possible that the same address is associated with an endpoint more than once over time, as some providers rotate a set of IP addresses.")),
('is_unused_since', models.DateTimeField(blank=True, null=True)),
......
......@@ -2,8 +2,8 @@
# Generated by Django 1.11.6 on 2017-10-30 19:59
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
......@@ -17,6 +17,7 @@ class Migration(migrations.Migration):
migrations.AddField(
model_name='endpointip',
name='url',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='organizations.Url'),
field=models.ForeignKey(blank=True, null=True,
on_delete=django.db.models.deletion.CASCADE, to='organizations.Url'),
),
]
......@@ -2,8 +2,8 @@
# Generated by Django 1.11.6 on 2017-10-30 20:42
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
......@@ -19,12 +19,14 @@ class Migration(migrations.Migration):
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ip', models.CharField(help_text='IPv4 or IPv6 Address. Addresses have to be normalized to the compressed representation: removing as many zeros as possible. For example: IPv6: abcd:0000:0000:00fd becomes abcd::fd, or IPv4: 127.000.000.001 = 127.0.0.1', max_length=255)),
('rdns_name', models.CharField(help_text='The reverse name can be a server name, containing a provider or anything else. It might contain the name of a yet undiscovered url or hint at a service.', max_length=255)),
('rdns_name', models.CharField(
help_text='The reverse name can be a server name, containing a provider or anything else. It might contain the name of a yet undiscovered url or hint at a service.', max_length=255)),
('discovered_on', models.DateTimeField(auto_now_add=True, null=True)),
('is_unused', models.IntegerField(default=False, help_text="If the address was used in the past, but not anymore. It's possible that the same address is associated with an endpoint more than once over time, as some providers rotate a set of IP addresses.")),
('is_unused_since', models.DateTimeField(blank=True, null=True)),
('is_unused_reason', models.CharField(blank=True, max_length=255, null=True)),
('url', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='organizations.Url')),
('url', models.ForeignKey(blank=True, null=True,
on_delete=django.db.models.deletion.CASCADE, to='organizations.Url')),
],
),
migrations.RemoveField(
......
......@@ -85,9 +85,11 @@ class Endpoint(models.Model):
def __str__(self):
if self.is_dead:
return "✝ %s = %s IPv%s | %s/%s [%s]" % (self.domain, self.ip, self.ip_version, self.protocol, self.port, self.id)
return "✝ %s = %s IPv%s | %s/%s [%s]" % (self.domain, self.ip, self.ip_version,
self.protocol, self.port, self.id)
else:
return "%s = %s IPv%s | %s/%s [%s]" % (self.domain, self.ip, self.ip_version, self.protocol, self.port, self.id)
return "%s = %s IPv%s | %s/%s [%s]" % (self.domain, self.ip, self.ip_version,
self.protocol, self.port, self.id)
def uri_url(self):
return "%s://%s:%s" % (self.protocol, self.url.url, self.port)
......@@ -147,7 +149,6 @@ class UrlIp(models.Model):
is_unused_reason = models.CharField(max_length=255, blank=True, null=True)
class TlsQualysScan(models.Model):
"""
Model for scanner tls qualys
......
# This is going to scan DNS using well known tools.
# DNS Recon:
# The Harvester: - going to be deprecated
"""
DNS Recon in some cases thinks all subdomains are valid, and technically it is right, because
there is always an answer. So we're going to test whether a few random subdomains exist, to detect this.
Performs a range of DNS scans:
- Using Search engines
- Using Wordlists
- Using Certificate Transparency
Afterwards we know that a subdomain exists, but we don't know which ports give results we can
audit. We will check for TLS on 443. There are infinite possibilities.
We can check for endpoints on both http and https for the new domains, so they can be picked up
by other scanners.
This library deserves a cleanup.
todo: noordwijkerhout.nl has a wildcard, but dnsrecon doesn't notice. Develop wildcard detection.
Sometimes it detects it, sometimes it doesn't.
"""
# todo: if ScannerHttp.has_internet_connection():
import logging
import subprocess
......@@ -26,11 +20,6 @@ from failmap_admin.scanners.scanner_http import resolves
logger = logging.getLogger(__package__)
# todo: record that some domains have a catch all, and should not be scanned.
# the catch all is sometimes not detected by dnsrecon
harvester_path = settings.TOOLS['theHarvester']['executable']
dnsrecon_path = settings.TOOLS['dnsrecon']['executable']
......@@ -60,12 +49,8 @@ wordlists = {
}
}
# todo: make a "tool" dir, or something so the harvester and such are always available.
# todo: if ScannerHttp.has_internet_connection():
# todo: move this to url logic / url manager. The resolves bit is somewhat in the way. Refactor
def add_subdomain(subdomain, url):
fulldomain = subdomain + "." + url.url
logger.debug("Trying to add subdomain to database: %s" % fulldomain)
......@@ -172,8 +157,6 @@ def search_engines_scan(url):
addedlist.append(added)
return addedlist
# todo: also include censys, google and let's encrypt( if has one )
def certificate_transparency(urls):
"""
......@@ -226,9 +209,9 @@ def certificate_transparency(urls):
return addedlist
# todo: very ugly parsing, should be just reading the XML output.
def subdomains_harvester(url):
# deprecated
# todo: very ugly parsing, should be just reading the XML output.
# python theHarvester.py -d zutphen.nl -b google -l 100
engine = "google"
process = subprocess.Popen(['python', harvester_path,
......@@ -304,23 +287,22 @@ def create_nonsense():
text_file.write(word + '\n')
def organization_brute_dutch(self, organization):
def organization_brute_dutch(organization):
urls = topleveldomains(organization)
wordlist = wordlists["dutch_basic"]["path"]
return dnsrecon_brute(urls, wordlist)
def organization_brute_threeletters(self, organization):
def organization_brute_threeletters(organization):
urls = topleveldomains(organization)
wordlist = wordlists["three_letters"]["path"]
return dnsrecon_brute(urls, wordlist)
# hundreds of words
# todo: language matters, many of the NL subdomains don't make sense in other countries.
# todo: don't use the subdomains that are already known to exist.
def organization_brute_knownsubdomains(self, organization):
def organization_brute_knownsubdomains(organization):
update_wordlist_known_subdomains()
urls = topleveldomains(organization)
wordlist = wordlists["known_subdomains"]["path"]
......@@ -333,7 +315,7 @@ def brute_known_subdomains(urls):
return dnsrecon_brute(urls, wordlist)
def organization_standard_scan(self, organization):
def organization_standard_scan(organization):
urls = Url.objects.all().filter(organization=organization,
url__iregex="^[^.]*\.[^.]*$")
return dnsrecon_default(urls)
......@@ -402,10 +384,9 @@ def dnsrecon_default(urls):
return imported_urls
# This helps to determine at database level if the DNS uses wildcards, so it can be dealt
# with in another way.
def topleveldomains(organization):
# todo: move to manager, expand the queryset with the uses dns wildcard.
topleveldomains = Url.objects.all().filter(organization=organization,
......@@ -415,7 +396,7 @@ def topleveldomains(organization):
non_wildcard_toplevel_domains = []
# inspect if the url employs wildcards. If so, register it and make it a point of
# interest for people to test this by hand (or more advanced scanners)
create_nonsense() # Get some random stuff.
for url in topleveldomains:
if url_uses_wildcards(url):
logger.info("Domain %s uses wildcards, DNS brute force not possible" % url.url)
......@@ -431,6 +412,12 @@ def topleveldomains(organization):
def url_uses_wildcards(url):
"""
We need to perform a check ourselves, since we cannot get from the DNSRecon report if the url
uses wildcards. We store this ourselves so we can better filter domains.
In some cases DNSrecon makes a wrong assumption about wildcard usage. This is hopefully a bit better.
"""
logger.debug("Checking for DNS wildcards on domain: %s" % url.url)
file = "%s_data_wildcards.json" % url.url
path = settings.TOOLS['dnsrecon']['output_dir'] + file
......@@ -438,6 +425,7 @@ def url_uses_wildcards(url):
logger.debug("DNS results will be stored in file: %s" % path)
# never continue with wildcard domains
create_nonsense()
p = subprocess.Popen(['python', dnsrecon_path,
'--domain', url.url,
'-t', 'brt',
......@@ -460,7 +448,7 @@ def url_uses_wildcards(url):
return wildcard
def dnsrecon_google(url):
def dnsrecon_google():
raise NotImplementedError
# todo: make this new manual scan.
# requires: netaddr, dnspython
......@@ -470,9 +458,6 @@ def dnsrecon_google(url):
# Brute force op DNS:
# python dnsrecon.py --domain amsterdam.nl -j output_brt.json
return
# todo: also perform basic endpoint scans for new subdomains
def import_dnsrecon_report(url, path):
......
......@@ -25,9 +25,8 @@ Likely: 80, 8080, 8008, 8888, 8088
"""
import logging
from datetime import datetime
import socket
from .timeout import timeout
from datetime import datetime
import pytz
import requests
......@@ -37,6 +36,7 @@ from requests.exceptions import ConnectionError
from failmap_admin.celery import app
from .models import Endpoint, UrlIp
from .timeout import timeout
logger = logging.getLogger(__package__)
......@@ -200,7 +200,7 @@ def can_connect(protocol, url, port):
- BadStatusLine
- CertificateError
- certificate verify failed
Perhaps: (todo)
- EOF occurred in violation of protocol
"""
......
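A rough sketch of the kind of probe can_connect performs (the real scanner_http implementation distinguishes many more exception types and stores results; names and timeouts here are illustrative):

import requests
from requests.exceptions import ConnectionError, SSLError, Timeout

def can_connect_sketch(protocol, host, port):
    uri = "%s://%s:%s" % (protocol, host, port)
    try:
        requests.get(uri, timeout=(10, 10))
        return True
    except SSLError:
        # "certificate verify failed" and friends: something answered, so an endpoint exists.
        return True
    except (ConnectionError, Timeout):
        return False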
"""
Check if a domain is only reachable on plain http, instead of both http and https
Check if a domain is only reachable on plain http, instead of both http and https.
Browsers first connect to http, not https when entering a domain. That will be changed in the
future.
Browsers first connect to http, not https when entering a domain. That will be changed in the future.
Further reading:
https://stackoverflow.com/questions/20475552/python-requests-library-redirect-new-url#20475712
"""
import logging
from celery import group
from failmap_admin.scanners.scanner_http import scan_urls as scanner_http_scan_urls
from failmap_admin.organizations.models import Url
from failmap_admin.scanners.endpoint_scan_manager import EndpointScanManager
from failmap_admin.scanners.scanner_http import scan_urls as scanner_http_scan_urls
from ..celery import app
from .models import Endpoint
......@@ -57,11 +60,14 @@ def scan_url(url):
has_https = False
http_endpoints = []
# The default ports matter for normal humans. All services on other ports are special services.
# we only give points if there is not a normal https site when there is a normal http site.
# todo: ip_version is relevant here.
for endpoint in endpoints:
if endpoint.protocol == "http":
if endpoint.protocol == "http" and endpoint.port == 80:
has_http = True
http_endpoints.append(endpoint)
if endpoint.protocol == "https":
if endpoint.protocol == "https" and endpoint.port == 443:
has_https = True
# calculate the score
......@@ -122,6 +128,7 @@ def scan_url(url):
def verify_is_secure(url):
# i've seen qualys saying there is no TLS, while there is!
# This _might_ revive an endpoint.
scanner_http_scan_urls([url], [443], ['https'])
endpoints = Endpoint.objects.all().filter(url=url, is_dead=False,
......@@ -132,8 +139,6 @@ def verify_is_secure(url):
logger.debug("Url is still not secure: %s" % url)
return False
# https://stackoverflow.com/questions/20475552/python-requests-library-redirect-new-url#20475712
def redirects_to_safety(url):
import requests
......
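A minimal sketch of what redirects_to_safety can check, following the Stack Overflow link cited in the docstring above (a hypothetical helper, not the project's implementation):

import requests

def redirects_to_safety_sketch(domain):
    # Follow redirects from the plain http site; if the chain ends on https,
    # visitors are at least forwarded to a secure endpoint.
    response = requests.get("http://%s" % domain, timeout=(10, 10), allow_redirects=True)
    return response.url.startswith("https://")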
......@@ -8,7 +8,7 @@ import pytz
import untangle
from django.conf import settings
from celery_test import app
from failmap_admin.celery import app
from failmap_admin.scanners.models import Endpoint
from failmap_admin.scanners.timeout import timeout
......
......@@ -36,17 +36,17 @@ from django.core.exceptions import ObjectDoesNotExist
from failmap_admin.map.determineratings import rate_organization_efficient, rerate_url_with_timeline
from failmap_admin.organizations.models import Url
from failmap_admin.scanners.models import Endpoint, TlsQualysScan, TlsQualysScratchpad, EndpointGenericScan
from failmap_admin.scanners.state_manager import StateManager
from failmap_admin.scanners.models import (Endpoint, EndpointGenericScan, TlsQualysScan,
TlsQualysScratchpad)
from failmap_admin.scanners.scanner_http import store_url_ips
from failmap_admin.scanners.state_manager import StateManager
from ..celery import app
log = logging.getLogger(__name__)
def scan(urls: List[Url]):
def scan_url_list(urls: List[Url]):
urls = external_service_task_rate_limit(urls)
log.debug("Domains to scan: %s", len(urls))
......@@ -105,7 +105,7 @@ def scan_task(url):
"""
While the documentation says to check every 10 seconds, we'll do that between every
20 to 25, simply because it matters very little when scans are run in parallel.
20 to 25, simply because it matters very little when scans are run in parallel.
https://github.com/ssllabs/ssllabs-scan/blob/stable/ssllabs-api-docs.md
"""
sleep(20 + randint(0, 5)) # don't pulsate.
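# Sketch of the polling pattern described in the docstring above, against the public
# SSL Labs analyze endpoint (URL and fields per the linked API docs; error handling and
# rate limiting omitted, so this is an illustration rather than the scanner's code):
from random import randint
from time import sleep

import requests

def poll_qualys_sketch(domain):
    api = "https://api.ssllabs.com/api/v2/analyze"
    while True:
        data = requests.get(api, params={'host': domain, 'all': 'done'}).json()
        if data.get('status') in ('READY', 'ERROR'):
            return data
        sleep(20 + randint(0, 5))  # don't pulsate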
......@@ -445,9 +445,9 @@ def endpoints_alive_in_past_24_hours(url):
endpoint__protocol__in=["https"],
scan_date__gt=date.today() - timedelta(1)).exists()
if x:
log.debug("Scanned in past 24 hours: yes %s", url.url)
log.debug("Scanned in past 24 hours: yes: %s", url.url)
else:
log.debug("Scanned in past 24 hours: no %s", url.url)
log.debug("Scanned in past 24 hours: no : %s", url.url)
return x
......
......@@ -17,7 +17,7 @@ from freezegun import freeze_time
from httmock import HTTMock, response
from failmap_admin.scanners.models import Endpoint, TlsQualysScan
from failmap_admin.scanners.scanner_tls_qualys import ScannerTlsQualys
from failmap_admin.scanners.scanner_tls_qualys import scan
try:
# Python 3
......@@ -144,9 +144,7 @@ def test_tls_scan_qualys_sample_result(db):
announce_testcase(1, "Creating a new scan, where everything has to go right.")
with freeze_time('2000-1-1', tick=True, tz_offset=1):
with HTTMock(qualys_mock_a):
s = ScannerTlsQualys()
s.rate_limit = False
s.scan(["www.faalkaart.nl"])
scan(["www.faalkaart.nl"])
assert Endpoint.objects.filter(domain="www.faalkaart.nl").count() == 2 # ipv4 + ipv6
assert TlsQualysScan.objects.filter(qualys_rating="A").count() == 2
......@@ -154,9 +152,7 @@ def test_tls_scan_qualys_sample_result(db):
"or scan results. Scan results would only be updated after 24h.")
with freeze_time('2000-1-3', tick=True, tz_offset=1):
with HTTMock(qualys_mock_a):
s = ScannerTlsQualys()
s.rate_limit = False
s.scan(["www.faalkaart.nl"])
scan(["www.faalkaart.nl"])
assert Endpoint.objects.filter(domain="www.faalkaart.nl").count() == 2
assert TlsQualysScan.objects.filter(qualys_rating="A").count() == 2
......@@ -165,9 +161,7 @@ def test_tls_scan_qualys_sample_result(db):
"should increase as only changes are recorded.")
with freeze_time('2000-1-5', tick=True, tz_offset=1):
with HTTMock(qualys_mock_b):
s = ScannerTlsQualys()
s.rate_limit = False
s.scan(["www.faalkaart.nl"])
scan(["www.faalkaart.nl"])
assert Endpoint.objects.filter(domain="www.faalkaart.nl").count() == 2
assert TlsQualysScan.objects.filter(qualys_rating="A").count() == 2
assert TlsQualysScan.objects.filter(qualys_rating="B").count() == 2
......@@ -176,9 +170,7 @@ def test_tls_scan_qualys_sample_result(db):
"scan is dismissed")
with freeze_time('2000-1-5', tick=True, tz_offset=1):
with HTTMock(qualys_mock_c):
s = ScannerTlsQualys()
s.rate_limit = False
s.scan(["www.faalkaart.nl"])
scan(["www.faalkaart.nl"])
assert Endpoint.objects.filter(domain="www.faalkaart.nl").count() == 2
assert TlsQualysScan.objects.filter(qualys_rating="A").count() == 2
assert TlsQualysScan.objects.filter(qualys_rating="B").count() == 2
......@@ -187,9 +179,7 @@ def test_tls_scan_qualys_sample_result(db):
announce_testcase(5, "Verify that it's possible to scan multiple domains.")
with freeze_time('2000-1-7', tick=True, tz_offset=1):
with HTTMock(qualys_mirror):
s = ScannerTlsQualys()
s.rate_limit = False
s.scan(["www.faalkaart.nl", "www.elgerjonker.nl", "www.nu.nl"])
scan(["www.faalkaart.nl", "www.elgerjonker.nl", "www.nu.nl"])
assert Endpoint.objects.filter(domain="www.faalkaart.nl").count() == 2
assert Endpoint.objects.filter(domain="www.elgerjonker.nl").count() == 2
......@@ -205,9 +195,7 @@ def test_tls_scan_qualys_sample_result(db):
Counter().reset()
with freeze_time('2000-1-9', tick=True, tz_offset=1):
with HTTMock(qualys_realistic_scan):
s = ScannerTlsQualys()
s.rate_limit = False
s.scan(["www.faalkaart.nl"])
scan(["www.faalkaart.nl"])
# no update on the rating, so no new scans.
assert TlsQualysScan.objects.filter(qualys_rating="A").count() == 8
......@@ -218,9 +206,7 @@ def test_tls_scan_qualys_sample_result(db):
Counter().reset()
with freeze_time('2000-1-11', tick=True, tz_offset=1):
with HTTMock(qualys_error_scan):
s = ScannerTlsQualys()
s.rate_limit = False
s.scan(["www.faalkaart.nl"])
scan(["www.faalkaart.nl"])
# no update on the rating, so no new scan.
# the endpoints should now be set to dead...
......