diff --git a/frontend/asfui/app/management/commands/external_data.py b/frontend/asfui/app/management/commands/external_data.py new file mode 100644 index 00000000..d1152a38 --- /dev/null +++ b/frontend/asfui/app/management/commands/external_data.py @@ -0,0 +1,216 @@ +#!/usr/bin/python3 +from django.core.management.base import BaseCommand, CommandError +from datetime import date, datetime +from django.utils import timezone +from cProfile import label +from app.models import vdTarget, vdInTarget, vdResult, vdServices, vdInServices, vdRegExp, vdJob + +from os import path +import sys +import subprocess +import os +import re +import argparse +import csv +import json +from urllib.request import localhost +from app.tools import autodetectType, delta +from app.targets import internal_delete + +#Static and Global Declarations + +def parser_default(PARSER_INPUT, PARSER_OUTPUT, vdZone, tag, mode, owner): + for DATA in PARSER_INPUT: + debug(str(DATA)) + return + +def parser_vmw_csv(PARSER_INPUT, PARSER_OUTPUT, vdZone, tag, mode, owner): + #,Public IP,OwnerEmail,Account ID,ServiceName,Environment,RequestedBy + CSV_READER = csv.reader(PARSER_INPUT, delimiter=',') + IgnoreTheFirstLine = True + for DATA in CSV_READER: + debug(str(DATA)+"\n") + debug("\tDomain:"+DATA[1]+"\n") + debug("\t\tOwners:"+DATA[2]+","+DATA[6]+"\n") + debug("\t\tEnvironment:"+DATA[5]+"\n") + debug("\t\tDescription:"+DATA[4]+"\n") + UNIT = {} + UNIT['owner'] = DATA[2]+","+DATA[6] + UNIT['accountid'] = DATA[3] + UNIT['environment'] = DATA[5] + UNIT['tag'] = tag + UNIT['domain'] = DATA[1] + UNIT['description'] = DATA[4] + if not IgnoreTheFirstLine: + JSON_LINE = json.dumps(UNIT) + PARSER_OUTPUT.write(JSON_LINE+"\n") + else: + IgnoreTheFirstLine = False + return + +def parser_vmw_csvfd(PARSER_INPUT, PARSER_OUTPUT, vdZone, tag, mode, owner): + #,accountID,entityId,PublicIP,OwnerEmail,ServiceName,Environment,RequestedBy + CSV_READER = csv.reader(PARSER_INPUT, delimiter=',') + IgnoreTheFirstLine = True + for DATA in CSV_READER: + debug(str(DATA)+"\n") + debug("\tIPAddr:"+DATA[3]+"\n") + debug("\t\tOwners:"+DATA[4]+","+DATA[7]+"\n") + debug("\t\tEnvironment:"+DATA[6]+"\n") + debug("\t\tDescription:"+DATA[5]+"\n") + UNIT = {} + UNIT['owner'] = DATA[4]+","+DATA[7] + UNIT['accountid'] = DATA[1] + UNIT['environment'] = DATA[6] + UNIT['tag'] = tag + UNIT['domain'] = DATA[3] + UNIT['description'] = DATA[5] + if not IgnoreTheFirstLine: + JSON_LINE = json.dumps(UNIT) + PARSER_OUTPUT.write(JSON_LINE+"\n") + else: + IgnoreTheFirstLine = False + return + +def parser_crt_sh(PARSER_INPUT, PARSER_OUTPUT, vdZone, tag, mode, owner): + for DATA in PARSER_INPUT: + DATA=DATA.rstrip() + debug("\tDomain:"+DATA+"\n") + debug("\t\tOwners:"+owner+"\n") + debug("\t\tTag:"+tag+"\n") + debug("\t\tDescription: Discovery from Crt.sh\n") + UNIT = {} + UNIT['owner'] = owner + UNIT['tag'] = tag + UNIT['domain'] = DATA + UNIT['description'] = "Discovery from Crt.sh" + JSON_LINE = json.dumps(UNIT) + PARSER_OUTPUT.write(JSON_LINE+"\n") + return + +def parser_vmw_json(PARSER_INPUT, PARSER_OUTPUT, vdZone, tag, mode, owner): + JSON_DATA = PARSER_INPUT.read() + JSON_DATA = json.loads(JSON_DATA) + for SECTION in JSON_DATA: + debug(str(SECTION)+"\n") + for DATA in JSON_DATA[SECTION]: + debug("\t"+str(DATA)+"\n") + debug("\t\tOwner:["+str(JSON_DATA[SECTION][1])+"]\n") + debug("\t\tEnvironment:["+str(JSON_DATA[SECTION][2])+"]\n") + for DOMAIN in JSON_DATA[SECTION][0]: + DOMAIN = DOMAIN[:-1] + debug("\t\tDomain:["+str(DOMAIN)+"]\n") + UNIT = {} + UNIT['owner'] = 
str(JSON_DATA[SECTION][1]) + UNIT['accountid'] = str(SECTION) + UNIT['environment'] = str(JSON_DATA[SECTION][2]) + UNIT['tag'] = tag + #The following line, removes the special char by an asterisk, but.... needed?? + #UNIT['domain'] = DOMAIN.replace("\\052", "*") + #Amass just requires the top domain, having a wildcard means we need to search for more findings? + #What if the search engine finds unknown owners by removing parts of a subdomain. + UNIT['domain'] = DOMAIN.replace("\\052.", "") + JSON_LINE = json.dumps(UNIT) + PARSER_OUTPUT.write(JSON_LINE+"\n") + return + +def parser_vmw_jsonl(PARSER_INPUT, PARSER_OUTPUT, vdZone, tag, mode, owner): + vdServicesModel = vdInServices + vdTargetModel = vdInTarget + if vdZone == "internal" or vdZone == "external": + if vdZone == "external": + vdServicesModel = vdServices + vdTargetModel = vdTarget + else: + debug("Error, vdZone:'"+str(vdZone)+"' not defined, breaking\n") + sys.exit(-1) + +# for line in PARSER_INPUT: +# JSONL = json.loads(line) +# debug("JSONL:"+str(JSONL)+"\n") + target_manager(vdTargetModel, vdServicesModel, PARSER_INPUT, vdZone, tag, mode) + +#Here is the global declaration of parsers, functions can be duplicated +action={'default':parser_default, 'vmw.csv':parser_vmw_csv, 'vmw.csvfd':parser_vmw_csvfd, 'vmw.json':parser_vmw_json, 'vmw.jsonl':parser_vmw_jsonl, 'crt.sh':parser_crt_sh} + +def parseLines(PARSER_INPUT, PARSER_OUTPUT, vdZone, parser, tag, mode, owner): + return action[parser](PARSER_INPUT, PARSER_OUTPUT, vdZone, tag, mode, owner) + +PARSER_DEBUG=False +def debug(text): + if PARSER_DEBUG: + sys.stderr.write(str(text)) + return + +class Command(BaseCommand): + help = 'Processes Worker Scans' + def add_arguments(self, parser): + #This single module reads the input file and convert it into + parser.add_argument('--input', help='The input file, if not provided stdin is used', default='stdin') + parser.add_argument('--mode', help='The action for targets/findings merge|delete|deletebytag|sync', default='merge') + parser.add_argument('--tag', help='Assign tag for targets [EXTERNAL]', default='EXTERNAL') + parser.add_argument('--output', help='The output File filename|stdout', default='stdout') + parser.add_argument('--vdzone', help='The output vdZone internal|external', default='error') + parser.add_argument('--parser', help='The parser algorithm vmw[.csv|.json]', default='default') + parser.add_argument('--owner', help='Default email user@domain', default='user@domain') + parser.add_argument('--debug', help='Print verbose data', action='store_true', default=False) + + def handle(self, *args, **kwargs): + PARSER_INPUT = sys.stdin + global PARSER_DEBUG + PARSER_DEBUG = kwargs['debug'] + PARSER_OUTPUT = sys.stdout + + debug(str(kwargs)+"\n") + + if kwargs['parser'] not in action: + PARSER_DEBUG = True + debug("Parser:"+kwargs['parser']+" not found in action declaration:"+str(action)+"\n") + sys.exit() + + if kwargs['input'] != "stdin": + PARSER_INPUT = open(kwargs['input'],'r') + debug("Input: "+kwargs['input']+"\n") + else: + debug("Input: STDIN \n") + if kwargs['output'] != "stdout": + PARSER_OUTPUT = open(kwargs['output'],'w+') + debug("Output: "+kwargs['output']+"\n") + else: + debug("Output: STDOUT \n") + + parseLines(PARSER_INPUT, PARSER_OUTPUT, kwargs['vdzone'], kwargs['parser'], kwargs['tag'], kwargs['mode'], kwargs['owner']) + debug("\n") + +def target_manager(vdTargetModel, vdServicesModel, JSONL_FILE, vdZone, Tag, WorkMode): + tz = timezone.get_current_timezone() + LastDate = datetime.now().replace(tzinfo=tz) + for 
JSONL_DATA in JSONL_FILE: + data = json.loads(JSONL_DATA) + domain = data['domain'] + Type = autodetectType(domain) + debug("JSONL Data:"+str(data)+"\n") + try: + Answer = vdTargetModel.objects.update_or_create(name=domain, defaults={'type': Type, 'tag':Tag, 'lastdate': LastDate, 'owner': data['owner'], 'metadata': JSONL_DATA}) + if Answer[1]: + debug("[Alerting about new object]") + MSG = data + MSG['message'] = "[NEW][OBJECT INTO "+vdZone.upper()+" TARGET DATABASE]" + MSG['type'] = Type + MSG['name'] = domain + MSG['lastupdate'] = str(LastDate) + delta(MSG) + except Exception as e: + sys.stderr.write(str(e)+"Error Target, Skipping:"+str(data)+"\n") + + sys.stderr.write("WorkMode:"+WorkMode+"\n") + if WorkMode != 'merge': + if WorkMode == 'sync': + DeleteTarget = vdTargetModel.objects.filter(tag=Tag).filter(lastdate__lt=LastDate) + if WorkMode == 'delete': + #The equals command in filter, does not work for datetimes, so we use __gte instead + DeleteTarget = vdTargetModel.objects.filter(tag=Tag).filter(lastdate__gte=LastDate) + if WorkMode == 'deletebytag': + DeleteTarget = vdTargetModel.objects.filter(tag=Tag) + internal_delete(vdTargetModel,vdServicesModel,DeleteTarget,autodetectType,delta) + diff --git a/frontend/asfui/app/management/commands/external_discovery.py b/frontend/asfui/app/management/commands/external_discovery.py new file mode 100644 index 00000000..d8ba89dd --- /dev/null +++ b/frontend/asfui/app/management/commands/external_discovery.py @@ -0,0 +1,176 @@ +#!/usr/bin/python3 +from django.core.management.base import BaseCommand, CommandError +from datetime import date, datetime +from django.utils import timezone +from cProfile import label +from app.models import vdTarget, vdInTarget, vdResult, vdServices, vdInServices, vdRegExp, vdJob + +from os import path +import sys +import subprocess +import os +import re +import argparse +import csv +import json +from urllib.request import localhost +from app.tools import autodetectType, delta +from app.discovery import internal_delete + +#Static and Global Declarations + +def parser_default(PARSER_INPUT, PARSER_OUTPUT, tag, mode, owner): + for DATA in PARSER_INPUT: + debug(str(DATA)) + return + +def parser_vmw_csv(PARSER_INPUT, PARSER_OUTPUT, tag, mode, owner): + CSV_READER = csv.reader(PARSER_INPUT, delimiter=',') + IgnoreTheFirstLine = True + for DATA in CSV_READER: + debug(str(DATA)+"\n") + debug("\tDomain:"+DATA[1]+"\n") + debug("\t\tOwners:"+DATA[2]+","+DATA[6]+"\n") + debug("\t\tTag:"+DATA[5]+"\n") + debug("\t\tDescription:"+DATA[4]+"\n") + UNIT = {} + UNIT['owner'] = DATA[2]+","+DATA[6] + UNIT['accountid'] = DATA[3] + UNIT['tag'] = DATA[5] + UNIT['domain'] = DATA[1] + UNIT['description'] = DATA[4] + if not IgnoreTheFirstLine: + JSON_LINE = json.dumps(UNIT) + PARSER_OUTPUT.write(JSON_LINE+"\n") + else: + IgnoreTheFirstLine = False + return + +def parser_crt_sh(PARSER_INPUT, PARSER_OUTPUT, tag, mode, owner): + for DATA in PARSER_INPUT: + DATA=DATA.rstrip() + debug("\tDomain:"+DATA+"\n") + debug("\t\tOwners:"+owner+"\n") + debug("\t\tTag:"+tag+"\n") + debug("\t\tDescription: Discovery from Crt.sh\n") + UNIT = {} + UNIT['owner'] = owner + UNIT['tag'] = tag + UNIT['domain'] = DATA + UNIT['description'] = "Discovery from Crt.sh" + JSON_LINE = json.dumps(UNIT) + PARSER_OUTPUT.write(JSON_LINE+"\n") + return + +def parser_vmw_json(PARSER_INPUT, PARSER_OUTPUT, tag, mode, owner): + JSON_DATA = PARSER_INPUT.read() + JSON_DATA = json.loads(JSON_DATA) + for SECTION in JSON_DATA: + debug(str(SECTION)+"\n") + for DATA in JSON_DATA[SECTION]: + 
debug("\t"+str(DATA)+"\n") + debug("\t\tOwner:["+str(JSON_DATA[SECTION][1])+"]\n") + debug("\t\tTag:["+str(JSON_DATA[SECTION][2])+"]\n") + for DOMAIN in JSON_DATA[SECTION][0]: + DOMAIN = DOMAIN[:-1] + debug("\t\tDomain:["+str(DOMAIN)+"]\n") + UNIT = {} + UNIT['owner'] = str(JSON_DATA[SECTION][1]) + UNIT['accountid'] = str(SECTION) + UNIT['tag'] = str(JSON_DATA[SECTION][2]) + #The following line, removes the special char by an asterisk, but.... needed?? + #UNIT['domain'] = DOMAIN.replace("\\052", "*") + #Amass just requires the top domain, having a wildcard means we need to search for more findings? + #What if the search engine finds unknown owners by removing parts of a subdomain. + UNIT['domain'] = DOMAIN.replace("\\052.", "") + JSON_LINE = json.dumps(UNIT) + PARSER_OUTPUT.write(JSON_LINE+"\n") + return + +def parser_vmw_jsonl(PARSER_INPUT, PARSER_OUTPUT, tag, mode, owner): + target_manager(vdTargetModel, vdServicesModel, PARSER_INPUT, vdZone, tag, mode) + +#Here is the global declaration of parsers, functions can be duplicated +action={'default':parser_default, 'vmw.csv':parser_vmw_csv, 'vmw.json':parser_vmw_json, 'vmw.jsonl':parser_vmw_jsonl, 'crt.sh':parser_crt_sh} + +def parseLines(PARSER_INPUT, PARSER_OUTPUT, vdZone, parser, tag, mode, owner): + return action[parser](PARSER_INPUT, PARSER_OUTPUT, vdZone, tag, mode, owner) + +PARSER_DEBUG=False +def debug(text): + if PARSER_DEBUG: + sys.stderr.write(str(text)) + return + +class Command(BaseCommand): + help = 'Processes Worker Scans' + def add_arguments(self, parser): + #This single module reads the input file and convert it into + parser.add_argument('--input', help='The input file, if not provided stdin is used', default='stdin') + parser.add_argument('--mode', help='The action for targets/findings merge|delete|deletebytag|sync', default='merge') + parser.add_argument('--tag', help='Assign tag for targets [EXTERNAL]', default='EXTERNAL') + parser.add_argument('--output', help='The output File filename|stdout', default='stdout') + parser.add_argument('--vdzone', help='The output vdZone internal|external', default='error') + parser.add_argument('--parser', help='The parser algorithm vmw[.csv|.json]', default='default') + parser.add_argument('--owner', help='Default email user@domain', default='user@domain') + parser.add_argument('--debug', help='Print verbose data', action='store_true', default=False) + + def handle(self, *args, **kwargs): + PARSER_INPUT = sys.stdin + global PARSER_DEBUG + PARSER_DEBUG = kwargs['debug'] + PARSER_OUTPUT = sys.stdout + + debug(str(kwargs)+"\n") + + if kwargs['parser'] not in action: + PARSER_DEBUG = True + debug("Parser:"+kwargs['parser']+" not found in action declaration:"+str(action)+"\n") + sys.exit() + + if kwargs['input'] != "stdin": + PARSER_INPUT = open(kwargs['input'],'r') + debug("Input: "+kwargs['input']+"\n") + else: + debug("Input: STDIN \n") + if kwargs['output'] != "stdout": + PARSER_OUTPUT = open(kwargs['output'],'w+') + debug("Output: "+kwargs['output']+"\n") + else: + debug("Output: STDOUT \n") + + parseLines(PARSER_INPUT, PARSER_OUTPUT, kwargs['vdzone'], kwargs['parser'], kwargs['tag'], kwargs['mode'], kwargs['owner']) + debug("\n") + +def discovery_manager(JSONL_FILE, Tag, WorkMode): + tz = timezone.get_current_timezone() + LastDate = datetime.now().replace(tzinfo=tz) + for JSONL_DATA in JSONL_FILE: + data = json.loads(JSONL_DATA) + domain = data['domain'] + Type = autodetectType(domain) + debug("JSONL Data:"+str(data)+"\n") + try: + Answer = 
vdResult.objects.update_or_create(name=domain, defaults={'type': Type, 'tag':Tag, 'lastdate': LastDate, 'owner': data['owner'], 'metadata': JSONL_DATA}) + if Answer[1]: + debug("[Alerting about new object]") + MSG = data + MSG['message'] = "[NEW][OBJECT INTO "+vdZone.upper()+" TARGET DATABASE]" + MSG['type'] = Type + MSG['name'] = domain + MSG['lastupdate'] = str(LastDate) + delta(MSG) + except Exception as e: + sys.stderr.write(str(e)+"Error Target, Skipping:"+str(data)+"\n") + + sys.stderr.write("WorkMode:"+WorkMode+"\n") + if WorkMode != 'merge': + if WorkMode == 'sync': + DeleteTarget = vdResult.objects.filter(tag=Tag).filter(lastdate__lt=LastDate) + if WorkMode == 'delete': + #The equals command in filter, does not work for datetimes, so we use __gte instead + DeleteTarget = vdResult.objects.filter(tag=Tag).filter(lastdate__gte=LastDate) + if WorkMode == 'deletebytag': + DeleteTarget = vdResult.objects.filter(tag=Tag) + internal_discovery_delete(DeleteTarget,autodetectType,delta) + diff --git a/frontend/asfui/app/management/commands/nucleialert.py b/frontend/asfui/app/management/commands/nucleialert.py index 0f7612d7..7b1d7b8c 100644 --- a/frontend/asfui/app/management/commands/nucleialert.py +++ b/frontend/asfui/app/management/commands/nucleialert.py @@ -84,10 +84,29 @@ def nuclei_blacklist_save(context={}): for template in TEMPLATES: IGNORE+=' - '+template+'\n' BLF.write(IGNORE) + BLF.close() + return + +def nuclei_config_save(context={}): + debug("Calling nuclei templates blacklist save in configuration\n") + TEMPLATES=get_nuclei_templates_4bl() + debug(str(TEMPLATES)) + BLF=open(context['configoutput'],'w+') + NTMPLT=open(context['configtemplate'],'r') + IGNORE=NTMPLT.read() + NTMPLT.close() + EXCLUDE_HEADER=False + for template in TEMPLATES: + if not EXCLUDE_HEADER: + EXCLUDE_HEADER=True + IGNORE+='\nexclude-templates: # Template based exclusion\n' + IGNORE+=' - '+template+'\n' + BLF.write(IGNORE) + BLF.close() return #Here is the global declaration of parsers, functions can be duplicated -action={'default':alert_duedate, 'alert.duedate':alert_duedate, 'clean':nuclei_clean, 'purge':nuclei_purge, 'templates':nuclei_templates, 'blacklist':nuclei_blacklist, 'blacklist.save':nuclei_blacklist_save} +action={'default':alert_duedate, 'alert.duedate':alert_duedate, 'clean':nuclei_clean, 'purge':nuclei_purge, 'templates':nuclei_templates, 'blacklist':nuclei_blacklist, 'blacklist.save':nuclei_blacklist_save, 'config.save':nuclei_config_save} class Command(BaseCommand): help = 'Processes Worker Scans' @@ -95,9 +114,11 @@ def add_arguments(self, parser): #This single module reads the input file and convert it into #parser.add_argument('--input', help='The input file, if not provided stdin is used', default='stdin') #parser.add_argument('--output', help='The output JobID:ID', default='error') - parser.add_argument('--mode', help='The algorithm [default(alert.duedate)|clean, purge, templates, blacklist] for reviewing the findings and alert for not attended', default='default') + parser.add_argument('--mode', help='The algorithm [default(alert.duedate)|clean, purge, templates, blacklist[.save]], config.save for reviewing the findings and alert for not attended', default='default') parser.add_argument('--templatesdir', help='The template directory, default /home/nuclei-templates', default="/home/nuclei-templates") parser.add_argument('--templatesignorefile', help='The template directory, default /home/nuclei-templates/.nuclei-ignore', default="/home/nuclei-templates/.nuclei-ignore") + 
parser.add_argument('--configtemplate', help='The template for default configuration, default /opt/asf/redteam/nuclei/config.yaml', default="/opt/asf/redteam/nuclei/config.yaml") + parser.add_argument('--configoutput', help='The file to store with the exclusions, default /home/asf/nuclei-config.yaml', default="/home/asf/nuclei-config.yaml") parser.add_argument('--debug', help='Print verbose data', action='store_true', default=False) def handle(self, *args, **kwargs): diff --git a/frontend/asfui/app/management/commands/owners_tools.py b/frontend/asfui/app/management/commands/owners_tools.py new file mode 100644 index 00000000..a9771d60 --- /dev/null +++ b/frontend/asfui/app/management/commands/owners_tools.py @@ -0,0 +1,176 @@ +#!/usr/bin/python3 +#VER:1 +from django.core.management.base import BaseCommand, CommandError +from django.utils import timezone +from cProfile import label +from app.models import vdTarget, vdResult, vdServices, vdInServices, vdRegExp, vdJob, vdNucleiResult + +from os import path +import sys +import subprocess +import os +import re +import argparse +import csv +import json +from urllib.request import localhost +from app.tools import * +from app.nuclei import * + + +def merge_csv(context={}): + debug("Calling Join CSV\n"+str(context)+"\n") + FILES=context['input'].split(",") + ODESC=sys.stdout + if 'output' in context and context['output'] != 'stdout': + ODESC=open(context['output'],"w+") + + for FILE in FILES: + FDESC=None + if FILE=="stdin": + FDESC=sys.stdin + else: + FDESC=open(FILE,"+r") + DISCARD1ST = True + for line in FDESC: + if DISCARD1ST: + DISCARD1ST=False + else: + ODESC.write(line) + + +def merge_ip(context={}): + debug("Calling Merge IP\n"+str(context)+"\n") + FILES=context['input'].split(",") + ODESC=sys.stdout + if 'output' in context and context['output'] != 'stdout': + ODESC=open(context['output'],"w+") + + for FILE in FILES: + if Path(FILE).is_file(): + FDESC=None + if FILE=="stdin": + FDESC=sys.stdin + else: + FDESC=open(FILE,"+r") + DISCARD1ST = True + for line in FDESC: + if DISCARD1ST: + DISCARD1ST=False + else: + ip=line.split(",")[3] + ODESC.write(ip+"\n") + return + +def slice_list(context): + debug("Slice list\n"+str(context)+"\n") + FILES=context['input'].split(",") + OUTPUT_COUNTER=0 + ODESC=sys.stdout + ODESC_IPONLY=sys.stdout + IP_CACHE=[] + def cycle_output(counter,context): + if 'output' in context and context['output'] != 'stdout': + BASE_NAME=context['output']+"."+str(counter).zfill(8) + return open(BASE_NAME+".list","w+"), open(BASE_NAME+".ip","w+") + else: + return sys.stdout + + ODESC, ODESC_IPONLY=cycle_output(OUTPUT_COUNTER,context) + PARTIAL_COUNTER=0 + MAX_COUNTER=int(context['size']) + for FILE in FILES: + FDESC=None + if FILE=="stdin": + FDESC=sys.stdin + else: + FDESC=open(FILE,"+r") + + for line in FDESC: + PARTIAL_COUNTER+=1 + if PARTIAL_COUNTER>=MAX_COUNTER: + ODESC.close() + ODESC_IPONLY.close() + OUTPUT_COUNTER+=1 + ODESC, ODESC_IPONLY=cycle_output(OUTPUT_COUNTER,context) + PARTIAL_COUNTER=0 + IP_SEARCH = DETECTOR_IPADDRESS_IN_URI.findall(line) + #debug("LEN="+str(len(IP_SEARCH))+str(IP_SEARCH)+"\n") + if len(IP_SEARCH)==1: + IP=IP_SEARCH[0] + if IP not in IP_CACHE: + ODESC_IPONLY.write(IP+"\n") + IP_CACHE.append(IP) + ODESC.write(line) + + return + +def validate_list(context): + if not context['input'].endswith(".ip"): + debug("Error, the list for validations must end in .ip, and .list is URI list and .ip.valid the valid ip list") + sys.exit(-1) + BASENAME=context['input'].split(".ip")[0] + 
IP_FILE_LIST=BASENAME+".ip.valid" + IP_LIST=open(IP_FILE_LIST,"r") + IP_CACHE=[] + for IP in IP_LIST: + IP_CACHE.append(IP.rstrip()) + debug(IP_CACHE) + IP_LIST.close() + URI_LIST_VALID=open(BASENAME+".list.valid","w+") + URI_LIST=open(BASENAME+".list","r") + for uri in URI_LIST: + SEARCH_IP=DETECTOR_IPADDRESS_IN_URI.findall(uri) + if len(SEARCH_IP)==1: + if SEARCH_IP[0] in IP_CACHE: + URI_LIST_VALID.write(uri) + else: + debug("Ignoring:"+SEARCH_IP[0]+" because is not more in use\n") + else: + URI_LIST_VALID.write(uri) + return +def remove_services_by_tag(context): + vdServicesModel = vdServices + if 'scope' in context and context['scope']=='internal': + vdServicesModel = vdInServices + + if 'tag' not in context: + debug("Error, tag should be set") + return + + OBJECTS_TO_DELETE=vdServicesModel.objects.filter(tags=context['tag']) + for OBJECT_TO_DELETE in OBJECTS_TO_DELETE: + debug("Preparing to delete:"+str(OBJECT_TO_DELETE)) + #OBJECTS_TO_DELETE.delete() + return + +#Here is the global declaration of parsers, functions can be duplicated +action={'default':merge_csv, 'merge.csv':merge_csv, 'merge.ip':merge_ip, 'slice.list':slice_list, 'validate.list':validate_list, 'remove.bytag':remove_services_by_tag} + +class Command(BaseCommand): + help = 'Processes Worker Scans' + def add_arguments(self, parser): + #This single module reads the input file and convert it into + parser.add_argument('--input', help='The input file/files, separated by commas.', default='stdin') + parser.add_argument('--output', help='The output filename', default='stdout') + parser.add_argument('--mode', help='The algorithm [default(merge.csv|merge.ip|slice.list|validate.list)]', default='default') + parser.add_argument('--size', help='The size of the slice', default='200') + parser.add_argument('--debug', help='Print verbose data', action='store_true', default=False) + parser.add_argument('--scope', help='Scope internal or external if required by action', default='external') + parser.add_argument('--tag', help='Tag if required by action', default='EXTERNAL') + + def handle(self, *args, **kwargs): + PARSER_INPUT = sys.stdin + global PARSER_DEBUG + PARSER_DEBUG = kwargs['debug'] + + debug(str(kwargs)+"\n") + + if kwargs['mode'] not in action: + PARSER_DEBUG = True + debug("Parser:"+kwargs['mode']+" not found in action declaration:"+str(action)+"\n") + sys.exit() + + #Main code here + debug("Starting operations for "+kwargs['mode']+"\n") + action[kwargs['mode']](kwargs) \ No newline at end of file diff --git a/frontend/asfui/app/management/commands/parse_tools.py b/frontend/asfui/app/management/commands/parse_tools.py index b8d2be7b..63c3f4fe 100644 --- a/frontend/asfui/app/management/commands/parse_tools.py +++ b/frontend/asfui/app/management/commands/parse_tools.py @@ -153,9 +153,126 @@ def parser_subfinder_output(kwargs): lines +=1 debug("Done printing "+str(lines)+" lines..\n") return + +def parser_socialsearch_output(kwargs): + report=sys.stdin + lines=0 + if kwargs['input']!='stdin': + report=open(kwargs['input'],'r') + Finding = json.loads(report.read()) + Tag = "" + if "meta" in Finding: + debug(str(Finding['meta'])) + else: + debug("Error in JSON, not meta key, ABORTING\n") + return + if "posts" in Finding: + debug("\nFound ["+str(len(Finding['posts']))+"] results\n") + else: + debug("Error in JSON, not posts key, ABORTING\n") + return + for POST in Finding['posts']: + MSG=Finding['meta'] + MSG.update(POST) + MSG['message']="[SOCIALSEARCH][FOUND]" + debug("Finding and Called Delta:"+str(MSG)+"\n\n") + delta(MSG) + 
return + +def parser_pwndb_output(kwargs): + #https://haveibeenpwned.com/api/v3/breach/Adobe + report=sys.stdin + lines=0 + if kwargs['input']!='stdin': + report=open(kwargs['input'],'r') + Finding = json.loads(report.read()) + Tag = "" + if "Name" in Finding[0]: + debug(str(Finding[0]['Name'])) + else: + debug("Error in JSON, not Name key, ABORTING\n"+str(Finding)+"\n") + return + MSG={} + MSG.update(Finding[0]) + MSG['message']="[PWNDB][FOUND]" + debug("Finding and Called Delta:"+str(MSG)+"\n\n") + delta(MSG) + return + +def parser_ddosify_output(kwargs): + #https://haveibeenpwned.com/api/v3/breach/Adobe + report=sys.stdin + lines=0 + if kwargs['input']!='stdin': + report=open(kwargs['input'],'r') + Finding = json.loads(report.read()) + if "success_perc" in Finding: + debug(str(Finding)) + else: + debug("Error in JSON, not success_perc key, ABORTING\n") + return + MSG=geodata_qdict(kwargs['target']) + MSG['hostname']=kwargs['target'] + MSG.update(Finding) + FindingKeys = ["success_perc", + "fail_perc", + "success_count", + "server_fail_count", + "assertion_fail_count"] + DDOS_SUCCESS=1 + for key in FindingKeys: + if Finding[key]==0: + DDOS_SUCCESS=0 + if DDOS_SUCCESS == 1: + MSG['message']="[DDOSIFY][FOUND][SUCESS]" + MSG['level']='critical' + MSG['status']='success' + else: + MSG['message']="[DDOSIFY][FOUND][INFO]" + MSG['level']='info' + MSG['status']='failed' + debug("Finding and Called Delta:"+str(MSG)+"\n\n") + delta(MSG) + return + + +def parser_list_domains(kwargs): + if kwargs['input'] != "stdin": + if "JobID:" in kwargs['input']: + JobID = kwargs['input'].split("JobID:")[1] + debug("Requested to extract data from database backend for JobID:"+JobID+"\n") + FileTargets=sys.stdout + if kwargs['output']!='stdout': + FileTargets=open(kwargs['output'],'w+') + JOB_FOLDER = "/home/asf/jobs/"+JobID+"/" + JOB_FILENAME = JOB_FOLDER+"app.asf" + try: + Job = vdJob.objects.filter(id = JobID)[0] + HostsFromModel = search(Job.regexp, Job.input, Job.exclude) + if not path.exists(JOB_FOLDER): + os.makedirs(JOB_FOLDER) + INPUT_FILE = FileTargets + HOSTS_COUNTER=0 + if Job.input == 'discovery': + for Host in HostsFromModel: + INPUT_FILE.write(Host.name+"\n") + HOSTS_COUNTER = HOSTS_COUNTER + 1 + else: + for HostWithServices in HostsFromModel: + INPUT_FILE.write(HostWithServices.name+"\n") + HOSTS_COUNTER = HOSTS_COUNTER + 1 + debug("All hosts data ("+str(HOSTS_COUNTER)+") Written on "+kwargs['output']+"\n") + INPUT_FILE.close() + except Exception as e: + debug("Error creating the input for JobID:"+str(JobID)+"\n") + debug(str(e)+"\n") + sys.exit() + return + + #Here is the global declaration of parsers, functions can be duplicated -action={'default':parser_default, 'nuclei.waf.rc':parser_nuclei_waf_rc, 'subfinder.input':parser_subfinder_input, 'subfinder.output':parser_subfinder_output, 'wpscan.output':parser_wpscan_output} +action={'default':parser_default, 'nuclei.waf.rc':parser_nuclei_waf_rc, 'subfinder.input':parser_subfinder_input, 'subfinder.output':parser_subfinder_output, 'wpscan.output':parser_wpscan_output, 'socialsearch.output':parser_socialsearch_output, 'pwndb.output':parser_pwndb_output, 'ddosify.output':parser_ddosify_output, 'list.domains':parser_list_domains,} def getJobID(kwargs): if "JobID:" in kwargs['output']: diff --git a/frontend/asfui/app/management/commands/remaster_output.py b/frontend/asfui/app/management/commands/remaster_output.py index e4e439ff..b3d6ec1d 100644 --- a/frontend/asfui/app/management/commands/remaster_output.py +++ 
b/frontend/asfui/app/management/commands/remaster_output.py @@ -256,9 +256,10 @@ def master_parser_nuclei(PARSER_INPUT, PARSER_OUTPUT, FILTER): scope="I" for line in PARSER_INPUT: debug(line+"\n") - if NUCLEI_FINDING.match(line): - debug("Line contains the FINDING regular expression\n") + if line.startswith("{"): + debug("Line contains the JSONL START CHAR\n") DATA = NFinding(line,scope) + EDATA = DATA.getList() if FILTER is not None: DATA=FILTER(DATA) debug(str(DATA)+"\n") @@ -278,7 +279,7 @@ def master_parser_nuclei(PARSER_INPUT, PARSER_OUTPUT, FILTER): #Here we work in the main data information, grouping the findings all together. OldData = PARSER_OUTPUT.objects.filter(name=DATA.name) NEWMETADATA="" - MDT={'owner':'Unknown'} + MDT = {'owner':'Unknown'} if OldData.count()==1: MDT,METADATA = get_metadata(DATA.name) OLDMETADATA=OldData[0].metadata @@ -292,8 +293,11 @@ def master_parser_nuclei(PARSER_INPUT, PARSER_OUTPUT, FILTER): OldData.update(nuclei_http=line, owner=MDT['owner'], metadata=NEWMETADATA) if ((DATA.name in delta_cache) and (line not in delta_cache[DATA.name])) or (DATA.name not in delta_cache): - MSG = {'message':"[NUCLEI][New Finding]", 'host':DATA.name, 'finding':line} + #Removing line, since all fields goes trow the alert. + #MSG = {'message':"[NUCLEI][New Finding]", 'host':DATA.name, 'finding':line} + MSG = {'message':"[NUCLEI][New Finding]", 'host':DATA.name} MSG.update(MDT) + MSG.update(EDATA) delta(MSG) else: #This line is a temporary MOD, please comment for system integrity, all objects should exist @@ -325,12 +329,12 @@ def parser_nuclei_waf(PARSER_INPUT, PARSER_OUTPUT): return master_parser_nuclei(PARSER_INPUT, PARSER_OUTPUT, filter_nuclei_waf) def filter_nuclei_waf(DATA): - if DATA.temp_array[2]=='failed': - DATA.level="medium" - DATA.vulnerability="MISSING-WAF" - return DATA - else: - return None + if 'matcher-status' in DATA.parsedline: + if DATA.parsedline['matcher-status']==False: + DATA.level="medium" + DATA.vulnerability="MISSING-WAF" + return DATA + return None def parser_nuclei_onlyalert(PARSER_INPUT, PARSER_OUTPUT): @@ -344,28 +348,20 @@ def parser_nuclei_onlyalert(PARSER_INPUT, PARSER_OUTPUT): scope="I" for line in PARSER_INPUT: debug(line+"\n") - if NUCLEI_FINDING.match(line): + if line.startswith("{"): debug("Line contains the FINDING regular expression\n") DATA = NFinding(line,scope) debug(str(DATA)+"\n") if DATA.full_uri is not None: #NFinding class now does all the job parsing Nuclei findings, even domain or IP address. 
UNIQUE_KEY = str(DATA.name)+"|"+str(DATA.vulnerability) - debug("Searching:"+str(UNIQUE_KEY)+"\n") - debug("Values:"+str(DATA.temp_array)+"\n") - MSG = {'message':"[NUCLEI][New Finding]", 'host':DATA.name, 'finding':line} - MSG['datetime']=str(DATA.detectiondate) - MSG['url']=DATA.full_uri - MSG['waf']=DATA.temp_array[1] - MSG['status']=DATA.temp_array[2] - MSG['protocol']=DATA.temp_array[3] - #Level is always 'info' - MSG['level']=DATA.temp_array[4] + debug("OnlyAlert for:"+str(UNIQUE_KEY)+"\n") + MSG = {'message':"[NUCLEI][New Finding]", 'host':DATA.name} + MSG.update(DATA.getList()) delta(MSG) return - #Here is the global declaration of parsers, functions can be duplicated action={'default':parser_default, 'patator.ssh':parser_patator_ssh, 'patator.rdp':parser_patator_rdp, 'patator.ftp':parser_patator_ftp, 'patator.telnet':parser_patator_telnet, 'hydra.ftp':parser_hydra_ftp, 'hydra.telnet':parser_hydra_telnet, 'nuclei.http':parser_nuclei_http, 'nuclei.network':parser_nuclei_network, 'nuclei':parser_nuclei, 'nuclei.onlyalert':parser_nuclei_onlyalert, 'nuclei.waf':parser_nuclei_waf} diff --git a/frontend/asfui/app/nuclei.py b/frontend/asfui/app/nuclei.py index af415479..010f3009 100644 --- a/frontend/asfui/app/nuclei.py +++ b/frontend/asfui/app/nuclei.py @@ -14,7 +14,7 @@ import re from app.models import vdNucleiResult -NUCLEI_SEPARATOR = re.compile("[\[\]\n ]{2,}") +# NUCLEI_SEPARATOR = re.compile("[\[\]\n ]{2,}") NUCLEI_IP_PORT = re.compile("\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\:\d+") NUCLEI_IP = re.compile("(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})") NUCLEI_PORT = re.compile("\:(\d+)") @@ -23,14 +23,11 @@ NUCLEI_DOMAIN = re.compile("(?!\-)(?:[a-zA-Z\d\-]{0,62}[a-zA-Z\d]\.){1,126}(?!\d+)[a-zA-Z\d]{1,63}") NUCLEI_TEMPLATE_EXTENSIONS_PATTERNS = ['**/*.[yY][aA][mM][lL]'] NUCLEI_BLACKLIST_FILE = "/etc/vdnuclei.bl" -#DEFAULT VALUES, can be tweaked if required -NUCLEI_DEFAULT_SCOPE='E' -NUCLEI_DEFAULT_LEVEL='medium' NUCLEI_PTIME = { - 'S':{'P0E':72, 'P1I':336, 'P1E':336, 'P2I':720, 'P2E': 720, 'P3I': 1440, 'P4E':2160, 'P4I':2160}, - 'I':{'critical':'P1I', 'high':'P2I', 'medium':'P3I', 'low':'P4I'}, - 'E':{'critical':'P0E', 'high':'P1E', 'medium':'P2E', 'low':'P4E'} + 'S':{'P0E':72, 'P1I':336, 'P1E':336, 'P2I':720, 'P2E': 720, 'P3I': 1440, 'P4E':2160, 'P4I':2160, 'P5I':24*30, 'P5E':24*30}, + 'I':{'critical':'P1I', 'high':'P2I', 'medium':'P3I', 'low':'P4I', 'info':'P5I'}, + 'E':{'critical':'P0E', 'high':'P1E', 'medium':'P2E', 'low':'P4E', 'info':'P5E'} } class NFinding: def __init__(self, line=None, scope='E'): @@ -48,7 +45,8 @@ def __init__(self, line=None, scope='E'): self.detectiondate = None self.vulnerability = None self.engine = None - self.level = None + #self.level = None + self.level = 'info' self.scope = scope self.ptime = None self.uri = None @@ -61,37 +59,47 @@ def __init__(self, line=None, scope='E'): self.line = line self.owner = None self.metadata = None - self.temp_array = None - if (self.line is not None): - self.temp_array = NUCLEI_SEPARATOR.split(self.line) - #debug("Nuclei Array size:"+str(len(self.temp_array))+"\n") - if len(self.temp_array) >= 5: - self.detectiondate = self.temp_array[0][1:] - debug("DateOnFile:"+self.temp_array[0]+":") - self.detectiondate = datetime.strptime(self.detectiondate, "%Y-%m-%d %H:%M:%S") - self.temp_array[0]=self.detectiondate + self.parsedline = None + temp_array = {} + if (self.line is not None and self.line.startswith("{")): + try: + temp_array = json.loads(self.line) + self.parsedline = temp_array + except Exception as e: + debug("Discovery error 
in data:"+str(self.line)+"\n") + return + #{"template-id":"Application-dos","info":{"name":"Application_level_dos","author":["mr.iambatman"],"tags":null,"description":"application_dos","reference":null,"severity":"critical"},"type":"http","host":"http://a01.klt.rip","matched-at":"http://a01.klt.rip/","meta":{"header":"X-XSRF-TOKEN"},"ip":"35.235.88.151","timestamp":"2022-11-11T17:45:16.270214245Z","curl-command":"curl -X 'GET' -d '' -H 'Host: a01.klt.rip' -H 'User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:78.0) Gecko/20100101 Firefox/78.0' -H 'X-XSRF-TOKEN: ab72a99f16a2ff1249c98ccbd2916fa8' 'http://a01.klt.rip/'","matcher-status":true,"matched-line":null} + if len(temp_array) >= 5: + #"timestamp":"2022-11-11T17:45:16.270214245Z" + debug("Converting timestamp:"+temp_array['timestamp']+"\n") + self.detectiondate = datetime.strptime(temp_array['timestamp'][0:24], "%Y-%m-%dT%H:%M:%S.%f") debug("DateAsObject:"+str(self.detectiondate)+"\n") - self.vulnerability = self.temp_array[1] - self.engine = self.temp_array[2] - #self.level = self.temp_array[3].lower() - self.level = self.temp_array[-2].lower() - #self.full_uri = self.temp_array[4] - self.full_uri = self.temp_array[-1] + self.vulnerability = temp_array['template-id'] + self.engine = temp_array['type'] + self.level = temp_array['info']['severity'].lower() + if self.level not in NUCLEI_PTIME[self.scope]: + self.level = 'info' + if 'matched-at' in temp_array: + self.full_uri = temp_array['matched-at'] + else: + self.full_uri = temp_array['host'] self.uri = self.full_uri if len(self.full_uri) > vdNucleiResult._meta.get_field('uri').max_length: self.uriistruncated = 1 - if len(self.temp_array) >= 6: - self.info = self.temp_array[5] - else: - self.info = "" + self.info = temp_array['info'] self.setPortandName() - debug("Port detection debug:"+self.name+":"+str(self.port)+"\n") + if 'ip' in temp_array: + if ":" in temp_array['ip']: + self.ipv6=temp_array['ip'] + else: + self.ipv4=temp_array['ip'] + debug("Port detection debug:"+str(self.name)+":"+str(self.port)+"\n") self.type=autodetectType(self.name) self.ptime,hours=nuclei_ptime(self.level, self.scope) self.bumpdate=self.detectiondate+timedelta(hours=hours) debug("Delta date:"+str(self.bumpdate)+":"+self.ptime+":"+self.level+":"+str(self.detectiondate)+"\n") else: - debug("Error parsing finding in Nuclei Format, received:"+str(len(self.temp_array))+"/6 for line: "+str(line)+"\n") + debug("Error parsing finding in Nuclei Format, received:"+str(len(temp_array))+"/6 for line: "+str(line)+"\n") else: debug("Error parsing Line in Nuclei Format, received:"+str(line)+":Broken or empty\n") @@ -138,7 +146,20 @@ def setPortandName(self): self.nname=self.name def getList(self): - return {'port':self.port, 'state':self.state, 'protocol':self.protocol, 'owner':self.owner, 'name':self.name, 'info':self.rpc_info, 'version':self.version} + FINAL = {} + BASIC = {'port':self.port, 'severity':self.level, 'protocol':self.protocol, 'owner':self.owner, 'name':self.name, 'ipv4':self.ipv4, 'ipv6':self.ipv6} + for field in self.parsedline: + debug("Field:"+field+"["+str(type(self.parsedline[field]))+"]\n") + if type(self.parsedline[field]) is dict: + debug("Found a dict:"+field+"\n") + for subfield in self.parsedline[field]: + debug("Constructing subfield:"+subfield+"\n") + FINAL[field+"_"+subfield]=self.parsedline[field][subfield] + else: + FINAL[field]=self.parsedline[field] + FINAL.update(BASIC) + debug("\n\nFinal Array for Merge: "+str(FINAL)+"\n\n") + return FINAL def match(self, Other): if Other.port != 
self.port: @@ -151,7 +172,7 @@ def match(self, Other): return False if Other.name != self.name: return False - if Other.rpc_info != self.rpc_info: + if Other.info != self.info: return False if Other.version != self.version: return False @@ -284,13 +305,13 @@ def is_filter_enabled(context,filter): def nuclei_ptime(level,scope): if scope not in ['E','I']: - scope=NUCLEI_DEFAULT_SCOPE + return 'P1E', NUCLEI_PTIME['S']['P1E'] if level not in ['critical', 'high', 'medium', 'low']: - level=NUCLEI_DEFAULT_LEVEL + return 'P1E', NUCLEI_PTIME['S']['P1E'] return NUCLEI_PTIME[scope][level], NUCLEI_PTIME['S'][NUCLEI_PTIME[scope][level]] def get_nuclei_ptime_selector(context): - SELECTOR="\n" DEFAULT='P0E' if 'nuclei_ptime' in context: if context['nuclei_ptime'] in NUCLEI_PTIME['S']: diff --git a/frontend/asfui/app/systemd.py b/frontend/asfui/app/systemd.py index 74edfcab..86606659 100644 --- a/frontend/asfui/app/systemd.py +++ b/frontend/asfui/app/systemd.py @@ -74,7 +74,8 @@ def readTimerFromRequest(self, request): self.config['Minute'] = Minute self.config['Repeat'] = Repeat self.config['Days'] = ADays - self.config['Timer']['OnCalendar'] = Days+" "+Hour+":"+Minute+OnCalendarRepeat + #self.config['Timer']['OnCalendar'] = Days+" "+Hour+":"+Minute+OnCalendarRepeat + self.config['Timer']['OnCalendar'] = Days+" *-*-* "+Hour+":"+Minute def write(self): global MainDictionaries diff --git a/frontend/asfui/app/views.py b/frontend/asfui/app/views.py index fbbe696e..abae2a78 100644 --- a/frontend/asfui/app/views.py +++ b/frontend/asfui/app/views.py @@ -444,12 +444,20 @@ def nmap_save_regexp(): regexp_info = "Default" if 'regexp_info' in request.POST: regexp_info = request.POST['regexp_info'] + NewRegExp = vdRegExp(name = regexp_name, regexp = regexp_query, exclude = regexp_exclude, info = regexp_info) try: - NewRegExp = vdRegExp(name = regexp_name, regexp = regexp_query, exclude = regexp_exclude, info = regexp_info) NewRegExp.save() except: sys.stderr.write("Duplicated RegExpr, Skipping:"+regexp_name) - context['error'] = "Duplicated or Wrong Data" + if 'regexp_id' in request.POST: + OldID=int(request.POST['regexp_id']) + OldReg=vdRegExp.objects.get(id=OldID) + OldReg.regexp = regexp_query + OldReg.exclude = regexp_exclude + OldReg.info = regexp_info + OldReg.save() + context['error'] = "Updating duplicated name or Wrong Data" + def nmap_delete_regexp(): if "regexp_id" in request.POST: regexp_id = request.POST['regexp_id'] @@ -951,9 +959,11 @@ def export_services(writer): sys.stderr.write("Error looking for the regular expression\n") query = search(regexp, 'services', exclude) - writer.writerow(['name', 'cname', 'ipv4', 'lastdate', 'ports', 'full_ports', 'service_ssh', 'service_rdp', 'service_telnet', 'service_ftp', 'service_smb', 'nuclei_http', 'owner', 'tag', 'metadata']) + #writer.writerow(['name', 'cname', 'ipv4', 'lastdate', 'ports', 'full_ports', 'service_ssh', 'service_rdp', 'service_telnet', 'service_ftp', 'service_smb', 'nuclei_http', 'owner', 'tag', 'metadata']) + writer.writerow(['name', 'cname', 'ipv4', 'lastdate', 'ports', 'full_ports', 'owner', 'tag', 'metadata']) for host in query: - writer.writerow([host.name, host.nname, host.ipv4, host.lastdate, host.ports, host.full_ports, host.service_ssh, host.service_rdp, host.service_telnet, host.service_ftp, host.service_smb, host.nuclei_http, host.owner, host.tag, host.metadata]) + #writer.writerow([host.name, host.nname, host.ipv4, host.lastdate, host.ports, host.full_ports, host.service_ssh, host.service_rdp, host.service_telnet, host.service_ftp, 
host.service_smb, host.nuclei_http, host.owner, host.tag, host.metadata]) + writer.writerow([host.name, host.nname, host.ipv4, host.lastdate, host.ports, host.full_ports, host.owner, host.tag, host.metadata]) return def export_inservices(writer): @@ -969,11 +979,13 @@ def export_inservices(writer): sys.stderr.write("Error looking for the regular expression\n") query = search(regexp, 'inservices', exclude) - writer.writerow(['name', 'cname', 'ipv4', 'lastdate', 'ports', 'full_ports', 'service_ssh', 'service_rdp', 'service_telnet', 'service_ftp', 'service_smb', 'nuclei_http', 'owner', 'tag', 'metadata']) + #writer.writerow(['name', 'cname', 'ipv4', 'lastdate', 'ports', 'full_ports', 'service_ssh', 'service_rdp', 'service_telnet', 'service_ftp', 'service_smb', 'nuclei_http', 'owner', 'tag', 'metadata']) + writer.writerow(['name', 'cname', 'ipv4', 'lastdate', 'ports', 'full_ports', 'owner', 'tag', 'metadata']) for host in query: - writer.writerow([host.name, host.nname, host.ipv4, host.lastdate, host.ports, host.full_ports, host.service_ssh, host.service_rdp, host.service_telnet, host.service_ftp, host.service_smb, host.nuclei_http, host.owner, host.tag, host.metadata]) + #writer.writerow([host.name, host.nname, host.ipv4, host.lastdate, host.ports, host.full_ports, host.service_ssh, host.service_rdp, host.service_telnet, host.service_ftp, host.service_smb, host.nuclei_http, host.owner, host.tag, host.metadata]) + writer.writerow([host.name, host.nname, host.ipv4, host.lastdate, host.ports, host.full_ports, host.owner, host.tag, host.metadata]) return - + def export_targets(writer): query = vdTarget.objects.all() writer.writerow(['name', 'type', 'lastdate', 'itemcount', 'tag', 'owner', 'metadata']) diff --git a/frontend/asfui/core/settings.py b/frontend/asfui/core/settings.py index 253412e7..469f4142 100644 --- a/frontend/asfui/core/settings.py +++ b/frontend/asfui/core/settings.py @@ -14,14 +14,14 @@ PROJECT_DIR = Path(__file__).parent # SECURITY WARNING: keep the secret key used in production secret! -SECRET_KEY = config('SECRET_KEY', default='PutYourSecretHere') +SECRET_KEY = config('SECRET_KEY', default='S#perS3crEt_1122') # SECURITY WARNING: don't run with debug turned on in production! 
#DEBUG = config('DEBUG', default=False) -DEBUG = False +DEBUG = True # load production server from .env -ALLOWED_HOSTS = ['localhost', '127.0.0.1', config('SERVER', default='127.0.0.1')] +ALLOWED_HOSTS = ['localhost', '127.0.0.1', '192.168.11.161', '10.0.0.190', '10.169.255.100', config('SERVER', default='127.0.0.1')] # Application definition diff --git a/frontend/asfui/core/static/assets/asf/logo.png b/frontend/asfui/core/static/assets/asf/logo.png new file mode 100644 index 00000000..5ea27ec2 Binary files /dev/null and b/frontend/asfui/core/static/assets/asf/logo.png differ diff --git a/frontend/asfui/core/static/assets/asf/logo2.png b/frontend/asfui/core/static/assets/asf/logo2.png new file mode 100644 index 00000000..62fa99e1 Binary files /dev/null and b/frontend/asfui/core/static/assets/asf/logo2.png differ diff --git a/frontend/asfui/core/static/assets/asf/logo3.png b/frontend/asfui/core/static/assets/asf/logo3.png new file mode 100644 index 00000000..ea123ec2 Binary files /dev/null and b/frontend/asfui/core/static/assets/asf/logo3.png differ diff --git a/frontend/asfui/core/static/assets/css/material-dashboard.css b/frontend/asfui/core/static/assets/css/material-dashboard.css index b28cecf4..5708dc21 100644 --- a/frontend/asfui/core/static/assets/css/material-dashboard.css +++ b/frontend/asfui/core/static/assets/css/material-dashboard.css @@ -13,9 +13,7 @@ The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.*/ - /* brand Colors */ - .card { font-size: .875rem; } diff --git a/frontend/asfui/core/static/assets/img/imac1.png b/frontend/asfui/core/static/assets/img/imac1.png new file mode 100644 index 00000000..de060d92 Binary files /dev/null and b/frontend/asfui/core/static/assets/img/imac1.png differ diff --git a/frontend/asfui/core/static/assets/img/ipad.png b/frontend/asfui/core/static/assets/img/ipad.png new file mode 100644 index 00000000..0f4b1d63 Binary files /dev/null and b/frontend/asfui/core/static/assets/img/ipad.png differ diff --git a/frontend/asfui/core/templates/includes/footer.html b/frontend/asfui/core/templates/includes/footer.html index 48213107..a0b308b3 100644 --- a/frontend/asfui/core/templates/includes/footer.html +++ b/frontend/asfui/core/templates/includes/footer.html @@ -8,6 +8,7 @@
  • + redteam@vmware.com
  • diff --git a/frontend/asfui/core/templates/includes/sidebar.html b/frontend/asfui/core/templates/includes/sidebar.html index 1b0e8f40..7d1d7aaa 100644 --- a/frontend/asfui/core/templates/includes/sidebar.html +++ b/frontend/asfui/core/templates/includes/sidebar.html @@ -5,9 +5,10 @@ target="_blank" rel="noopener noreferrer" href="https://www.creative-tim.com/product/material-dashboard-django" class="simple-text logo-normal"> - --> + + {% if user.is_authenticated %}