Various readability improvements for logging
aloftus23 committed Dec 17, 2024
1 parent 5f8ee5f commit 4acc2dc
Showing 7 changed files with 26 additions and 11 deletions.
2 changes: 1 addition & 1 deletion src/pe_mailer/email_reports.py
@@ -271,6 +271,7 @@ def send_pe_reports(ses_client, pe_report_dir, to):

        # to_emails should contain at least one email
        if not to_emails:
+            reports_not_mailed += 1
            continue

        # Find and mail the Posture and Exposure report, if necessary
@@ -317,7 +318,6 @@ def send_pe_reports(ses_client, pe_report_dir, to):
print("Report File:", pe_report_filename)
print("ASM Summary File", pe_asm_filename, "\n")


try:
agencies_emailed_pe_reports = send_message(
ses_client, message, agencies_emailed_pe_reports
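A note on the change above: the new `reports_not_mailed` counter tracks agencies skipped because no recipient addresses were found. A minimal, self-contained sketch of that skip-and-count pattern; the `reports` sample data here is invented for illustration:

```python
# Hypothetical stand-in data; the real code iterates over agency report files.
reports = [("AGENCY-A", ["ops@example.gov"]), ("AGENCY-B", [])]

reports_not_mailed = 0
for agency, to_emails in reports:
    # to_emails should contain at least one email
    if not to_emails:
        reports_not_mailed += 1
        continue
    print(f"Would mail {agency} report to {to_emails}")

print(f"{reports_not_mailed} report(s) skipped for missing recipients")
```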
3 changes: 3 additions & 0 deletions src/pe_source/cybersixgill.py
@@ -89,6 +89,9 @@ def run_cybersixgill(self):
            else:
                continue

+        # alphabetize orgs for consistent order
+        pe_orgs_final = sorted(pe_orgs_final, key=lambda d: d["cyhy_db_name"])
+
        # Get Cybersixgill org info
        sixgill_orgs = get_sixgill_organizations()
        failed = []
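The alphabetizing line added here recurs in dnsmonitor.py, intelx_identity.py, and shodan_wrapper.py below. A standalone illustration of the pattern, sorting a list of org dicts by their `cyhy_db_name` key (the sample records are invented):

```python
# Invented sample org records mimicking the shape the scans iterate over.
pe_orgs_final = [
    {"cyhy_db_name": "DHS", "org_uid": 2},
    {"cyhy_db_name": "DOC", "org_uid": 3},
    {"cyhy_db_name": "CISA", "org_uid": 1},
]

# alphabetize orgs for consistent order across runs
pe_orgs_final = sorted(pe_orgs_final, key=lambda d: d["cyhy_db_name"])

print([org["cyhy_db_name"] for org in pe_orgs_final])
# ['CISA', 'DHS', 'DOC']
```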
16 changes: 8 additions & 8 deletions src/pe_source/data/sixgill/api.py
@@ -30,7 +30,7 @@ def get_sixgill_organizations():
    retry_count, max_retries, time_delay = 0, 10, 5
    while orgs.status_code != 200 and retry_count < max_retries:
        endpoint_name = url.split('/')[-1]
-        LOGGER.warning(f"Retrying Cybersixgill /{endpoint_name} endpoint, attmept {retry_count+1} of {max_retries}")
+        LOGGER.warning(f"Retrying Cybersixgill /{endpoint_name} endpoint (code {orgs.status_code}), attempt {retry_count+1} of {max_retries}")
        time.sleep(time_delay)
        orgs = requests.get(url, headers=headers)
        retry_count += 1
@@ -58,7 +58,7 @@ def org_assets(org_id):
    retry_count, max_retries, time_delay = 0, 10, 5
    while resp.status_code != 200 and retry_count < max_retries:
        endpoint_name = url.split('/')[-1]
-        LOGGER.warning(f"Retrying Cybersixgill /{endpoint_name} endpoint, attmept {retry_count+1} of {max_retries}")
+        LOGGER.warning(f"Retrying Cybersixgill /{endpoint_name} endpoint (code {resp.status_code}), attempt {retry_count+1} of {max_retries}")
        time.sleep(time_delay)
        resp = requests.get(url, headers=headers, params=payload)
        retry_count += 1
@@ -93,7 +93,7 @@ def intel_post(auth, query, frm, scroll, result_size):
    retry_count, max_retries, time_delay = 0, 10, 5
    while resp.status_code != 200 and retry_count < max_retries:
        endpoint_name = url.split('/')[-1]
-        LOGGER.warning(f"Retrying Cybersixgill /{endpoint_name} endpoint, attmept {retry_count+1} of {max_retries}")
+        LOGGER.warning(f"Retrying Cybersixgill /{endpoint_name} endpoint (code {resp.status_code}), attempt {retry_count+1} of {max_retries}")
        time.sleep(time_delay)
        resp = requests.post(url, headers=headers, json=payload)
        retry_count += 1
@@ -152,7 +152,7 @@ def alerts_count(auth, organization_id):
    retry_count, max_retries, time_delay = 0, 10, 5
    while resp.status_code != 200 and retry_count < max_retries:
        endpoint_name = url.split('/')[-1]
-        LOGGER.warning(f"Retrying Cybersixgill /{endpoint_name} endpoint, attmept {retry_count+1} of {max_retries}")
+        LOGGER.warning(f"Retrying Cybersixgill /{endpoint_name} endpoint (code {resp.status_code}), attempt {retry_count+1} of {max_retries}")
        time.sleep(time_delay)
        resp = requests.get(url, headers=headers, params=payload)
        retry_count += 1
@@ -176,7 +176,7 @@ def alerts_content(auth, organization_id, alert_id):
    retry_count, max_retries, time_delay = 0, 10, 5
    while content.status_code != 200 and retry_count < max_retries:
        endpoint_name = url.split('/')[-1]
-        LOGGER.warning(f"Retrying Cybersixgill /actionable_alert_content endpoint, attmept {retry_count+1} of {max_retries}")
+        LOGGER.warning(f"Retrying Cybersixgill /actionable_alert_content endpoint (code {content.status_code}), attempt {retry_count+1} of {max_retries}")
        time.sleep(time_delay)
        content = requests.get(url, headers=headers, params=payload)
        retry_count += 1
@@ -221,7 +221,7 @@ def dve_top_cves():
    retry_count, max_retries, time_delay = 0, 10, 5
    while resp.status_code != 200 and retry_count < max_retries:
        endpoint_name = url.split('/')[-1]
-        LOGGER.warning(f"Retrying Cybersixgill /{endpoint_name} endpoint, attmept {retry_count+1} of {max_retries}")
+        LOGGER.warning(f"Retrying Cybersixgill /{endpoint_name} endpoint (code {resp.status_code}), attempt {retry_count+1} of {max_retries}")
        time.sleep(time_delay)
        resp = requests.post(url, headers=headers, data=data)
        retry_count += 1
@@ -263,7 +263,7 @@ def credential_auth(params):
    retry_count, max_retries, time_delay = 0, 10, 5
    while resp.status_code != 200 and retry_count < max_retries:
        endpoint_name = url.split('/')[-1]
-        LOGGER.warning(f"Retrying Cybersixgill /{endpoint_name} endpoint, attmept {retry_count+1} of {max_retries}")
+        LOGGER.warning(f"Retrying Cybersixgill /{endpoint_name} endpoint (code {resp.status_code}), attempt {retry_count+1} of {max_retries}")
        time.sleep(time_delay)
        resp = requests.get(url, headers=headers, params=params)
        retry_count += 1
@@ -402,7 +402,7 @@ def get_bulk_cve_resp(cve_list):
    retry_count, max_retries, time_delay = 0, 10, 5
    while resp.status_code != 200 and retry_count < max_retries:
        endpoint_name = url.split('/')[-1]
-        LOGGER.warning(f"Retrying Cybersixgill /{endpoint_name} endpoint, attmept {retry_count+1} of {max_retries}")
+        LOGGER.warning(f"Retrying Cybersixgill /{endpoint_name} endpoint (code {resp.status_code}), attempt {retry_count+1} of {max_retries}")
        time.sleep(time_delay)
        resp = requests.get(url, headers=headers, params=params)
        retry_count += 1
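All eight hunks above make the same edit to eight nearly identical retry loops. One way to avoid repeating the loop (and its log string) would be a shared helper along these lines; this is a sketch of a possible refactor with assumed names, not code from this commit:

```python
import logging
import time

import requests

LOGGER = logging.getLogger(__name__)


def get_with_retries(method, url, max_retries=10, time_delay=5, **kwargs):
    """Re-issue a request until it returns 200 or the retry budget is spent."""
    resp = requests.request(method, url, **kwargs)
    retry_count = 0
    while resp.status_code != 200 and retry_count < max_retries:
        endpoint_name = url.split("/")[-1]
        LOGGER.warning(
            f"Retrying Cybersixgill /{endpoint_name} endpoint "
            f"(code {resp.status_code}), attempt {retry_count + 1} of {max_retries}"
        )
        time.sleep(time_delay)
        resp = requests.request(method, url, **kwargs)
        retry_count += 1
    return resp
```

Each call site would then reduce to something like `resp = get_with_retries("get", url, headers=headers, params=payload)`.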
3 changes: 3 additions & 0 deletions src/pe_source/dnsmonitor.py
@@ -63,6 +63,9 @@ def run_dnsMonitor(self):
            else:
                continue

+        # alphabetize orgs for consistent order
+        pe_orgs_final = sorted(pe_orgs_final, key=lambda d: d["cyhy_db_name"])
+
        # Fetch the bearer token
        token = dnsmonitor_token()
        # Get all of the Domains being monitored
4 changes: 4 additions & 0 deletions src/pe_source/intelx_identity.py
@@ -69,6 +69,9 @@ def run_intelx(self):
            else:
                continue

+        # alphabetize orgs for consistent order
+        pe_orgs_final = sorted(pe_orgs_final, key=lambda d: d["cyhy_db_name"])
+
        success = 0
        failed = 0
        for org_idx, pe_org in enumerate(pe_orgs_final):
@@ -78,6 +81,7 @@ def run_intelx(self):
            # Verify the org is in the list of orgs to scan
            if cyhy_org_id in orgs_list or orgs_list == "all" or orgs_list == "DEMO":
                LOGGER.info(f"Running IntelX on {cyhy_org_id} ({org_idx+1} of {len(pe_orgs_final)})")
+                print(f"Running IntelX on {cyhy_org_id} ({org_idx+1} of {len(pe_orgs_final)})")
                if self.get_credentials(cyhy_org_id, pe_org_uid) == 1:
                    LOGGER.error("Failed to get credentials for %s", cyhy_org_id)
                    failed += 1
6 changes: 4 additions & 2 deletions src/pe_source/pe_scripts.py
@@ -63,6 +63,8 @@ def run_pe_script(source, orgs_list, cybersix_methods, soc_med_included):
    cybersix_methods = cybersix_methods.split(",")

    # LOGGER.info("Running %s on these orgs: %s", source, orgs_list)
+    first_org = orgs_list[0]
+    final_org = orgs_list[-1]

    if source == "cybersixgill":
        if sixgill_scan_name == "Topcves":
@@ -73,7 +75,7 @@ def run_pe_script(source, orgs_list, cybersix_methods, soc_med_included):
        cybersix = Cybersixgill(orgs_list, cybersix_methods, soc_med_included)
        cybersix.run_cybersixgill()
        sixgill_end_time = time.time()
-        LOGGER.info(f"Execution time for Cybersixgill {sixgill_scan_name} scan: {str(timedelta(seconds=(sixgill_end_time - sixgill_start_time)))} (H:M:S)")
+        LOGGER.info(f"Execution time for Cybersixgill {sixgill_scan_name} scan ({first_org} - {final_org}): {str(timedelta(seconds=(sixgill_end_time - sixgill_start_time)))} (H:M:S)")
        LOGGER.info(f"--- Cybersixgill {sixgill_scan_name} Scan Complete ---")
    elif source == "shodan":
        LOGGER.info("--- Shodan Scan Starting ---")
@@ -108,7 +110,7 @@ def run_pe_script(source, orgs_list, cybersix_methods, soc_med_included):
        intelx = IntelX(orgs_list)
        intelx.run_intelx()
        intelx_end_time = time.time()
-        LOGGER.info(f"Execution time for IntelX scan: {str(timedelta(seconds=(intelx_end_time - intelx_start_time)))} (H:M:S)")
+        LOGGER.info(f"Execution time for IntelX scan ({first_org} - {final_org}): {str(timedelta(seconds=(intelx_end_time - intelx_start_time)))} (H:M:S)")
        LOGGER.info("--- IntelX Scan Complete ---")
    elif source == "pshtt":
        launch_pe_pshtt()
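Both timing changes pair `time.time()` with `datetime.timedelta` to log elapsed wall-clock time. A self-contained sketch of that formatting; the sleep stands in for a real scan run:

```python
import time
from datetime import timedelta

start_time = time.time()
time.sleep(1.5)  # stand-in for a scan such as cybersix.run_cybersixgill()
end_time = time.time()

# timedelta renders the elapsed seconds as H:MM:SS(.ffffff)
elapsed = str(timedelta(seconds=(end_time - start_time)))
print(f"Execution time for scan: {elapsed} (H:M:S)")
```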
3 changes: 3 additions & 0 deletions src/pe_source/shodan_wrapper.py
@@ -51,6 +51,9 @@ def run_shodan(self):
            else:
                continue

+        # alphabetize orgs for consistent order
+        pe_orgs_final = sorted(pe_orgs_final, key=lambda d: d["cyhy_db_name"])
+
        # Get list of initialized API objects
        api_list = shodan_api_init()

