Ruff: Add and fix SIM113 #11652

Merged · 1 commit · Feb 6, 2025
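SIM113 is the flake8-simplify rule (enforced here through Ruff) that flags a counter variable incremented by hand inside a for loop and suggests enumerate() instead. A minimal before/after sketch of the pattern, using a hypothetical items list:

    # Flagged by SIM113: the index is tracked by hand.
    items = ["a", "b", "c"]  # hypothetical data
    idx = 0
    for item in items:
        print(idx, item)
        idx += 1

    # Preferred: enumerate() yields (index, element) pairs.
    for idx, item in enumerate(items):
        print(idx, item)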
6 changes: 2 additions & 4 deletions dojo/finding/helper.py
@@ -516,11 +516,9 @@ def prepare_duplicates_for_delete(test=None, engagement=None):
 
     # remove the link to the original from the duplicates inside the cluster so they can be safely deleted by the django framework
     total = len(originals)
-    i = 0
     # logger.debug('originals: %s', [original.id for original in originals])
-    for original in originals:
-        i += 1
-        logger.debug("%d/%d: preparing duplicate cluster for deletion of original: %d", i, total, original.id)
+    for i, original in enumerate(originals):
+        logger.debug("%d/%d: preparing duplicate cluster for deletion of original: %d", i + 1, total, original.id)
         cluster_inside = original.original_finding.all()
         if engagement:
             cluster_inside = cluster_inside.filter(test__engagement=engagement)
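The old counter was bumped at the top of the loop body, so the logged count was 1-based; the 0-based enumerate index compensates with i + 1. An equivalent sketch (not the merged code) using enumerate's start parameter instead:

    for i, original in enumerate(originals, start=1):
        logger.debug("%d/%d: preparing duplicate cluster for deletion of original: %d", i, total, original.id)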
16 changes: 3 additions & 13 deletions dojo/reports/views.py
@@ -880,19 +880,15 @@ def get(self, request):
             fields.append(finding.test.engagement.product.name)
 
             endpoint_value = ""
-            num_endpoints = 0
             for endpoint in finding.endpoints.all():
-                num_endpoints += 1
                 endpoint_value += f"{endpoint}; "
             endpoint_value = endpoint_value.removesuffix("; ")
             if len(endpoint_value) > EXCEL_CHAR_LIMIT:
                 endpoint_value = endpoint_value[:EXCEL_CHAR_LIMIT - 3] + "..."
             fields.append(endpoint_value)
 
             vulnerability_ids_value = ""
-            num_vulnerability_ids = 0
-            for vulnerability_id in finding.vulnerability_ids:
-                num_vulnerability_ids += 1
+            for num_vulnerability_ids, vulnerability_id in enumerate(finding.vulnerability_ids):
                 if num_vulnerability_ids > 5:
                     vulnerability_ids_value += "..."
                     break
@@ -903,9 +899,7 @@ def get(self, request):
             fields.append(vulnerability_ids_value)
             # Tags
             tags_value = ""
-            num_tags = 0
-            for tag in finding.tags.all():
-                num_tags += 1
+            for num_tags, tag in enumerate(finding.tags.all()):
                 if num_tags > 5:
                     tags_value += "..."
                     break
@@ -1029,9 +1023,7 @@ def get(self, request):
             col_num += 1
 
             endpoint_value = ""
-            num_endpoints = 0
             for endpoint in finding.endpoints.all():
-                num_endpoints += 1
                 endpoint_value += f"{endpoint}; \n"
             endpoint_value = endpoint_value.removesuffix("; \n")
             if len(endpoint_value) > EXCEL_CHAR_LIMIT:
@@ -1040,9 +1032,7 @@ def get(self, request):
             col_num += 1
 
             vulnerability_ids_value = ""
-            num_vulnerability_ids = 0
-            for vulnerability_id in finding.vulnerability_ids:
-                num_vulnerability_ids += 1
+            for num_vulnerability_ids, vulnerability_id in enumerate(finding.vulnerability_ids):
                 if num_vulnerability_ids > 5:
                     vulnerability_ids_value += "..."
                     break
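One subtlety in these hunks: the old counters were incremented before the > 5 check, so they were 1-based and truncated the cell after the fifth entry, while the 0-based enumerate index trips the same check one entry later. A sketch that would keep the original cutoff (the append line is assumed, since it sits in the collapsed part of the diff):

    for num_vulnerability_ids, vulnerability_id in enumerate(finding.vulnerability_ids, start=1):
        if num_vulnerability_ids > 5:
            vulnerability_ids_value += "..."
            break
        vulnerability_ids_value += f"{vulnerability_id}; "  # assumed append, hidden in the collapsed diff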
13 changes: 3 additions & 10 deletions dojo/tools/openvas/csv_parser.py
@@ -262,10 +262,8 @@ def create_chain(self):
 
     def read_column_names(self, row):
         column_names = {}
-        index = 0
-        for column in row:
+        for index, column in enumerate(row):
             column_names[index] = column
-            index += 1
         return column_names
 
     def get_findings(self, filename, test):
@@ -276,21 +274,17 @@ def get_findings(self, filename, test):
         if isinstance(content, bytes):
             content = content.decode("utf-8")
         reader = csv.reader(io.StringIO(content), delimiter=",", quotechar='"')
-        row_number = 0
-        for row in reader:
+        for row_number, row in enumerate(reader):
             finding = Finding(test=test)
             finding.unsaved_vulnerability_ids = []
             finding.unsaved_endpoints = [Endpoint()]
             if row_number == 0:
                 column_names = self.read_column_names(row)
-                row_number += 1
                 continue
-            column_number = 0
-            for column in row:
+            for column_number, column in enumerate(row):
                 chain.process_column(
                     column_names[column_number], column, finding,
                 )
-                column_number += 1
             if finding is not None and row_number > 0:
                 if finding.title is None:
                     finding.title = ""
@@ -309,5 +303,4 @@ def get_findings(self, filename, test):
                 ).hexdigest()
                 if key not in dupes:
                     dupes[key] = finding
-            row_number += 1
         return list(dupes.values())
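Since row 0 only supplies column names, an alternative sketch (not part of this PR) consumes the header with next() and drops the row-number bookkeeping altogether; Finding, test, and chain are taken from the surrounding code:

    reader = csv.reader(io.StringIO(content), delimiter=",", quotechar='"')
    column_names = dict(enumerate(next(reader, [])))  # header row -> {index: name}
    for row in reader:  # only data rows remain
        finding = Finding(test=test)
        for column_number, column in enumerate(row):
            chain.process_column(column_names[column_number], column, finding)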
11 changes: 3 additions & 8 deletions dojo/tools/sarif/parser.py
@@ -126,10 +126,8 @@ def get_result_cwes_properties(result):
 
 def get_artifacts(run):
     artifacts = {}
-    custom_index = 0  # hack because some tool doesn't generate this attribute
-    for tree_artifact in run.get("artifacts", []):
+    for custom_index, tree_artifact in enumerate(run.get("artifacts", [])):
         artifacts[tree_artifact.get("index", custom_index)] = tree_artifact
-        custom_index += 1
     return artifacts
 
 
@@ -229,9 +227,8 @@ def get_codeFlowsDescription(codeFlows):
                 continue
 
             description = f"**{_('Code flow')}:**\n"
-            line = 1
 
-            for location in threadFlow.get("locations", []):
+            for line, location in enumerate(threadFlow.get("locations", [])):
                 physicalLocation = location.get("location", {}).get("physicalLocation", {})
                 region = physicalLocation.get("region", {})
                 uri = physicalLocation.get("artifactLocation").get("uri")
@@ -249,7 +246,7 @@ def get_codeFlowsDescription(codeFlows):
                 if "snippet" in region:
                     snippet = f"\t-\t{region.get('snippet').get('text')}"
 
-                description += f"{line}. {uri}{start_line}{start_column}{snippet}\n"
+                description += f"{line + 1}. {uri}{start_line}{start_column}{snippet}\n"
 
                 if "message" in location.get("location", {}):
                     message_field = location.get("location", {}).get("message", {})
@@ -260,8 +257,6 @@ def get_codeFlowsDescription(codeFlows):
 
                     description += f"\t{message}\n"
 
-                line += 1
-
     return description
 
 
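The line + 1 in the f-string keeps the rendered numbering 1-based. A sketch of an alternative (not what was merged): enumerate's start parameter gives the same numbering without the arithmetic:

    for line, location in enumerate(threadFlow.get("locations", []), start=1):
        # ... body unchanged ...
        description += f"{line}. {uri}{start_line}{start_column}{snippet}\n"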
14 changes: 3 additions & 11 deletions dojo/tools/skf/parser.py
@@ -86,10 +86,8 @@ def create_chain(self):
         return date_column_strategy
 
     def read_column_names(self, column_names, row):
-        index = 0
-        for column in row:
+        for index, column in enumerate(row):
             column_names[index] = column
-            index += 1
 
     def get_findings(self, filename, test):
         content = filename.read()
@@ -99,26 +97,22 @@ def get_findings(self, filename, test):
         column_names = {}
         chain = self.create_chain()
 
-        row_number = 0
         reader = csv.reader(
             io.StringIO(content), delimiter=",", quotechar='"', escapechar="\\",
         )
         dupes = {}
-        for row in reader:
+        for row_number, row in enumerate(reader):
             finding = Finding(test=test)
             finding.severity = "Info"
 
             if row_number == 0:
                 self.read_column_names(column_names, row)
-                row_number += 1
                 continue
 
-            column_number = 0
-            for column in row:
+            for column_number, column in enumerate(row):
                 chain.process_column(
                     column_names[column_number], column, finding,
                 )
-                column_number += 1
 
             if finding is not None:
                 key = hashlib.sha256(
@@ -134,6 +128,4 @@ def get_findings(self, filename, test):
                 if key not in dupes:
                     dupes[key] = finding
 
-            row_number += 1
-
         return list(dupes.values())
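As an aside, read_column_names could shrink further: dict.update accepts any iterable of key/value pairs, and enumerate(row) yields exactly that. A sketch, not part of this PR:

    def read_column_names(self, column_names, row):
        # enumerate(row) yields (index, name) pairs, which update() consumes directly
        column_names.update(enumerate(row))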
1 change: 0 additions & 1 deletion ruff.toml
@@ -89,7 +89,6 @@ ignore = [
     "E501",
     "E722",
     "SIM102",
-    "SIM113",
     "SIM115",
     "SIM116",
     "SIM117",