Skip to content

Commit 0c4a5d9

Browse files
committed
tests: fix pylint errors
Signed-off-by: alindima <[email protected]>
1 parent 813845d commit 0c4a5d9

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

57 files changed

+201
-188
lines changed

tests/conftest.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -86,9 +86,9 @@ def test_with_any_microvm(test_microvm_any):
8686

8787
import host_tools.cargo_build as build_tools
8888
import host_tools.network as net_tools
89-
import host_tools.proc as proc
90-
import framework.utils as utils
91-
import framework.defs as defs
89+
from host_tools import proc
90+
from framework import utils
91+
from framework import defs
9292
from framework.artifacts import ArtifactCollection
9393
from framework.microvm import Microvm
9494
from framework.s3fetcher import MicrovmImageS3Fetcher
@@ -144,7 +144,7 @@ def __init__(self, test_name: str, append=True):
144144
# Create the root directory, if it doesn't exist.
145145
self._root_path.mkdir(exist_ok=True)
146146

147-
self._file = open(self._root_path / test_name, flags)
147+
self._file = open(self._root_path / test_name, flags, encoding='utf-8')
148148

149149
def writeln(self, data: str):
150150
"""Write the `data` string to the output file, appending a newline."""

tests/framework/builder.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313
ArtifactCollection, Artifact, DiskArtifact, Snapshot,
1414
SnapshotType, NetIfaceConfig
1515
)
16-
import framework.utils as utils
16+
from framework import utils
1717
import host_tools.logging as log_tools
1818
import host_tools.network as net_tools
1919

@@ -129,7 +129,8 @@ def build(self,
129129
)
130130
assert vm.api_session.is_status_no_content(response.status_code)
131131

132-
with open(config.local_path()) as microvm_config_file:
132+
with open(config.local_path(), encoding='utf-8') as \
133+
microvm_config_file:
133134
microvm_config = json.load(microvm_config_file)
134135

135136
response = vm.basic_config(

tests/framework/decorators.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -15,8 +15,8 @@ class ApiTimeoutException(Exception):
1515
def __init__(self, duration, method, resource, payload):
1616
"""Compose the error message from the API call components."""
1717
super().__init__(
18-
'API call exceeded maximum duration: {:.2f} ms.\n'
19-
'Call: {} {} {}'.format(duration, method, resource, payload)
18+
f'API call exceeded maximum duration: {float(duration)} ms.\n'
19+
f'Call: {method} {resource} {payload}'
2020
)
2121

2222
def timed(*args, **kwargs):

tests/framework/gitlint_rules.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,9 @@
44

55
from gitlint.rules import CommitRule, RuleViolation
66

7+
# Too few public methods (1/2) (too-few-public-methods)
8+
# pylint: disable=R0903
9+
710

811
class SignedOffBy(CommitRule):
912
"""Make sure that each commit contains a "Signed-off-by" line."""

tests/framework/jailer.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -7,8 +7,8 @@
77
import stat
88
from pathlib import Path
99
from retry.api import retry_call
10-
import framework.utils as utils
11-
import framework.defs as defs
10+
from framework import utils
11+
from framework import defs
1212
from framework.defs import FC_BINARY_NAME
1313

1414
# Default name for the socket used for API calls.

tests/framework/microvm.py

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@
2828
import host_tools.memory as mem_tools
2929
import host_tools.network as net_tools
3030

31-
import framework.utils as utils
31+
from framework import utils
3232
from framework.defs import MICROVM_KERNEL_RELPATH, MICROVM_FSFILES_RELPATH, \
3333
FC_PID_FILE_NAME
3434
from framework.http import Session
@@ -337,7 +337,7 @@ def pid_in_new_ns(self):
337337
pid_file_path = f"{self.jailer.chroot_path()}/{FC_PID_FILE_NAME}"
338338
if os.path.exists(pid_file_path):
339339
# Read the PID stored inside the file.
340-
with open(pid_file_path) as file:
340+
with open(pid_file_path, encoding='utf-8') as file:
341341
fc_pid = int(file.readline())
342342

343343
return fc_pid
@@ -591,8 +591,8 @@ def spawn(self, create_logger=True,
591591
self._screen_pid = screen_pid
592592

593593
self.jailer_clone_pid = int(open('/proc/{0}/task/{0}/children'
594-
.format(screen_pid)
595-
).read().strip())
594+
.format(screen_pid),
595+
encoding='utf-8').read().strip())
596596

597597
# Configure screen to flush stdout to file.
598598
flush_cmd = 'screen -S {session} -X colon "logfile flush 0^M"'
@@ -883,7 +883,7 @@ def start_console_logger(self, log_fifo):
883883
"""
884884
def monitor_fd(microvm, path):
885885
try:
886-
fd = open(path, "r")
886+
fd = open(path, "r", encoding='utf-8')
887887
while True:
888888
try:
889889
if microvm().logging_thread.stopped():
@@ -936,7 +936,8 @@ def open(self):
936936
# serial already opened
937937
return
938938

939-
screen_log_fd = os.open(self._vm.screen_log, os.O_RDONLY)
939+
screen_log_fd = os.open(self._vm.screen_log,
940+
os.O_RDONLY)
940941
self._poller = select.poll()
941942
self._poller.register(screen_log_fd,
942943
select.POLLIN | select.POLLHUP)

tests/framework/report.py

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -52,7 +52,7 @@ def test_net_device():
5252
import subprocess
5353
from collections import namedtuple
5454
from pathlib import Path
55-
from . import mpsing # pylint: disable=relative-beyond-top-level
55+
from framework import mpsing
5656

5757
# Try to see if we're in a git repo. If yes, set the commit ID
5858
COMMIT_ID = ""
@@ -168,8 +168,8 @@ def parse_data(test):
168168

169169
# Create a dict with default values
170170
found_data = {
171-
key: Report.doc_items[key].value
172-
for key in Report.doc_items}
171+
key: value.value
172+
for (key, value) in Report.doc_items.items()}
173173
found_data["name"] = test.nodeid
174174

175175
# Handle None docstrings
@@ -205,7 +205,7 @@ def parse_data(test):
205205
f"{crt_doc_item.one_of}, not {item_value}")
206206

207207
# Check if the item was found twice
208-
if crt_item in docstring_items.keys():
208+
if crt_item in docstring_items:
209209
raise ValueError(f"Item {crt_item} specified twice.")
210210

211211
docstring_items[crt_item] = item_value
@@ -267,7 +267,8 @@ def write_report(self):
267267
self._data_loc.mkdir(exist_ok=True, parents=True)
268268

269269
# Dump the JSON file
270-
with open(self._data_loc / Report.FNAME_JSON, "w") as json_file:
270+
with open(self._data_loc / Report.FNAME_JSON, "w", encoding='utf-8') \
271+
as json_file:
271272
total_duration = 0
272273
test_items = []
273274
for test_name in sorted(self._collected_items):

tests/framework/scheduler.py

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -33,9 +33,7 @@
3333
from _pytest.mark import Expression, MarkMatcher
3434
from _pytest.main import ExitCode
3535

36-
from . import defs # pylint: disable=relative-beyond-top-level
37-
from . import mpsing # pylint: disable=relative-beyond-top-level
38-
from . import report as treport # pylint: disable=relative-beyond-top-level
36+
from framework import defs, mpsing, report as treport
3937

4038

4139
class PytestScheduler(mpsing.MultiprocessSingleton):
@@ -318,7 +316,7 @@ def _worker_main(self, items, startup_delay=0):
318316
for item, nextitem in zip(
319317
self.session.items,
320318
self.session.items[1:] + [None]
321-
):
319+
):
322320
item.ihook.pytest_runtest_protocol(item=item, nextitem=nextitem)
323321

324322
@mpsing.ipcmethod

tests/framework/stats/__init__.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -137,7 +137,7 @@
137137
# The baseline provider is a requirement for the `DictProvider`.
138138
class RandintBaselineProvider(BaselineProvider):
139139
def __init__(self, exercise_id, env_id):
140-
super().__init__(DictQuery(dict()))
140+
super().__init__(DictQuery({}))
141141
if "baselines" in CONFIG:
142142
super().__init__(DictQuery(CONFIG["baselines"][exercise_id]))
143143
self._tag = "{}/" + env_id + "/{}"
@@ -184,9 +184,9 @@ def baseline(ms_name: str, st_name: str, exercise_id: str):
184184
}
185185
186186
def measurements(exercise_id: str):
187-
ms_list = list()
187+
ms_list = []
188188
for ms_name in CONFIG["measurements"][exercise_id]:
189-
st_list = list()
189+
st_list = []
190190
unit = CONFIG["measurements"][exercise_id][ms_name]["unit"]
191191
st_defs = CONFIG["measurements"][exercise_id][ms_name]["statistics"]
192192
for st_def in st_defs:

tests/framework/stats/consumer.py

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -37,15 +37,15 @@ def __init__(self,
3737
"""Initialize a consumer."""
3838
self._iteration = 0
3939
self._results = defaultdict() # Aggregated results.
40-
self._custom = dict() if not custom else custom
40+
self._custom = {} if not custom else custom
4141
self._metadata_provider = metadata_provider
4242

43-
self._measurements_defs = dict()
43+
self._measurements_defs = {}
4444
if metadata_provider:
4545
self._measurements_defs = metadata_provider.measurements
4646

4747
# Final statistics.
48-
self._statistics = dict()
48+
self._statistics = {}
4949

5050
self._failure_aggregator = ProcessingException()
5151

@@ -57,24 +57,24 @@ def consume_data(self, ms_name: str, value: Number):
5757
"""Aggregate measurement."""
5858
results = self._results.get(ms_name)
5959
if not results:
60-
self._results[ms_name] = dict()
61-
self._results[ms_name][self.DATA_KEY] = list()
60+
self._results[ms_name] = {}
61+
self._results[ms_name][self.DATA_KEY] = []
6262
self._results[ms_name][self.DATA_KEY].append(value)
6363

6464
def consume_stat(self, st_name: str, ms_name: str, value: Number):
6565
"""Aggregate statistics."""
6666
results = self._results.get(ms_name)
6767
if not results:
68-
self._results[ms_name] = dict()
68+
self._results[ms_name] = {}
6969
self._results[ms_name][st_name] = value
7070

7171
def consume_custom(self, name: str, value: Any):
7272
"""Aggregate custom information."""
7373
if not self._custom.get(self._iteration):
74-
self._custom[self._iteration] = dict()
74+
self._custom[self._iteration] = {}
7575

7676
if not self._custom[self._iteration].get(name):
77-
self._custom[self._iteration][name] = list()
77+
self._custom[self._iteration][name] = []
7878

7979
self._custom[self._iteration][name].append(value)
8080

@@ -120,7 +120,7 @@ def process(self, fail_fast=False) -> (dict, dict):
120120
self._statistics[ms_name][st_def.name] = {
121121
"value": st_def.func(self._results[ms_name][
122122
self.DATA_KEY])
123-
}
123+
}
124124
else:
125125
self._statistics[ms_name][st_def.name] = {
126126
"value": self._results[ms_name][st_def.name]
@@ -131,7 +131,7 @@ def process(self, fail_fast=False) -> (dict, dict):
131131
self._statistics[ms_name][st_def.name][
132132
"pass_criteria"] = {
133133
pass_criteria.name: pass_criteria.baseline
134-
}
134+
}
135135
res = self._statistics[ms_name][st_def.name]["value"]
136136
try:
137137
pass_criteria.check(res)

tests/framework/stats/metadata.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -113,7 +113,7 @@ def __init__(self,
113113
"""
114114
super().__init__(baseline_provider)
115115

116-
self._measurements = dict()
116+
self._measurements = {}
117117
for ms_name in measurements:
118118
assert DictProvider.UNIT_KEY in measurements[ms_name], \
119119
f"'{DictProvider.UNIT_KEY}' field is required for '" \
@@ -125,7 +125,7 @@ def __init__(self,
125125
unit = measurements[ms_name][DictProvider.UNIT_KEY]
126126
st_defs = measurements[ms_name][DictProvider.STATISTICS_KEY]
127127

128-
st_list = list()
128+
st_list = []
129129
for st_def in st_defs:
130130
# Mandatory.
131131
func_cls_name = st_def.get("function")

tests/framework/stats/types.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,7 @@ def create_measurement(cls,
4141
else:
4242
pass_criteria = defaultdict(None, pass_criteria)
4343

44-
stats = list()
44+
stats = []
4545
for func in st_functions:
4646
stats.append(
4747
StatisticDef(

tests/framework/utils.py

Lines changed: 17 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -55,7 +55,7 @@ def set_cpu_affinity(pid: int, cpulist: list) -> list:
5555
def get_cpu_percent(pid: int) -> float:
5656
"""Return the instant process CPU utilization percent."""
5757
_, stdout, _ = run_cmd(GET_CPU_LOAD.format(pid))
58-
cpu_percentages = dict()
58+
cpu_percentages = {}
5959

6060
# Take all except the last line
6161
lines = stdout.strip().split(sep="\n")
@@ -72,7 +72,7 @@ def get_cpu_percent(pid: int) -> float:
7272
# Handles `fc_vcpu 0` case as well.
7373
thread_name = info[11] + (" " + info[12] if info_len > 12 else "")
7474
if thread_name not in cpu_percentages:
75-
cpu_percentages[thread_name] = dict()
75+
cpu_percentages[thread_name] = {}
7676
cpu_percentages[thread_name][task_id] = cpu_percent
7777

7878
return cpu_percentages
@@ -89,14 +89,14 @@ class CpuMap:
8989
starting from 0.
9090
"""
9191

92-
arr = list()
92+
arr = []
9393

94-
def __new__(cls, x):
94+
def __new__(cls, cpu):
9595
"""Instantiate the class field."""
96-
assert CpuMap.len() > x
96+
assert CpuMap.len() > cpu
9797
if not CpuMap.arr:
9898
CpuMap.arr = CpuMap._cpus()
99-
return CpuMap.arr[x]
99+
return CpuMap.arr[cpu]
100100

101101
@staticmethod
102102
def len():
@@ -193,8 +193,8 @@ def with_arg(self, flag, value=""):
193193
def build(self):
194194
"""Build the command."""
195195
cmd = self._bin_path + " "
196-
for flag in self._args:
197-
cmd += flag + " " + "{}".format(self._args[flag]) + " "
196+
for (flag, value) in self._args.items():
197+
cmd += f"{flag} {value} "
198198
return cmd
199199

200200

@@ -240,9 +240,9 @@ class DictQuery:
240240
1
241241
"""
242242

243-
def __init__(self, d: dict):
243+
def __init__(self, inner: dict):
244244
"""Initialize the dict query."""
245-
self._inner = d
245+
self._inner = inner
246246

247247
def get(self, keys_path: str, default=None):
248248
"""Retrieve value corresponding to the key path."""
@@ -270,7 +270,7 @@ class ExceptionAggregator(Exception):
270270
def __init__(self, add_newline=False):
271271
"""Initialize the exception aggregator."""
272272
super().__init__()
273-
self.failures = list()
273+
self.failures = []
274274

275275
# If `add_newline` is True then the failures will start one row below,
276276
# in the logs. This is useful for having the failures starting on an
@@ -545,19 +545,19 @@ def get_cpu_percent(pid: int, iterations: int, omit: int) -> dict:
545545
"""
546546
assert iterations > 0
547547
time.sleep(omit)
548-
cpu_percentages = dict()
548+
cpu_percentages = {}
549549
for _ in range(iterations):
550550
current_cpu_percentages = ProcessManager.get_cpu_percent(pid)
551551
assert len(current_cpu_percentages) > 0
552552

553-
for thread_name in current_cpu_percentages:
553+
for (thread_name, task_ids) in current_cpu_percentages.items():
554554
if not cpu_percentages.get(thread_name):
555-
cpu_percentages[thread_name] = dict()
556-
for task_id in current_cpu_percentages[thread_name]:
555+
cpu_percentages[thread_name] = {}
556+
for task_id in task_ids:
557557
if not cpu_percentages[thread_name].get(task_id):
558-
cpu_percentages[thread_name][task_id] = list()
558+
cpu_percentages[thread_name][task_id] = []
559559
cpu_percentages[thread_name][task_id].append(
560-
current_cpu_percentages[thread_name][task_id])
560+
task_ids[task_id])
561561
time.sleep(1) # 1 second granularity.
562562
return cpu_percentages
563563

0 commit comments

Comments
 (0)