Fix pylint warning unspecified-encoding
RatishT committed Jun 17, 2024
1 parent 8e55351 commit c4f4cbe
Showing 11 changed files with 35 additions and 34 deletions.
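Every change in this commit follows the same pattern, so it is worth stating once. Pylint's W1514 (`unspecified-encoding`) fires when `open()` is called in text mode without an explicit `encoding`: the file is then decoded with `locale.getpreferredencoding(False)`, which varies by platform (commonly cp1252 on Windows), so a UTF-8 file written on one machine can fail to parse on another. A minimal before/after sketch of the fix; the file name is illustrative:

```python
import json

# Before -- pylint W1514: the encoding silently falls back to the
# platform locale (locale.getpreferredencoding(False)).
# with open("report.json") as f:
#     data = json.load(f)

# After -- explicit encoding, deterministic on every platform.
with open("report.json", encoding="utf-8") as f:
    data = json.load(f)
```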
2 changes: 1 addition & 1 deletion autogpt/agbenchmark_config/analyze_reports.py
@@ -36,7 +36,7 @@

# Loop through each JSON file to collect suffixes and success values
for report_file in sorted(report_files):
with open(report_file) as f:
with open(report_file, encoding='utf-8') as f:
logger.info(f"Loading {report_file}...")

data = json.load(f)
2 changes: 1 addition & 1 deletion benchmark/agbenchmark/__main__.py
@@ -143,7 +143,7 @@ def run(
exit_code = None

if backend:
with open("backend/backend_stdout.txt", "w") as f:
with open("backend/backend_stdout.txt", "w", encoding="utf-8") as f:
sys.stdout = f
exit_code = run_benchmark(
config=agbenchmark_config,
3 changes: 2 additions & 1 deletion benchmark/agbenchmark/app.py
@@ -56,7 +56,8 @@
except ValidationError as e:
if logging.getLogger().level == logging.DEBUG:
logger.warning(f"Spec file {challenge_relpath} failed to load:\n{e}")
logger.debug(f"Invalid challenge spec: {challenge_spec_file.read_text()}")
with open(challenge_spec_file, 'r', encoding='utf-8') as f:
logger.debug(f"Invalid challenge spec: {f.read_text()}")

(Codecov / codecov/patch: added line #L60 in benchmark/agbenchmark/app.py was not covered by tests)
continue
challenge_info.spec_file = challenge_spec_file

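As an aside, the original one-liner could also have been kept by passing the encoding to `pathlib.Path.read_text`, which satisfies pylint without an explicit `open()`. A sketch, assuming `challenge_spec_file` is the `Path` from the surrounding loop; the path shown is illustrative:

```python
import logging
from pathlib import Path

logger = logging.getLogger(__name__)
challenge_spec_file = Path("challenges/example/data.json")  # illustrative path

# Path.read_text accepts an encoding directly, keeping the debug call on one line.
logger.debug(
    f"Invalid challenge spec: {challenge_spec_file.read_text(encoding='utf-8')}"
)
```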
2 changes: 1 addition & 1 deletion benchmark/agbenchmark/challenges/__init__.py
@@ -34,7 +34,7 @@ def get_unique_categories() -> set[str]:
glob_path = f"{challenges_dir}/**/data.json"

for data_file in glob.glob(glob_path, recursive=True):
with open(data_file, "r") as f:
with open(data_file, "r", encoding="utf-8") as f:
try:
challenge_data = json.load(f)
categories.update(challenge_data.get("category", []))
42 changes: 21 additions & 21 deletions benchmark/agbenchmark/challenges/builtin.py
@@ -32,7 +32,7 @@

logger = logging.getLogger(__name__)

with open(Path(__file__).parent / "optional_categories.json") as f:
with open(Path(__file__).parent / "optional_categories.json", encoding="utf-8") as f:
OPTIONAL_CATEGORIES: list[str] = json.load(f)["optional_categories"]


@@ -307,30 +307,30 @@ def get_outputs_for_eval(
                # Otherwise, it is a specific file
                matching_files = [os.path.join(script_dir, file_pattern)]

            logger.debug(
                f"Files to evaluate for pattern `{file_pattern}`: {matching_files}"
            )

            for file_path in matching_files:
                relative_file_path = Path(file_path).relative_to(workspace)
                logger.debug(
                    f"Evaluating {relative_file_path} "
                    f"(eval type: {ground.eval.type})..."
                )
                if ground.eval.type == "python":
                    result = subprocess.run(
                        [sys.executable, file_path],
                        cwd=os.path.abspath(workspace),
                        capture_output=True,
                        text=True,
                    )
                    if "error" in result.stderr or result.returncode != 0:
                        yield relative_file_path, f"Error: {result.stderr}\n"
                    else:
                        yield relative_file_path, f"Output: {result.stdout}\n"
                else:
                    with open(file_path, "r") as f:
                    with open(file_path, "r", encoding="utf-8") as f:
                        yield relative_file_path, f.read()

(Codecov / codecov/patch: added lines #L310, #L315, #L321, #L328, #L330 and #L333 in benchmark/agbenchmark/challenges/builtin.py were not covered by tests)
        else:
            if ground.eval.type == "pytest":
                result = subprocess.run(
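Not touched by this commit, but the same locale caveat applies a few lines up: `subprocess.run(..., text=True)` decodes the child process's stdout/stderr with the locale's preferred encoding unless an `encoding` is passed (pylint's W1514 only targets `open()`-style calls, so it stays silent here). A sketch of the stricter call, with an illustrative script path:

```python
import subprocess
import sys

result = subprocess.run(
    [sys.executable, "eval_script.py"],  # illustrative script path
    capture_output=True,
    text=True,
    encoding="utf-8",  # without this, locale.getpreferredencoding(False) is used
)
print(result.stdout)
```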
2 changes: 1 addition & 1 deletion benchmark/reports/json_to_base_64.py
@@ -2,7 +2,7 @@
import json

# Load JSON data from a file
with open("secrets.json", "r") as f:
with open("secrets.json", "r", encoding="utf-8") as f:
data = json.load(f)

# Convert the JSON object into a string
4 changes: 2 additions & 2 deletions benchmark/reports/match_records.py
@@ -95,8 +95,8 @@ def get_reports():
for report_file in report_files:
# Check if the report.json file exists
if os.path.isfile(report_file):
# Open the report.json file
with open(report_file, "r") as f:
# Open the report.json file with UTF-8 encoding
with open(report_file, "r", encoding="utf-8") as f:
# Load the JSON data from the file
json_data = json.load(f)
print(f"Processing {report_file}")
2 changes: 1 addition & 1 deletion benchmark/reports/send_to_googledrive.py
@@ -118,7 +118,7 @@ def process_test(

if os.path.exists(report_path):
# Load the JSON data from the file
with open(report_path, "r") as f:
with open(report_path, "r", encoding="utf-8") as f:
data = json.load(f)
benchmark_start_time = data.get("benchmark_start_time", "")

6 changes: 3 additions & 3 deletions cli.py
@@ -296,7 +296,7 @@ def benchmark_categories_list():
# Use it as the base for the glob pattern, excluding 'deprecated' directory
for data_file in glob.glob(glob_path, recursive=True):
if "deprecated" not in data_file:
with open(data_file, "r") as f:
with open(data_file, "r", encoding="utf-8") as f:
try:
data = json.load(f)
categories.update(data.get("category", []))
@@ -340,7 +340,7 @@ def benchmark_tests_list():
# Use it as the base for the glob pattern, excluding 'deprecated' directory
for data_file in glob.glob(glob_path, recursive=True):
if "deprecated" not in data_file:
with open(data_file, "r") as f:
with open(data_file, "r", encoding="utf-8") as f:
try:
data = json.load(f)
category = data.get("category", [])
@@ -389,7 +389,7 @@ def benchmark_tests_details(test_name):
)
# Use it as the base for the glob pattern, excluding 'deprecated' directory
for data_file in glob.glob(glob_path, recursive=True):
with open(data_file, "r") as f:
with open(data_file, "r", encoding="utf-8") as f:
try:
data = json.load(f)
if data.get("name") == test_name:
2 changes: 1 addition & 1 deletion forge/forge/file_storage/local.py
@@ -77,7 +77,7 @@ def _open_file(self, path: str | Path, mode: str) -> TextIO | BinaryIO:
full_path = self.get_path(path)
if any(m in mode for m in ("w", "a", "x")):
full_path.parent.mkdir(parents=True, exist_ok=True)
return open(full_path, mode) # type: ignore
return open(full_path, mode, encoding='utf-8')

@overload
def read_file(self, path: str | Path, binary: Literal[False] = False) -> str:
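One caveat with the `_open_file` change: `open()` raises `ValueError` when an `encoding` is combined with a binary mode, and this method is typed to return `TextIO | BinaryIO`, so binary-mode callers would now fail. A mode-aware sketch of what may have been intended (an assumption, not the committed code):

```python
from pathlib import Path
from typing import IO


def _open_file_sketch(full_path: Path, mode: str) -> IO:
    # open(..., encoding=...) raises ValueError for any mode containing "b",
    # so only pass an encoding for text-mode opens.
    if "b" in mode:
        return open(full_path, mode)
    return open(full_path, mode, encoding="utf-8")
```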
2 changes: 1 addition & 1 deletion forge/forge/llm/providers/openai.py
@@ -260,7 +260,7 @@ def get_model_access_kwargs(self, model: str) -> dict[str, str]:
return kwargs

def load_azure_config(self, config_file: Path) -> None:
with open(config_file) as file:
with open(config_file, 'r', encoding='utf-8') as file:
config_params = yaml.load(file, Loader=yaml.SafeLoader) or {}

try:
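For reference, `yaml.load(file, Loader=yaml.SafeLoader)` is what `yaml.safe_load(file)` does under the hood. A minimal sketch of this config-loading step with the explicit encoding, using an illustrative path:

```python
from pathlib import Path

import yaml

config_file = Path("azure_config.yaml")  # illustrative path
with open(config_file, "r", encoding="utf-8") as file:
    # safe_load is shorthand for load(..., Loader=yaml.SafeLoader)
    config_params = yaml.safe_load(file) or {}
```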
