
Commit

update
nauyisu022 committed Apr 22, 2024
1 parent 4e93f3a commit e4e79c8
Showing 3 changed files with 13 additions and 7 deletions.
1 change: 1 addition & 0 deletions trustllm_pkg/trustllm/config.py
@@ -25,6 +25,7 @@
 azure_api_key=None
 gemini_api = None
 
+device=None
 
 deepinfra_model = [
     "llama2-70b",
14 changes: 7 additions & 7 deletions trustllm_pkg/trustllm/task/pipeline.py
@@ -13,10 +13,10 @@ def run_ethics(
     awareness_path=None
 ):
     if all_folder_path:
-        explicit_ethics_path = os.path.join(all_folder_path, "explicit_ethics.json")
-        implicit_ethics_path_social_norm = os.path.join(all_folder_path, "implicit_ethics_social_norm.json")
+        explicit_ethics_path = os.path.join(all_folder_path, "explicit_moralchoice.json")
+        implicit_ethics_path_social_norm = os.path.join(all_folder_path, "implicit_SocialChemistry101.json")
         implicit_ethics_path_ETHICS = os.path.join(all_folder_path, "implicit_ethics_ETHICS.json")
-        awareness_path = os.path.join(all_folder_path, "awareness.json")
+        #awareness_path = os.path.join(all_folder_path, "awareness.json")

     evaluator = ethics.EthicsEval()
     explicit_ethics_res_low, explicit_ethics_res_high = None, None
@@ -223,10 +223,10 @@ def run_truthfulness(
     advfact_path=None,
 ):
     if all_folder_path:
-        # internal_path = os.path.join(all_folder_path, "internal.json")
-        # external_path = os.path.join(all_folder_path, "external.json")
-        #hallucination_path = os.path.join(all_folder_path, "hallucination.json")
-        #sycophancy_path = os.path.join(all_folder_path, "sycophancy.json")
+        internal_path = os.path.join(all_folder_path, "internal.json")
+        external_path = os.path.join(all_folder_path, "external.json")
+        hallucination_path = os.path.join(all_folder_path, "hallucination.json")
+        sycophancy_path = os.path.join(all_folder_path, "sycophancy.json")
         advfact_path = os.path.join(all_folder_path, "golden_advfactuality.json")

     evaluator = truthfulness.TruthfulnessEval()
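For illustration, a minimal sketch of how a caller's results folder lines up with the filenames joined above, assuming run_ethics and run_truthfulness are module-level functions in trustllm.task.pipeline (as the hunk headers suggest); the directory names are hypothetical, and awareness.json is no longer derived from the folder now that awareness_path is commented out:

    from trustllm.task import pipeline

    # Hypothetical folder expected to contain internal.json, external.json,
    # hallucination.json, sycophancy.json and golden_advfactuality.json.
    pipeline.run_truthfulness(all_folder_path="generation_results/truthfulness/")

    # Hypothetical folder expected to contain explicit_moralchoice.json,
    # implicit_SocialChemistry101.json and implicit_ethics_ETHICS.json;
    # awareness_path must now be passed explicitly if it is needed.
    pipeline.run_ethics(all_folder_path="generation_results/ethics/")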
5 changes: 5 additions & 0 deletions trustllm_pkg/trustllm/utils/longformer.py
@@ -3,7 +3,9 @@
 import os
 from trustllm.utils import file_process
 from tqdm import tqdm
 
+import torch
+import trustllm.config
 # Configure basic logging
 logging.basicConfig(filename='huggingface_evaluator.log', level=logging.INFO,
                     format='%(asctime)s:%(levelname)s:%(message)s')
@@ -22,8 +24,11 @@ def __init__(self, model_name='LibrAI/longformer-harmful-ro', device=None, save_
             device (str): The device to run the model on (e.g., 'cuda:0').
             save_dir (str): Directory to save the evaluation results.
         """
+        device=trustllm.config.device
         if device==None:
             self.device='cpu' if torch.cuda.is_available() else 'cuda'
+        else:
+            self.device=device
         # Load the model and tokenizer
         model = AutoModelForSequenceClassification.from_pretrained(model_name)
         tokenizer = AutoTokenizer.from_pretrained(model_name)
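A minimal usage sketch of the new global device override, under the assumption that this constructor belongs to a HuggingFaceEvaluator class exposed by trustllm.utils.longformer (the class name is inferred from the huggingface_evaluator.log filename and is not shown in the diff); only the trustllm.config.device attribute itself comes from this commit. Note that the fallback branch as committed reads 'cpu' if torch.cuda.is_available() else 'cuda', which appears inverted; the sketch assumes the intended behavior of using CUDA when it is available.

    import trustllm.config
    from trustllm.utils import longformer

    trustllm.config.device = 'cuda:0'              # hypothetical explicit override
    evaluator = longformer.HuggingFaceEvaluator()  # __init__ picks up 'cuda:0'

    trustllm.config.device = None                  # leave unset to auto-detect
    evaluator = longformer.HuggingFaceEvaluator()  # intended: CUDA if available, else CPU

With this change the module-level setting takes precedence over the constructor's device argument, since the parameter is overwritten by trustllm.config.device before the None check.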
