inference.py
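# inference.py: generate rollouts with vLLM over math/QA benchmarks and save them
# for downstream membership-inference (MIA) scoring (loss, ref, neighbor, zlib,
# min-k, min-k++, distillation). load_datasets, prepare_format, and
# save_sharding_outputs_no_logprob are provided by utils.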
import time
import torch
import argparse
from utils import *
from vllm import LLM, SamplingParams
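# Command-line configuration: MIA method, dataset and paths, sharding layout,
# model name, and vLLM sampling settings.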
parser = argparse.ArgumentParser()
parser.add_argument("--mia", default="loss", choices=["loss", "ref", "neighbor", "zlib", "min-k", "min-k++", "distillation"])
parser.add_argument("--dataset_name", default="amc23", choices=["amc23", "aime24", "aime25", "gpqa_diamond",
"olympiadbench", "minerva_math", "gpqa_diamond"])
parser.add_argument("--dataset_path", default="./datasets", type=str)
parser.add_argument("--save_path", default="./results_member", type=str)
parser.add_argument("--global_size", default=1, type=int)
parser.add_argument("--sharding", default=1, type=int)
parser.add_argument("--model_name", default="deepseek-ai/DeepSeek-R1-Distill-Qwen-7B")
parser.add_argument("--gpu_memory_utilization", default=0.95, type=float)
parser.add_argument("--rollout_number", default=1, type=int)
parser.add_argument("--temperature", default=0.7, type=float)
parser.add_argument("--max_tokens", default=4096, type=int)
parser.add_argument("--top_p", default=0.95, type=float)
parser.add_argument("--seed", default=42, type=int)
args = parser.parse_args()
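# Load questions, reference answers, multiple-choice options (where applicable),
# and membership labels for the selected benchmark.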
questions, answers, options, memberships = load_datasets(args)
print("=====================\n", args, "\n=====================")
print("Load the dataset: {} | Number of samples: {}".format(args.dataset_name, len(questions)))
model = LLM(
    model=args.model_name,
    quantization="fp8",
    gpu_memory_utilization=args.gpu_memory_utilization,
    tensor_parallel_size=torch.cuda.device_count()
)
tokenizer = model.get_tokenizer()
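# Sampling configuration: n rollouts per prompt; detokenize=False keeps raw token IDs.
# (The commented logprobs options below would additionally return per-token log-probabilities.)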
sampling_params = SamplingParams(
    temperature=args.temperature, top_p=args.top_p, max_tokens=args.max_tokens,
    seed=args.seed, n=args.rollout_number, detokenize=False
    # logprobs=0, prompt_logprobs=1,
)
prompts = prepare_format(questions, options, tokenizer, args)
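# Run batched generation for all prompts and time the full pass.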
start = time.time()
outputs = model.generate(prompts, sampling_params, use_tqdm=False)
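# Report wall-clock time on single-shard runs, then persist outputs together with
# membership labels for later MIA scoring.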
if args.sharding == 1:
    print(f"Total inference time: {time.time() - start:.2f}s")
# save_sharding_outputs(outputs, answers, args, tokenizer)
save_sharding_outputs_no_logprob(outputs, answers, memberships, args, tokenizer)
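# Example invocation (illustrative; paths and flag values depend on your setup):
#   python inference.py --dataset_name amc23 --mia loss --rollout_number 4 --max_tokens 4096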