Added top n log probs #2262
base: main
Changes from 8 commits
2cbc985
724f80b
d6286fd
14247a5
bc4981c
d22a9b7
a8e4719
4d3ebf1
4a7e96e
af4cb7a
@@ -163,7 +163,7 @@ def get_inference_context(
         active_buffer_size_gb=args.inference_dynamic_batching_active_buffer_size_gb,
         max_tokens=args.inference_dynamic_batching_max_tokens,
         tensor_model_parallel_size=args.tensor_model_parallel_size,
-        materialize_only_last_token_logits=not args.return_log_probs,
+        materialize_only_last_token_logits=not (args.return_log_probs or args.return_prompt_top_n_logprobs),
         layer_type_list=layer_type_list,
         mamba_conv_states_shape=mamba_conv_states_shape,
         mamba_ssm_states_shape=mamba_ssm_states_shape,
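The change above keeps full-sequence logits materialized whenever prompt top-n logprobs are requested, because per-position logprobs cannot be recovered from the last token's logits alone. As a rough illustration of the underlying computation (a standalone sketch, not the engine's code path; the function and variable names are made up), the prompt side needs the whole [seq_len, vocab] logits matrix:

```python
import torch

def prompt_top_n_logprobs(logits: torch.Tensor, prompt_ids: torch.Tensor, top_n: int):
    """logits: [seq_len, vocab_size] prompt logits; prompt_ids: [seq_len] token ids."""
    log_probs = torch.log_softmax(logits.float(), dim=-1)  # [seq_len, vocab_size]
    # Logprob that each position assigns to the next actual prompt token.
    token_logprobs = log_probs[:-1].gather(1, prompt_ids[1:, None]).squeeze(1)
    # Top-n alternatives at every prompt position; this needs the full logits
    # matrix, which is why materialize_only_last_token_logits must be False here.
    top_vals, top_ids = log_probs.topk(top_n, dim=-1)
    return token_logprobs, top_vals, top_ids

token_lp, top_vals, top_ids = prompt_top_n_logprobs(
    torch.randn(8, 32000), torch.randint(0, 32000, (8,)), top_n=5
)
print(token_lp.shape, top_vals.shape)  # torch.Size([7]) torch.Size([8, 5])
```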
@@ -327,9 +327,19 @@ def _add_request():
             request.state = "finished"
             request.request_id = finished_request.request_id
             if finished_request.sampling_params.return_log_probs:
+                if not finished_request.prompt_log_probs:
+                    finished_request.prompt_log_probs = []
                 request.log_probs = (
                     finished_request.prompt_log_probs + finished_request.generated_log_probs
                 )
+            if finished_request.sampling_params.top_n_logprobs > 0:
+                request.generated_top_n_logprobs = getattr(
+                    finished_request, 'generated_top_n_logprobs', None
+                )
+            if finished_request.sampling_params.return_prompt_top_n_logprobs:
+                request.prompt_top_n_logprobs = getattr(
+                    finished_request, 'prompt_top_n_logprobs', None
+                )
             num_requests_finished += 1
             output_times.append(get_curr_time() - output_start)
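When a request finishes, this hunk copies the accumulated top-n entries onto the outgoing request object. How those entries are built during decoding is not shown here; below is a heavily simplified sketch of one way to collect them per decode step (the function name, the dict layout, and the optional tokenizer.detokenize call are assumptions, not the engine's internals):

```python
import torch

def collect_top_n(step_logits: torch.Tensor, top_n: int, tokenizer=None):
    """step_logits: [batch, vocab_size] logits for the tokens sampled this step."""
    log_probs = torch.log_softmax(step_logits.float(), dim=-1)
    top_vals, top_ids = log_probs.topk(top_n, dim=-1)
    batch_entries = []
    for vals, ids in zip(top_vals.tolist(), top_ids.tolist()):
        # One entry per request: candidate token (id, or string if a tokenizer
        # with a detokenize method is supplied) mapped to its logprob.
        keys = [tokenizer.detokenize([i]) if tokenizer else i for i in ids]
        batch_entries.append(dict(zip(keys, vals)))
    return batch_entries

# Example: 2 requests, a pretend vocab of 1000, top-5 candidates each.
entries = collect_top_n(torch.randn(2, 1000), top_n=5)
print(entries[0])
```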
@@ -372,9 +382,12 @@ def main():
         temperature=args.temperature,
         top_k=args.top_k,
         top_p=args.top_p,
+        skip_prompt_log_probs=args.skip_prompt_log_probs,
         return_log_probs=args.return_log_probs,
         num_tokens_to_generate=args.num_tokens_to_generate,
         termination_id=args.termination_id if args.termination_id is not None else tokenizer.eod,
+        top_n_logprobs=args.top_n_logprobs,
+        return_prompt_top_n_logprobs=args.return_prompt_top_n_logprobs,
     )

     model = get_model()

Contributor commented on lines +453 to +454: nit, but I think that we should be consistent with our naming. The rest of the codebase uses [...]. There are several examples of [...].
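The hunk above threads three new argument values (skip_prompt_log_probs, top_n_logprobs, return_prompt_top_n_logprobs) into the sampling parameters. A hypothetical argparse wiring for these knobs might look like the following; only the args.* attribute names come from the diff, while the flag spellings, defaults, and help strings are guesses:

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--top-n-logprobs", type=int, default=0,
                    help="Return the top-n candidate logprobs for each generated token.")
parser.add_argument("--return-prompt-top-n-logprobs", action="store_true",
                    help="Also return top-n candidate logprobs for every prompt position.")
parser.add_argument("--skip-prompt-log-probs", action="store_true",
                    help="Skip prompt-token logprobs when returning log probs.")

# argparse maps the dashed flags to the underscore attribute names used in the diff.
args = parser.parse_args(["--top-n-logprobs", "5", "--return-prompt-top-n-logprobs"])
print(args.top_n_logprobs, args.return_prompt_top_n_logprobs)  # 5 True
```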
@@ -493,13 +506,16 @@ def escape_str(s):
     # Write every 'n' requests, plus the final request.
     for i, req in enumerate(requests):
         if i % args.output_every_n_results == 0 or i == len(requests) - 1:
+            print(f' Attributes of request {i}: {req.__dict__}')
             result_dict = {
                 "input_prompt": req.prompt_text,
                 "generated_text": req.output_text,
                 "generated_tokens": req.output_tokens,
                 "latency": req.time_end - req.time_start,
                 "cuda_graph_request_count_map" : result["cuda_graph_request_count_map"],
                 "step_count" : engine.step_count,
+                "top_n_logprobs" : getattr(req, 'generated_top_n_logprobs', None),
+                "prompt_top_n_logprobs" : getattr(req, 'prompt_top_n_logprobs', None),
             }
             if req.sampling_params.return_log_probs:
                 response_logprobs = req.log_probs

Contributor commented on lines +588 to +589: See my comment here about [...].
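Because the new fields are read with getattr(..., None), requests that never asked for top-n logprobs still serialize cleanly; json.dump turns the None into null. A tiny standalone check of that behavior (the _Req stand-in is illustrative only):

```python
import json

class _Req:
    """Stand-in for a request object that never had top-n logprobs attached."""
    pass

req = _Req()
result_dict = {
    "top_n_logprobs": getattr(req, "generated_top_n_logprobs", None),
    "prompt_top_n_logprobs": getattr(req, "prompt_top_n_logprobs", None),
}
print(json.dumps(result_dict))  # {"top_n_logprobs": null, "prompt_top_n_logprobs": null}
```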
@@ -509,6 +525,7 @@ def escape_str(s):
     # Track system-level throughput as a test / debug metric
     json_results["throughput"] = throughputs

     print(f' Saving results to {args.output_path}')
     with open(args.output_path, "w") as fp:
         json.dump(json_results, fp, indent=1)
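Once the results file is written, the new fields can be inspected offline. The snippet below is a small, hypothetical post-processing script; it assumes the JSON layout implied by result_dict above (a "throughput" entry plus one dict per written request), which may not match the script's exact output structure:

```python
import json

def summarize_top_n(output_path: str) -> None:
    """Print a short summary of the top-n logprob fields in a results file."""
    with open(output_path) as fp:
        json_results = json.load(fp)

    print("throughput:", json_results.get("throughput"))
    for key, result in json_results.items():
        if not isinstance(result, dict):
            continue  # skip scalar entries such as "throughput"
        top_n = result.get("top_n_logprobs")
        prompt_top_n = result.get("prompt_top_n_logprobs")
        if top_n is not None:
            print(f"{key}: top-n logprobs for {len(top_n)} generated positions")
        if prompt_top_n is not None:
            print(f"{key}: top-n logprobs for {len(prompt_top_n)} prompt positions")

# e.g. summarize_top_n("results.json")  # pass the same path given as the output path
```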
@@ -560,4 +577,4 @@ def escape_str(s):


 if __name__ == "__main__":
-    main()
+    main()
Review comment: Personally, I strongly oppose conditional attributes that make us have to use `getattr`. I do not see a reason why we cannot just put `generated_top_n_logprobs` and `prompt_top_n_logprobs` directly into the `InferenceRequest` dataclass. It is more confusing for the `InferenceRequest` object to gain unlisted attributes over time than it is for us to list all the attributes ahead of time - even if not all flows will need them.
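For reference, the reviewer's suggestion amounts to declaring the fields up front with None defaults, roughly as sketched below (the field types are assumptions, and the real InferenceRequest dataclass has many more fields than shown):

```python
from dataclasses import dataclass
from typing import Dict, List, Optional

@dataclass
class InferenceRequest:
    # ... existing fields elided ...
    log_probs: Optional[List[float]] = None
    # Always declared; simply left as None when top-n logprobs were not requested.
    generated_top_n_logprobs: Optional[List[Dict[str, float]]] = None
    prompt_top_n_logprobs: Optional[List[Dict[str, float]]] = None

# Call sites can then read request.generated_top_n_logprobs directly instead of
# getattr(finished_request, 'generated_top_n_logprobs', None).
```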