@@ -169,17 +169,31 @@ def pipe(
 
         processed_messages.append({"role": message["role"], "content": processed_content})
 
-        payload = {"modelId": model_id,
-                   "messages": processed_messages,
-                   "system": [{'text': system_message["content"] if system_message else 'you are an intelligent ai assistant'}],
-                   "inferenceConfig": {
-                       "temperature": body.get("temperature", 0.5),
-                       "topP": body.get("top_p", 0.9),
-                       "maxTokens": body.get("max_tokens", 4096),
-                       "stopSequences": body.get("stop", []),
-                   },
-                   "additionalModelRequestFields": {"top_k": body.get("top_k", 200)}
-                   }
+        payload = {
+            "modelId": model_id,
+            "messages": processed_messages,
+            "system": [{'text': system_message["content"] if system_message else 'you are an intelligent ai assistant'}],
+            "inferenceConfig": {
+                "temperature": body.get("temperature", 0.5),
+                "maxTokens": body.get("max_tokens", 4096),
+                "stopSequences": body.get("stop", []),
+            },
+            "additionalModelRequestFields": {}
+        }
+
+        # Handle top_p and temperature conflict
+        if "top_p" in body:
+            payload["inferenceConfig"]["topP"] = body["top_p"]
+            # Remove temperature if top_p is explicitly set
+            if "temperature" in payload["inferenceConfig"]:
+                del payload["inferenceConfig"]["temperature"]
+
+        # Add top_k if explicitly provided
+        if "top_k" in body:
+            payload["additionalModelRequestFields"]["top_k"] = body["top_k"]
+        else:
+            # Use default top_k value
+            payload["additionalModelRequestFields"]["top_k"] = 200
 
         if body.get("stream", False):
             supports_thinking = any(model in model_id for model in self.get_thinking_supported_models())
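
For context, a minimal sketch of how a payload shaped like the one this diff builds could be sent through the Bedrock Runtime Converse API via boto3. The client setup, region, model id, and example message are assumptions for illustration, not part of this PR:

```python
# Minimal sketch (assumption: the payload built in pipe() is forwarded to the
# Bedrock Runtime Converse API; client setup and values here are illustrative).
import boto3

client = boto3.client("bedrock-runtime", region_name="us-east-1")

payload = {
    "modelId": "anthropic.claude-3-5-sonnet-20240620-v1:0",  # hypothetical model id
    "messages": [{"role": "user", "content": [{"text": "Hello"}]}],
    "system": [{"text": "you are an intelligent ai assistant"}],
    "inferenceConfig": {"temperature": 0.5, "maxTokens": 4096, "stopSequences": []},
    "additionalModelRequestFields": {"top_k": 200},
}

# converse() accepts the same keys the pipe assembles, so the dict splats directly.
response = client.converse(**payload)
print(response["output"]["message"]["content"][0]["text"])
```

The streaming branch guarded by `body.get("stream", False)` would call `converse_stream(**payload)` with the same argument shape and iterate the returned event stream instead.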