
Commit 84dfa74

sync stainless evals
1 parent 1c45163 commit 84dfa74

File tree

140 files changed (+10172 -2749 lines)


src/llama_stack/lib/.keep (+4)

@@ -0,0 +1,4 @@
+File generated from our OpenAPI spec by Stainless.
+
+This directory can be used to store custom files to expand the SDK.
+It is ignored by Stainless code generation and its content (other than this keep file) won't be touched.
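As an illustration of that last point (not part of this commit), a hand-written module dropped into the directory might look like the sketch below; the file name and helper are hypothetical.

# src/llama_stack/lib/custom_helpers.py -- hypothetical hand-written extension.
# Stainless regeneration ignores this directory, so the file survives a re-sync.

def truncate_prompt(prompt: str, max_chars: int = 2048) -> str:
    """Trim a prompt to a fixed character budget before sending it to the API."""
    if len(prompt) <= max_chars:
        return prompt
    return prompt[: max_chars - 3] + "..."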

src/llama_stack_client/_base_client.py (+1 -1)

@@ -1575,7 +1575,7 @@ async def _request(
         except Exception as err:
             log.debug("Encountered Exception", exc_info=True)

-            if retries_taken > 0:
+            if remaining_retries > 0:
                 return await self._retry_request(
                     input_options,
                     cast_to,
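This hunk swaps a count-up counter (retries_taken, retries already performed) for a count-down counter (remaining_retries, retries still allowed) in the retry check. A minimal sketch of the two conventions, using a stand-in request function rather than the SDK's actual _request loop:

import random

def do_request() -> bool:
    # Stand-in for an HTTP call that sometimes fails; illustrative only.
    return random.random() > 0.5

max_retries = 2

# Count-up convention: retries_taken counts retries already performed.
retries_taken = 0
while not do_request() and retries_taken < max_retries:
    retries_taken += 1

# Count-down convention (the one this hunk restores): remaining_retries counts
# retries still permitted and is decremented each time one is used.
remaining_retries = max_retries
while not do_request() and remaining_retries > 0:
    remaining_retries -= 1

Either way, at most 1 + max_retries requests are issued; only the direction of the bookkeeping differs.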

src/llama_stack_client/_client.py (+40 -32)

@@ -48,22 +48,23 @@
 class LlamaStackClient(SyncAPIClient):
     agents: resources.AgentsResource
     batch_inferences: resources.BatchInferencesResource
-    datasets: resources.DatasetsResource
-    evaluate: resources.EvaluateResource
-    evaluations: resources.EvaluationsResource
     inspect: resources.InspectResource
     inference: resources.InferenceResource
     memory: resources.MemoryResource
     memory_banks: resources.MemoryBanksResource
+    datasets: resources.DatasetsResource
     models: resources.ModelsResource
     post_training: resources.PostTrainingResource
     providers: resources.ProvidersResource
-    reward_scoring: resources.RewardScoringResource
     routes: resources.RoutesResource
     safety: resources.SafetyResource
     shields: resources.ShieldsResource
     synthetic_data_generation: resources.SyntheticDataGenerationResource
     telemetry: resources.TelemetryResource
+    datasetio: resources.DatasetioResource
+    scoring: resources.ScoringResource
+    scoring_functions: resources.ScoringFunctionsResource
+    eval: resources.EvalResource
     with_raw_response: LlamaStackClientWithRawResponse
     with_streaming_response: LlamaStackClientWithStreamedResponse

@@ -110,22 +111,23 @@ def __init__(

         self.agents = resources.AgentsResource(self)
         self.batch_inferences = resources.BatchInferencesResource(self)
-        self.datasets = resources.DatasetsResource(self)
-        self.evaluate = resources.EvaluateResource(self)
-        self.evaluations = resources.EvaluationsResource(self)
         self.inspect = resources.InspectResource(self)
         self.inference = resources.InferenceResource(self)
         self.memory = resources.MemoryResource(self)
         self.memory_banks = resources.MemoryBanksResource(self)
+        self.datasets = resources.DatasetsResource(self)
         self.models = resources.ModelsResource(self)
         self.post_training = resources.PostTrainingResource(self)
         self.providers = resources.ProvidersResource(self)
-        self.reward_scoring = resources.RewardScoringResource(self)
         self.routes = resources.RoutesResource(self)
         self.safety = resources.SafetyResource(self)
         self.shields = resources.ShieldsResource(self)
         self.synthetic_data_generation = resources.SyntheticDataGenerationResource(self)
         self.telemetry = resources.TelemetryResource(self)
+        self.datasetio = resources.DatasetioResource(self)
+        self.scoring = resources.ScoringResource(self)
+        self.scoring_functions = resources.ScoringFunctionsResource(self)
+        self.eval = resources.EvalResource(self)
         self.with_raw_response = LlamaStackClientWithRawResponse(self)
         self.with_streaming_response = LlamaStackClientWithStreamedResponse(self)

@@ -229,22 +231,23 @@ def _make_status_error(
 class AsyncLlamaStackClient(AsyncAPIClient):
     agents: resources.AsyncAgentsResource
     batch_inferences: resources.AsyncBatchInferencesResource
-    datasets: resources.AsyncDatasetsResource
-    evaluate: resources.AsyncEvaluateResource
-    evaluations: resources.AsyncEvaluationsResource
     inspect: resources.AsyncInspectResource
     inference: resources.AsyncInferenceResource
     memory: resources.AsyncMemoryResource
     memory_banks: resources.AsyncMemoryBanksResource
+    datasets: resources.AsyncDatasetsResource
     models: resources.AsyncModelsResource
     post_training: resources.AsyncPostTrainingResource
     providers: resources.AsyncProvidersResource
-    reward_scoring: resources.AsyncRewardScoringResource
     routes: resources.AsyncRoutesResource
     safety: resources.AsyncSafetyResource
     shields: resources.AsyncShieldsResource
     synthetic_data_generation: resources.AsyncSyntheticDataGenerationResource
     telemetry: resources.AsyncTelemetryResource
+    datasetio: resources.AsyncDatasetioResource
+    scoring: resources.AsyncScoringResource
+    scoring_functions: resources.AsyncScoringFunctionsResource
+    eval: resources.AsyncEvalResource
     with_raw_response: AsyncLlamaStackClientWithRawResponse
     with_streaming_response: AsyncLlamaStackClientWithStreamedResponse

@@ -291,22 +294,23 @@ def __init__(

         self.agents = resources.AsyncAgentsResource(self)
         self.batch_inferences = resources.AsyncBatchInferencesResource(self)
-        self.datasets = resources.AsyncDatasetsResource(self)
-        self.evaluate = resources.AsyncEvaluateResource(self)
-        self.evaluations = resources.AsyncEvaluationsResource(self)
         self.inspect = resources.AsyncInspectResource(self)
         self.inference = resources.AsyncInferenceResource(self)
         self.memory = resources.AsyncMemoryResource(self)
         self.memory_banks = resources.AsyncMemoryBanksResource(self)
+        self.datasets = resources.AsyncDatasetsResource(self)
         self.models = resources.AsyncModelsResource(self)
         self.post_training = resources.AsyncPostTrainingResource(self)
         self.providers = resources.AsyncProvidersResource(self)
-        self.reward_scoring = resources.AsyncRewardScoringResource(self)
         self.routes = resources.AsyncRoutesResource(self)
         self.safety = resources.AsyncSafetyResource(self)
         self.shields = resources.AsyncShieldsResource(self)
         self.synthetic_data_generation = resources.AsyncSyntheticDataGenerationResource(self)
         self.telemetry = resources.AsyncTelemetryResource(self)
+        self.datasetio = resources.AsyncDatasetioResource(self)
+        self.scoring = resources.AsyncScoringResource(self)
+        self.scoring_functions = resources.AsyncScoringFunctionsResource(self)
+        self.eval = resources.AsyncEvalResource(self)
         self.with_raw_response = AsyncLlamaStackClientWithRawResponse(self)
         self.with_streaming_response = AsyncLlamaStackClientWithStreamedResponse(self)

@@ -411,96 +415,100 @@ class LlamaStackClientWithRawResponse:
     def __init__(self, client: LlamaStackClient) -> None:
         self.agents = resources.AgentsResourceWithRawResponse(client.agents)
         self.batch_inferences = resources.BatchInferencesResourceWithRawResponse(client.batch_inferences)
-        self.datasets = resources.DatasetsResourceWithRawResponse(client.datasets)
-        self.evaluate = resources.EvaluateResourceWithRawResponse(client.evaluate)
-        self.evaluations = resources.EvaluationsResourceWithRawResponse(client.evaluations)
         self.inspect = resources.InspectResourceWithRawResponse(client.inspect)
         self.inference = resources.InferenceResourceWithRawResponse(client.inference)
         self.memory = resources.MemoryResourceWithRawResponse(client.memory)
         self.memory_banks = resources.MemoryBanksResourceWithRawResponse(client.memory_banks)
+        self.datasets = resources.DatasetsResourceWithRawResponse(client.datasets)
         self.models = resources.ModelsResourceWithRawResponse(client.models)
         self.post_training = resources.PostTrainingResourceWithRawResponse(client.post_training)
         self.providers = resources.ProvidersResourceWithRawResponse(client.providers)
-        self.reward_scoring = resources.RewardScoringResourceWithRawResponse(client.reward_scoring)
         self.routes = resources.RoutesResourceWithRawResponse(client.routes)
         self.safety = resources.SafetyResourceWithRawResponse(client.safety)
         self.shields = resources.ShieldsResourceWithRawResponse(client.shields)
         self.synthetic_data_generation = resources.SyntheticDataGenerationResourceWithRawResponse(
             client.synthetic_data_generation
         )
         self.telemetry = resources.TelemetryResourceWithRawResponse(client.telemetry)
+        self.datasetio = resources.DatasetioResourceWithRawResponse(client.datasetio)
+        self.scoring = resources.ScoringResourceWithRawResponse(client.scoring)
+        self.scoring_functions = resources.ScoringFunctionsResourceWithRawResponse(client.scoring_functions)
+        self.eval = resources.EvalResourceWithRawResponse(client.eval)


 class AsyncLlamaStackClientWithRawResponse:
     def __init__(self, client: AsyncLlamaStackClient) -> None:
         self.agents = resources.AsyncAgentsResourceWithRawResponse(client.agents)
         self.batch_inferences = resources.AsyncBatchInferencesResourceWithRawResponse(client.batch_inferences)
-        self.datasets = resources.AsyncDatasetsResourceWithRawResponse(client.datasets)
-        self.evaluate = resources.AsyncEvaluateResourceWithRawResponse(client.evaluate)
-        self.evaluations = resources.AsyncEvaluationsResourceWithRawResponse(client.evaluations)
         self.inspect = resources.AsyncInspectResourceWithRawResponse(client.inspect)
         self.inference = resources.AsyncInferenceResourceWithRawResponse(client.inference)
         self.memory = resources.AsyncMemoryResourceWithRawResponse(client.memory)
         self.memory_banks = resources.AsyncMemoryBanksResourceWithRawResponse(client.memory_banks)
+        self.datasets = resources.AsyncDatasetsResourceWithRawResponse(client.datasets)
         self.models = resources.AsyncModelsResourceWithRawResponse(client.models)
         self.post_training = resources.AsyncPostTrainingResourceWithRawResponse(client.post_training)
         self.providers = resources.AsyncProvidersResourceWithRawResponse(client.providers)
-        self.reward_scoring = resources.AsyncRewardScoringResourceWithRawResponse(client.reward_scoring)
         self.routes = resources.AsyncRoutesResourceWithRawResponse(client.routes)
         self.safety = resources.AsyncSafetyResourceWithRawResponse(client.safety)
         self.shields = resources.AsyncShieldsResourceWithRawResponse(client.shields)
         self.synthetic_data_generation = resources.AsyncSyntheticDataGenerationResourceWithRawResponse(
             client.synthetic_data_generation
         )
         self.telemetry = resources.AsyncTelemetryResourceWithRawResponse(client.telemetry)
+        self.datasetio = resources.AsyncDatasetioResourceWithRawResponse(client.datasetio)
+        self.scoring = resources.AsyncScoringResourceWithRawResponse(client.scoring)
+        self.scoring_functions = resources.AsyncScoringFunctionsResourceWithRawResponse(client.scoring_functions)
+        self.eval = resources.AsyncEvalResourceWithRawResponse(client.eval)


 class LlamaStackClientWithStreamedResponse:
     def __init__(self, client: LlamaStackClient) -> None:
         self.agents = resources.AgentsResourceWithStreamingResponse(client.agents)
         self.batch_inferences = resources.BatchInferencesResourceWithStreamingResponse(client.batch_inferences)
-        self.datasets = resources.DatasetsResourceWithStreamingResponse(client.datasets)
-        self.evaluate = resources.EvaluateResourceWithStreamingResponse(client.evaluate)
-        self.evaluations = resources.EvaluationsResourceWithStreamingResponse(client.evaluations)
         self.inspect = resources.InspectResourceWithStreamingResponse(client.inspect)
         self.inference = resources.InferenceResourceWithStreamingResponse(client.inference)
         self.memory = resources.MemoryResourceWithStreamingResponse(client.memory)
         self.memory_banks = resources.MemoryBanksResourceWithStreamingResponse(client.memory_banks)
+        self.datasets = resources.DatasetsResourceWithStreamingResponse(client.datasets)
         self.models = resources.ModelsResourceWithStreamingResponse(client.models)
         self.post_training = resources.PostTrainingResourceWithStreamingResponse(client.post_training)
         self.providers = resources.ProvidersResourceWithStreamingResponse(client.providers)
-        self.reward_scoring = resources.RewardScoringResourceWithStreamingResponse(client.reward_scoring)
         self.routes = resources.RoutesResourceWithStreamingResponse(client.routes)
         self.safety = resources.SafetyResourceWithStreamingResponse(client.safety)
         self.shields = resources.ShieldsResourceWithStreamingResponse(client.shields)
         self.synthetic_data_generation = resources.SyntheticDataGenerationResourceWithStreamingResponse(
             client.synthetic_data_generation
         )
         self.telemetry = resources.TelemetryResourceWithStreamingResponse(client.telemetry)
+        self.datasetio = resources.DatasetioResourceWithStreamingResponse(client.datasetio)
+        self.scoring = resources.ScoringResourceWithStreamingResponse(client.scoring)
+        self.scoring_functions = resources.ScoringFunctionsResourceWithStreamingResponse(client.scoring_functions)
+        self.eval = resources.EvalResourceWithStreamingResponse(client.eval)


 class AsyncLlamaStackClientWithStreamedResponse:
     def __init__(self, client: AsyncLlamaStackClient) -> None:
         self.agents = resources.AsyncAgentsResourceWithStreamingResponse(client.agents)
         self.batch_inferences = resources.AsyncBatchInferencesResourceWithStreamingResponse(client.batch_inferences)
-        self.datasets = resources.AsyncDatasetsResourceWithStreamingResponse(client.datasets)
-        self.evaluate = resources.AsyncEvaluateResourceWithStreamingResponse(client.evaluate)
-        self.evaluations = resources.AsyncEvaluationsResourceWithStreamingResponse(client.evaluations)
         self.inspect = resources.AsyncInspectResourceWithStreamingResponse(client.inspect)
         self.inference = resources.AsyncInferenceResourceWithStreamingResponse(client.inference)
         self.memory = resources.AsyncMemoryResourceWithStreamingResponse(client.memory)
         self.memory_banks = resources.AsyncMemoryBanksResourceWithStreamingResponse(client.memory_banks)
+        self.datasets = resources.AsyncDatasetsResourceWithStreamingResponse(client.datasets)
         self.models = resources.AsyncModelsResourceWithStreamingResponse(client.models)
         self.post_training = resources.AsyncPostTrainingResourceWithStreamingResponse(client.post_training)
         self.providers = resources.AsyncProvidersResourceWithStreamingResponse(client.providers)
-        self.reward_scoring = resources.AsyncRewardScoringResourceWithStreamingResponse(client.reward_scoring)
         self.routes = resources.AsyncRoutesResourceWithStreamingResponse(client.routes)
         self.safety = resources.AsyncSafetyResourceWithStreamingResponse(client.safety)
         self.shields = resources.AsyncShieldsResourceWithStreamingResponse(client.shields)
         self.synthetic_data_generation = resources.AsyncSyntheticDataGenerationResourceWithStreamingResponse(
             client.synthetic_data_generation
         )
         self.telemetry = resources.AsyncTelemetryResourceWithStreamingResponse(client.telemetry)
+        self.datasetio = resources.AsyncDatasetioResourceWithStreamingResponse(client.datasetio)
+        self.scoring = resources.AsyncScoringResourceWithStreamingResponse(client.scoring)
+        self.scoring_functions = resources.AsyncScoringFunctionsResourceWithStreamingResponse(client.scoring_functions)
+        self.eval = resources.AsyncEvalResourceWithStreamingResponse(client.eval)


 Client = LlamaStackClient
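Taken together, these hunks drop the evaluate, evaluations, and reward_scoring resources and wire up datasetio, scoring, scoring_functions, and eval on both the sync and async clients, along with their raw-response and streaming wrappers. A rough usage sketch of the resulting attribute surface; the constructor argument and server URL are assumptions, not code from this commit:

from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:5000")  # assumed local server URL

# Removed in this commit -- these attributes no longer exist:
#   client.evaluate, client.evaluations, client.reward_scoring

# Added in this commit -- the new resources hang directly off the client:
scoring = client.scoring
scoring_functions = client.scoring_functions
datasetio = client.datasetio
eval_resource = client.eval

# The raw-response and streaming wrappers expose the same attribute set:
raw_eval = client.with_raw_response.eval
streaming_scoring = client.with_streaming_response.scoring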

src/llama_stack_client/_compat.py (+1 -1)

@@ -133,7 +133,7 @@ def model_json(model: pydantic.BaseModel, *, indent: int | None = None) -> str:
 def model_dump(
     model: pydantic.BaseModel,
     *,
-    exclude: IncEx = None,
+    exclude: IncEx | None = None,
     exclude_unset: bool = False,
     exclude_defaults: bool = False,
     warnings: bool = True,

src/llama_stack_client/_models.py (+5 -5)

@@ -176,7 +176,7 @@ def __str__(self) -> str:
     # Based on https://github.com/samuelcolvin/pydantic/issues/1168#issuecomment-817742836.
     @classmethod
     @override
-    def construct(
+    def construct(  # pyright: ignore[reportIncompatibleMethodOverride]
         cls: Type[ModelT],
         _fields_set: set[str] | None = None,
         **values: object,
@@ -248,8 +248,8 @@ def model_dump(
         self,
         *,
         mode: Literal["json", "python"] | str = "python",
-        include: IncEx = None,
-        exclude: IncEx = None,
+        include: IncEx | None = None,
+        exclude: IncEx | None = None,
         by_alias: bool = False,
         exclude_unset: bool = False,
         exclude_defaults: bool = False,
@@ -303,8 +303,8 @@ def model_dump_json(
         self,
         *,
         indent: int | None = None,
-        include: IncEx = None,
-        exclude: IncEx = None,
+        include: IncEx | None = None,
+        exclude: IncEx | None = None,
         by_alias: bool = False,
         exclude_unset: bool = False,
         exclude_defaults: bool = False,

src/llama_stack_client/_types.py (+4 -2)

@@ -16,7 +16,7 @@
     Optional,
     Sequence,
 )
-from typing_extensions import Literal, Protocol, TypeAlias, TypedDict, override, runtime_checkable
+from typing_extensions import Set, Literal, Protocol, TypeAlias, TypedDict, override, runtime_checkable

 import httpx
 import pydantic
@@ -193,7 +193,9 @@ def get(self, __key: str) -> str | None: ...

 # Note: copied from Pydantic
 # https://github.com/pydantic/pydantic/blob/32ea570bf96e84234d2992e1ddf40ab8a565925a/pydantic/main.py#L49
-IncEx: TypeAlias = "set[int] | set[str] | dict[int, Any] | dict[str, Any] | None"
+IncEx: TypeAlias = Union[
+    Set[int], Set[str], Mapping[int, Union["IncEx", Literal[True]]], Mapping[str, Union["IncEx", Literal[True]]]
+]

 PostParser = Callable[[Any], Any]
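The widened IncEx alias mirrors Pydantic's include/exclude type: sets of field names or keys, or mappings whose values are either True or a nested IncEx. Because None is no longer part of the alias itself, the signatures in _compat.py and _models.py above now spell the parameters as IncEx | None = None. A small sketch of the value shapes the alias admits, assuming Pydantic v2 and a throwaway model that is not part of this repository:

import pydantic  # assumes Pydantic v2 for model_dump()

class Example(pydantic.BaseModel):
    id: str
    secret: str

m = Example(id="abc", secret="do-not-log")

# set[str] form of IncEx: exclude a top-level field by name.
print(m.model_dump(exclude={"secret"}))        # {'id': 'abc'}

# mapping form of IncEx: True marks the whole field; nested sets/mappings recurse.
print(m.model_dump(exclude={"secret": True}))  # {'id': 'abc'}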
