|
48 | 48 | class LlamaStackClient(SyncAPIClient):
|
49 | 49 | agents: resources.AgentsResource
|
50 | 50 | batch_inferences: resources.BatchInferencesResource
|
51 | | - datasets: resources.DatasetsResource |
52 | | - evaluate: resources.EvaluateResource |
53 | | - evaluations: resources.EvaluationsResource |
54 | 51 | inspect: resources.InspectResource
|
55 | 52 | inference: resources.InferenceResource
|
56 | 53 | memory: resources.MemoryResource
|
57 | 54 | memory_banks: resources.MemoryBanksResource
|
| 55 | + datasets: resources.DatasetsResource |
58 | 56 | models: resources.ModelsResource
|
59 | 57 | post_training: resources.PostTrainingResource
|
60 | 58 | providers: resources.ProvidersResource
|
61 | | - reward_scoring: resources.RewardScoringResource |
62 | 59 | routes: resources.RoutesResource
|
63 | 60 | safety: resources.SafetyResource
|
64 | 61 | shields: resources.ShieldsResource
|
65 | 62 | synthetic_data_generation: resources.SyntheticDataGenerationResource
|
66 | 63 | telemetry: resources.TelemetryResource
|
| 64 | + datasetio: resources.DatasetioResource |
| 65 | + scoring: resources.ScoringResource |
| 66 | + scoring_functions: resources.ScoringFunctionsResource |
| 67 | + eval: resources.EvalResource |
67 | 68 | with_raw_response: LlamaStackClientWithRawResponse
|
68 | 69 | with_streaming_response: LlamaStackClientWithStreamedResponse
|
69 | 70 |
|
@@ -110,22 +111,23 @@ def __init__(
|
110 | 111 |
|
111 | 112 | self.agents = resources.AgentsResource(self)
|
112 | 113 | self.batch_inferences = resources.BatchInferencesResource(self)
|
113 | | - self.datasets = resources.DatasetsResource(self) |
114 | | - self.evaluate = resources.EvaluateResource(self) |
115 | | - self.evaluations = resources.EvaluationsResource(self) |
116 | 114 | self.inspect = resources.InspectResource(self)
|
117 | 115 | self.inference = resources.InferenceResource(self)
|
118 | 116 | self.memory = resources.MemoryResource(self)
|
119 | 117 | self.memory_banks = resources.MemoryBanksResource(self)
|
| 118 | + self.datasets = resources.DatasetsResource(self) |
120 | 119 | self.models = resources.ModelsResource(self)
|
121 | 120 | self.post_training = resources.PostTrainingResource(self)
|
122 | 121 | self.providers = resources.ProvidersResource(self)
|
123 | | - self.reward_scoring = resources.RewardScoringResource(self) |
124 | 122 | self.routes = resources.RoutesResource(self)
|
125 | 123 | self.safety = resources.SafetyResource(self)
|
126 | 124 | self.shields = resources.ShieldsResource(self)
|
127 | 125 | self.synthetic_data_generation = resources.SyntheticDataGenerationResource(self)
|
128 | 126 | self.telemetry = resources.TelemetryResource(self)
|
| 127 | + self.datasetio = resources.DatasetioResource(self) |
| 128 | + self.scoring = resources.ScoringResource(self) |
| 129 | + self.scoring_functions = resources.ScoringFunctionsResource(self) |
| 130 | + self.eval = resources.EvalResource(self) |
129 | 131 | self.with_raw_response = LlamaStackClientWithRawResponse(self)
|
130 | 132 | self.with_streaming_response = LlamaStackClientWithStreamedResponse(self)
|
131 | 133 |
|
@@ -229,22 +231,23 @@ def _make_status_error(
|
229 | 231 | class AsyncLlamaStackClient(AsyncAPIClient):
|
230 | 232 | agents: resources.AsyncAgentsResource
|
231 | 233 | batch_inferences: resources.AsyncBatchInferencesResource
|
232 | | - datasets: resources.AsyncDatasetsResource |
233 | | - evaluate: resources.AsyncEvaluateResource |
234 | | - evaluations: resources.AsyncEvaluationsResource |
235 | 234 | inspect: resources.AsyncInspectResource
|
236 | 235 | inference: resources.AsyncInferenceResource
|
237 | 236 | memory: resources.AsyncMemoryResource
|
238 | 237 | memory_banks: resources.AsyncMemoryBanksResource
|
| 238 | + datasets: resources.AsyncDatasetsResource |
239 | 239 | models: resources.AsyncModelsResource
|
240 | 240 | post_training: resources.AsyncPostTrainingResource
|
241 | 241 | providers: resources.AsyncProvidersResource
|
242 | | - reward_scoring: resources.AsyncRewardScoringResource |
243 | 242 | routes: resources.AsyncRoutesResource
|
244 | 243 | safety: resources.AsyncSafetyResource
|
245 | 244 | shields: resources.AsyncShieldsResource
|
246 | 245 | synthetic_data_generation: resources.AsyncSyntheticDataGenerationResource
|
247 | 246 | telemetry: resources.AsyncTelemetryResource
|
| 247 | + datasetio: resources.AsyncDatasetioResource |
| 248 | + scoring: resources.AsyncScoringResource |
| 249 | + scoring_functions: resources.AsyncScoringFunctionsResource |
| 250 | + eval: resources.AsyncEvalResource |
248 | 251 | with_raw_response: AsyncLlamaStackClientWithRawResponse
|
249 | 252 | with_streaming_response: AsyncLlamaStackClientWithStreamedResponse
|
250 | 253 |
|
@@ -291,22 +294,23 @@ def __init__(
|
291 | 294 |
|
292 | 295 | self.agents = resources.AsyncAgentsResource(self)
|
293 | 296 | self.batch_inferences = resources.AsyncBatchInferencesResource(self)
|
294 | | - self.datasets = resources.AsyncDatasetsResource(self) |
295 | | - self.evaluate = resources.AsyncEvaluateResource(self) |
296 | | - self.evaluations = resources.AsyncEvaluationsResource(self) |
297 | 297 | self.inspect = resources.AsyncInspectResource(self)
|
298 | 298 | self.inference = resources.AsyncInferenceResource(self)
|
299 | 299 | self.memory = resources.AsyncMemoryResource(self)
|
300 | 300 | self.memory_banks = resources.AsyncMemoryBanksResource(self)
|
| 301 | + self.datasets = resources.AsyncDatasetsResource(self) |
301 | 302 | self.models = resources.AsyncModelsResource(self)
|
302 | 303 | self.post_training = resources.AsyncPostTrainingResource(self)
|
303 | 304 | self.providers = resources.AsyncProvidersResource(self)
|
304 | | - self.reward_scoring = resources.AsyncRewardScoringResource(self) |
305 | 305 | self.routes = resources.AsyncRoutesResource(self)
|
306 | 306 | self.safety = resources.AsyncSafetyResource(self)
|
307 | 307 | self.shields = resources.AsyncShieldsResource(self)
|
308 | 308 | self.synthetic_data_generation = resources.AsyncSyntheticDataGenerationResource(self)
|
309 | 309 | self.telemetry = resources.AsyncTelemetryResource(self)
|
| 310 | + self.datasetio = resources.AsyncDatasetioResource(self) |
| 311 | + self.scoring = resources.AsyncScoringResource(self) |
| 312 | + self.scoring_functions = resources.AsyncScoringFunctionsResource(self) |
| 313 | + self.eval = resources.AsyncEvalResource(self) |
310 | 314 | self.with_raw_response = AsyncLlamaStackClientWithRawResponse(self)
|
311 | 315 | self.with_streaming_response = AsyncLlamaStackClientWithStreamedResponse(self)
|
312 | 316 |
|
@@ -411,96 +415,100 @@ class LlamaStackClientWithRawResponse:
|
411 | 415 | def __init__(self, client: LlamaStackClient) -> None:
|
412 | 416 | self.agents = resources.AgentsResourceWithRawResponse(client.agents)
|
413 | 417 | self.batch_inferences = resources.BatchInferencesResourceWithRawResponse(client.batch_inferences)
|
414 | | - self.datasets = resources.DatasetsResourceWithRawResponse(client.datasets) |
415 | | - self.evaluate = resources.EvaluateResourceWithRawResponse(client.evaluate) |
416 | | - self.evaluations = resources.EvaluationsResourceWithRawResponse(client.evaluations) |
417 | 418 | self.inspect = resources.InspectResourceWithRawResponse(client.inspect)
|
418 | 419 | self.inference = resources.InferenceResourceWithRawResponse(client.inference)
|
419 | 420 | self.memory = resources.MemoryResourceWithRawResponse(client.memory)
|
420 | 421 | self.memory_banks = resources.MemoryBanksResourceWithRawResponse(client.memory_banks)
|
| 422 | + self.datasets = resources.DatasetsResourceWithRawResponse(client.datasets) |
421 | 423 | self.models = resources.ModelsResourceWithRawResponse(client.models)
|
422 | 424 | self.post_training = resources.PostTrainingResourceWithRawResponse(client.post_training)
|
423 | 425 | self.providers = resources.ProvidersResourceWithRawResponse(client.providers)
|
424 | | - self.reward_scoring = resources.RewardScoringResourceWithRawResponse(client.reward_scoring) |
425 | 426 | self.routes = resources.RoutesResourceWithRawResponse(client.routes)
|
426 | 427 | self.safety = resources.SafetyResourceWithRawResponse(client.safety)
|
427 | 428 | self.shields = resources.ShieldsResourceWithRawResponse(client.shields)
|
428 | 429 | self.synthetic_data_generation = resources.SyntheticDataGenerationResourceWithRawResponse(
|
429 | 430 | client.synthetic_data_generation
|
430 | 431 | )
|
431 | 432 | self.telemetry = resources.TelemetryResourceWithRawResponse(client.telemetry)
|
| 433 | + self.datasetio = resources.DatasetioResourceWithRawResponse(client.datasetio) |
| 434 | + self.scoring = resources.ScoringResourceWithRawResponse(client.scoring) |
| 435 | + self.scoring_functions = resources.ScoringFunctionsResourceWithRawResponse(client.scoring_functions) |
| 436 | + self.eval = resources.EvalResourceWithRawResponse(client.eval) |
432 | 437 |
|
433 | 438 |
|
434 | 439 | class AsyncLlamaStackClientWithRawResponse:
|
435 | 440 | def __init__(self, client: AsyncLlamaStackClient) -> None:
|
436 | 441 | self.agents = resources.AsyncAgentsResourceWithRawResponse(client.agents)
|
437 | 442 | self.batch_inferences = resources.AsyncBatchInferencesResourceWithRawResponse(client.batch_inferences)
|
438 | | - self.datasets = resources.AsyncDatasetsResourceWithRawResponse(client.datasets) |
439 | | - self.evaluate = resources.AsyncEvaluateResourceWithRawResponse(client.evaluate) |
440 | | - self.evaluations = resources.AsyncEvaluationsResourceWithRawResponse(client.evaluations) |
441 | 443 | self.inspect = resources.AsyncInspectResourceWithRawResponse(client.inspect)
|
442 | 444 | self.inference = resources.AsyncInferenceResourceWithRawResponse(client.inference)
|
443 | 445 | self.memory = resources.AsyncMemoryResourceWithRawResponse(client.memory)
|
444 | 446 | self.memory_banks = resources.AsyncMemoryBanksResourceWithRawResponse(client.memory_banks)
|
| 447 | + self.datasets = resources.AsyncDatasetsResourceWithRawResponse(client.datasets) |
445 | 448 | self.models = resources.AsyncModelsResourceWithRawResponse(client.models)
|
446 | 449 | self.post_training = resources.AsyncPostTrainingResourceWithRawResponse(client.post_training)
|
447 | 450 | self.providers = resources.AsyncProvidersResourceWithRawResponse(client.providers)
|
448 | | - self.reward_scoring = resources.AsyncRewardScoringResourceWithRawResponse(client.reward_scoring) |
449 | 451 | self.routes = resources.AsyncRoutesResourceWithRawResponse(client.routes)
|
450 | 452 | self.safety = resources.AsyncSafetyResourceWithRawResponse(client.safety)
|
451 | 453 | self.shields = resources.AsyncShieldsResourceWithRawResponse(client.shields)
|
452 | 454 | self.synthetic_data_generation = resources.AsyncSyntheticDataGenerationResourceWithRawResponse(
|
453 | 455 | client.synthetic_data_generation
|
454 | 456 | )
|
455 | 457 | self.telemetry = resources.AsyncTelemetryResourceWithRawResponse(client.telemetry)
|
| 458 | + self.datasetio = resources.AsyncDatasetioResourceWithRawResponse(client.datasetio) |
| 459 | + self.scoring = resources.AsyncScoringResourceWithRawResponse(client.scoring) |
| 460 | + self.scoring_functions = resources.AsyncScoringFunctionsResourceWithRawResponse(client.scoring_functions) |
| 461 | + self.eval = resources.AsyncEvalResourceWithRawResponse(client.eval) |
456 | 462 |
|
457 | 463 |
|
458 | 464 | class LlamaStackClientWithStreamedResponse:
|
459 | 465 | def __init__(self, client: LlamaStackClient) -> None:
|
460 | 466 | self.agents = resources.AgentsResourceWithStreamingResponse(client.agents)
|
461 | 467 | self.batch_inferences = resources.BatchInferencesResourceWithStreamingResponse(client.batch_inferences)
|
462 | | - self.datasets = resources.DatasetsResourceWithStreamingResponse(client.datasets) |
463 | | - self.evaluate = resources.EvaluateResourceWithStreamingResponse(client.evaluate) |
464 | | - self.evaluations = resources.EvaluationsResourceWithStreamingResponse(client.evaluations) |
465 | 468 | self.inspect = resources.InspectResourceWithStreamingResponse(client.inspect)
|
466 | 469 | self.inference = resources.InferenceResourceWithStreamingResponse(client.inference)
|
467 | 470 | self.memory = resources.MemoryResourceWithStreamingResponse(client.memory)
|
468 | 471 | self.memory_banks = resources.MemoryBanksResourceWithStreamingResponse(client.memory_banks)
|
| 472 | + self.datasets = resources.DatasetsResourceWithStreamingResponse(client.datasets) |
469 | 473 | self.models = resources.ModelsResourceWithStreamingResponse(client.models)
|
470 | 474 | self.post_training = resources.PostTrainingResourceWithStreamingResponse(client.post_training)
|
471 | 475 | self.providers = resources.ProvidersResourceWithStreamingResponse(client.providers)
|
472 | | - self.reward_scoring = resources.RewardScoringResourceWithStreamingResponse(client.reward_scoring) |
473 | 476 | self.routes = resources.RoutesResourceWithStreamingResponse(client.routes)
|
474 | 477 | self.safety = resources.SafetyResourceWithStreamingResponse(client.safety)
|
475 | 478 | self.shields = resources.ShieldsResourceWithStreamingResponse(client.shields)
|
476 | 479 | self.synthetic_data_generation = resources.SyntheticDataGenerationResourceWithStreamingResponse(
|
477 | 480 | client.synthetic_data_generation
|
478 | 481 | )
|
479 | 482 | self.telemetry = resources.TelemetryResourceWithStreamingResponse(client.telemetry)
|
| 483 | + self.datasetio = resources.DatasetioResourceWithStreamingResponse(client.datasetio) |
| 484 | + self.scoring = resources.ScoringResourceWithStreamingResponse(client.scoring) |
| 485 | + self.scoring_functions = resources.ScoringFunctionsResourceWithStreamingResponse(client.scoring_functions) |
| 486 | + self.eval = resources.EvalResourceWithStreamingResponse(client.eval) |
480 | 487 |
|
481 | 488 |
|
482 | 489 | class AsyncLlamaStackClientWithStreamedResponse:
|
483 | 490 | def __init__(self, client: AsyncLlamaStackClient) -> None:
|
484 | 491 | self.agents = resources.AsyncAgentsResourceWithStreamingResponse(client.agents)
|
485 | 492 | self.batch_inferences = resources.AsyncBatchInferencesResourceWithStreamingResponse(client.batch_inferences)
|
486 | | - self.datasets = resources.AsyncDatasetsResourceWithStreamingResponse(client.datasets) |
487 | | - self.evaluate = resources.AsyncEvaluateResourceWithStreamingResponse(client.evaluate) |
488 | | - self.evaluations = resources.AsyncEvaluationsResourceWithStreamingResponse(client.evaluations) |
489 | 493 | self.inspect = resources.AsyncInspectResourceWithStreamingResponse(client.inspect)
|
490 | 494 | self.inference = resources.AsyncInferenceResourceWithStreamingResponse(client.inference)
|
491 | 495 | self.memory = resources.AsyncMemoryResourceWithStreamingResponse(client.memory)
|
492 | 496 | self.memory_banks = resources.AsyncMemoryBanksResourceWithStreamingResponse(client.memory_banks)
|
| 497 | + self.datasets = resources.AsyncDatasetsResourceWithStreamingResponse(client.datasets) |
493 | 498 | self.models = resources.AsyncModelsResourceWithStreamingResponse(client.models)
|
494 | 499 | self.post_training = resources.AsyncPostTrainingResourceWithStreamingResponse(client.post_training)
|
495 | 500 | self.providers = resources.AsyncProvidersResourceWithStreamingResponse(client.providers)
|
496 | | - self.reward_scoring = resources.AsyncRewardScoringResourceWithStreamingResponse(client.reward_scoring) |
497 | 501 | self.routes = resources.AsyncRoutesResourceWithStreamingResponse(client.routes)
|
498 | 502 | self.safety = resources.AsyncSafetyResourceWithStreamingResponse(client.safety)
|
499 | 503 | self.shields = resources.AsyncShieldsResourceWithStreamingResponse(client.shields)
|
500 | 504 | self.synthetic_data_generation = resources.AsyncSyntheticDataGenerationResourceWithStreamingResponse(
|
501 | 505 | client.synthetic_data_generation
|
502 | 506 | )
|
503 | 507 | self.telemetry = resources.AsyncTelemetryResourceWithStreamingResponse(client.telemetry)
|
| 508 | + self.datasetio = resources.AsyncDatasetioResourceWithStreamingResponse(client.datasetio) |
| 509 | + self.scoring = resources.AsyncScoringResourceWithStreamingResponse(client.scoring) |
| 510 | + self.scoring_functions = resources.AsyncScoringFunctionsResourceWithStreamingResponse(client.scoring_functions) |
| 511 | + self.eval = resources.AsyncEvalResourceWithStreamingResponse(client.eval) |
504 | 512 |
|
505 | 513 |
|
506 | 514 | Client = LlamaStackClient
|
|
0 commit comments