diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock
index 5f9243c3..28b65901 100644
--- a/.speakeasy/gen.lock
+++ b/.speakeasy/gen.lock
@@ -1,19 +1,19 @@
lockVersion: 2.0.0
id: 2d045ec7-2ebb-4f4d-ad25-40953b132161
management:
- docChecksum: 55730589b8aa15c220d6d72eff876af4
+ docChecksum: 463d7bdb3f827922fff6e7a9e1dc09ec
docVersion: 1.0.0
speakeasyVersion: 1.754.0
generationVersion: 2.862.0
- releaseVersion: 2.1.3
- configChecksum: f9314a8eb94dfdb80621f196b6abfd9c
+ releaseVersion: 2.2.0rc1
+ configChecksum: c8c010d5fe5b11bf44e0ab048a2f7e9a
repoURL: https://github.com/mistralai/client-python.git
installationURL: https://github.com/mistralai/client-python.git
published: true
persistentEdits:
- generation_id: 8fcdbb7f-ada3-4a90-970d-e275c2dd2090
- pristine_commit_hash: c03ad2205c9bdefd33787d6b6d3ffc96b56c6f87
- pristine_tree_hash: 015ef7e1d24817483937f656f5f7a5d15ada1e8f
+ generation_id: ea079c2e-a12c-493c-8efb-a9762e991b50
+ pristine_commit_hash: e2352f93e359fc65c00ad6cc04c6e2167424ff5f
+ pristine_tree_hash: 60f2f179ff27fe8be3c50ff8dded0f32427ee36b
features:
python:
acceptHeaders: 3.0.0
@@ -24,6 +24,7 @@ features:
core: 6.0.18
customCodeRegions: 0.1.1
defaultEnabledRetries: 0.2.0
+ deprecations: 3.0.2
downloadStreams: 1.0.1
enumUnions: 0.1.1
envVarSecurityUsage: 0.3.2
@@ -41,6 +42,7 @@ features:
nameOverrides: 3.0.3
nullables: 1.0.2
openEnums: 1.0.4
+ pagination: 3.0.7
responseFormat: 1.1.0
retries: 3.0.4
sdkHooks: 1.2.1
@@ -71,6 +73,62 @@ trackedFiles:
id: 750ead4ce7ee
last_write_checksum: sha1:a3fa64f01329ca02fdd8d7a6470c3d2f051985e5
pristine_git_object: 615552c32013c702b3e7476b1691952b0a4a65df
+ docs/models/activitytaskcompletedattributesrequest.md:
+ id: 9509b84041b6
+ last_write_checksum: sha1:bf7116695175ef6664a86b84ebcaa2c095c55bf2
+ pristine_git_object: 5050b558d58657f0869d3140e3949b9cca2afcfb
+ docs/models/activitytaskcompletedattributesresponse.md:
+ id: 972b526f8415
+ last_write_checksum: sha1:d04ffa5085fcf5e440a922bdb130035b86277ff1
+ pristine_git_object: 5baaffa6840cb04dbb8eb2379fcea08c96826455
+ docs/models/activitytaskcompletedrequest.md:
+ id: fc911027e2e5
+ last_write_checksum: sha1:3897f394900cb847c6efcf67beff158d330035a9
+ pristine_git_object: 9008bbf1e50e37546029205d756e5ef8493724d8
+ docs/models/activitytaskcompletedresponse.md:
+ id: 6aade3f8c26c
+ last_write_checksum: sha1:944b923aa932452372abebfe6d61ab739555109f
+ pristine_git_object: 56a646311c1f9f7bb9dcd8187c9e4f937f19e6cb
+ docs/models/activitytaskfailedattributes.md:
+ id: a69d5f4b0b99
+ last_write_checksum: sha1:e5371dae85ec79f485b5e3afc1ac8c29a2424163
+ pristine_git_object: 5cb9cafe9ce235ff996c3f1529babedfc6ebe239
+ docs/models/activitytaskfailedrequest.md:
+ id: 7f9c3fa2ddf7
+ last_write_checksum: sha1:2cf9ff1213584c3a20993eb2e29175f5ee528a90
+ pristine_git_object: 1db1d90a44ba364f9d64e15c1d4663e549be9701
+ docs/models/activitytaskfailedresponse.md:
+ id: 325e821a4d2f
+ last_write_checksum: sha1:165eb54c023c8d29fa1093542cf62b69cbc4657b
+ pristine_git_object: 0853f68d3413dead9b67b3183b287a08f9339faa
+ docs/models/activitytaskretryingattributes.md:
+ id: 4d2e278266f7
+ last_write_checksum: sha1:a79f27b4272efab1378a0f2d7de12939dfb7a933
+ pristine_git_object: 9216607678cc149d446bf30ba92a6cf9b3d4d3d2
+ docs/models/activitytaskretryingrequest.md:
+ id: fe8bf3783cda
+ last_write_checksum: sha1:b6f5c9dc5bc15ca1d5d7fd85f5541657c803fe59
+ pristine_git_object: fb6e27b5cb4585f2496edea5cf060df59cb7495f
+ docs/models/activitytaskretryingresponse.md:
+ id: ca0fb7b72379
+ last_write_checksum: sha1:0ede54e73f1c5c77e585e8c9f3892c626bede7cf
+ pristine_git_object: 8b7bdd42e45eb8b3c5ad33a2de5ac2d15d8b3453
+ docs/models/activitytaskstartedattributesrequest.md:
+ id: a33e1e21c741
+ last_write_checksum: sha1:caa2e31f0a6fdc73de5df68136a585dfe7f18d93
+ pristine_git_object: e264b20b3e50d78a37f15bcd5bbf7dd207a87ac4
+ docs/models/activitytaskstartedattributesresponse.md:
+ id: b0d2e886ad19
+ last_write_checksum: sha1:81ddc501fe7c04dc2313ef93ec5897a806934798
+ pristine_git_object: 5d1f59a838412b1fc863dc51f4269548c760e584
+ docs/models/activitytaskstartedrequest.md:
+ id: 29ea1621e782
+ last_write_checksum: sha1:d6aa2804105baceb53fa31a5ab2ff15d3f20a371
+ pristine_git_object: 02421624f37cfefbb8544a2b207b0b0fdd421a30
+ docs/models/activitytaskstartedresponse.md:
+ id: c25113506e99
+ last_write_checksum: sha1:3cd53ec4c3c8bdc389c55cf527c960b3b8a91c30
+ pristine_git_object: 1aee1533fc92116e79a13b338dd6e6e74fcab800
docs/models/agent.md:
id: ffdbb4c53c87
last_write_checksum: sha1:17c0a4bcfb18b9db99bb3a783d275a046b66bb78
@@ -243,6 +301,10 @@ trackedFiles:
id: 133f4af8058f
last_write_checksum: sha1:95fa73ebd765cbd244c847218df6d31e18dc5e85
pristine_git_object: 276656d1d00ca174e78aa9102f7f576575daa818
+ docs/models/archiveworkflowv1workflowsworkflowidentifierarchiveputrequest.md:
+ id: 782799cd1e0b
+ last_write_checksum: sha1:c4e29a286d05af9de794a7b955dd5443c8ee58bc
+ pristine_git_object: 6ebe540af990f5a52dbd236e6d29fd138a2fae22
docs/models/arguments.md:
id: 7ea5e33709a7
last_write_checksum: sha1:09eea126210d7fd0353e60a76bf1dbed173f13ec
@@ -301,8 +363,12 @@ trackedFiles:
pristine_git_object: f8401a8e26c3d54aa47a1a6cc265050f0a4afc80
docs/models/basefielddefinition.md:
id: f9e0bbae859b
- last_write_checksum: sha1:28a4d2774231873af97debedcf2fba4f49e83bf2
- pristine_git_object: 3f7abea981c142e1fb1ba993a10dbd0e347b2df2
+ last_write_checksum: sha1:fea96b146e5696ff3b93e6b6529fe4ace90616aa
+ pristine_git_object: 3d721d91572e4cdb8d5de5a2cdd294693f552d73
+ docs/models/basefielddefinitiontype.md:
+ id: e0678603f859
+ last_write_checksum: sha1:4c79afbbc22081d7ac18c3de963b92dd2dca0069
+ pristine_git_object: efdff2a4bf26336c8f1880683fb2d798ee2d630d
docs/models/basemodelcard.md:
id: 2f62bfbd650e
last_write_checksum: sha1:d42dfe36b103ee95f55e91ac306e3179060dfb3f
@@ -315,6 +381,18 @@ trackedFiles:
id: 8053e29a3f26
last_write_checksum: sha1:23a12dc2e95f92a7a3691bd65a1b05012c669f0f
pristine_git_object: 95016cdc4c6225d23edc4436e11e4a7feacf1fe6
+ docs/models/batchexecutionbody.md:
+ id: eb5f0e359ea6
+ last_write_checksum: sha1:1501d090c7bcb631f0c4ec0e07417eea8277d632
+ pristine_git_object: 9c31e334d7b44a1fea834f14f84945ea69d4a55b
+ docs/models/batchexecutionresponse.md:
+ id: 77f8c97e0418
+ last_write_checksum: sha1:3dc72435fc7dcf31172ab0f4a5301b25e950fda2
+ pristine_git_object: 394b6d86517e1339bbab6a629289b52cbc237684
+ docs/models/batchexecutionresult.md:
+ id: a04ce80759e1
+ last_write_checksum: sha1:c595f3e603609a9e676bf253f3935a74f9bed27a
+ pristine_git_object: 57107bbaa5a544f473a1e5d268ba7ed89dd27ebd
docs/models/batchjob.md:
id: de2a00d0f739
last_write_checksum: sha1:1160822c4032e1745dfaf37abcac02e78cbc4fb4
@@ -339,6 +417,10 @@ trackedFiles:
id: cc1272bc909c
last_write_checksum: sha1:291f6152431f3f14c16df9005a4392907dbf03e2
pristine_git_object: 1f2a7a365ac4a3811ccf9760e56eabbf9106638d
+ docs/models/cancelworkflowexecutionv1workflowsexecutionsexecutionidcancelpostrequest.md:
+ id: 69e393daf019
+ last_write_checksum: sha1:51a5c5fa815093aa1e91ec0f41567066b025abf3
+ pristine_git_object: 4968d664d1f4d34c300bc6a6dec18c992652999f
docs/models/chatclassificationrequest.md:
id: 57b86771c870
last_write_checksum: sha1:bfd2fb8e2c83578ca0cea5209ea3f18c3bcd2ae5
@@ -445,8 +527,8 @@ trackedFiles:
pristine_git_object: f3b10727b023dd83a207d955b3d0f3cd4b7479a1
docs/models/classifierfinetunedmodel.md:
id: b67a370e0ef1
- last_write_checksum: sha1:5fe3c26e337083716dd823e861924a03c55ce293
- pristine_git_object: ad05f93147d6904ee62602480c24644ec5e4cf63
+ last_write_checksum: sha1:d2fee635b135b4728a7966dd62c7ddea3f2d0c1c
+ pristine_git_object: 9fa69cbb27a53456fd359ba8650d3510264926b9
docs/models/classifierfinetuningjob.md:
id: 5bf35c25183f
last_write_checksum: sha1:afedddfe38e217189b5ec12ded74606c3b1e4c59
@@ -505,8 +587,8 @@ trackedFiles:
pristine_git_object: 7a66e8fee2bb0f1c58166177653893bb05b98f1d
docs/models/completionfinetunedmodel.md:
id: 23a7705a9c89
- last_write_checksum: sha1:50d173b7505a97435c9d7ccb4fa99af04a51c6a2
- pristine_git_object: 0055db021f1c039c84cf7cfecd654683d2f9996f
+ last_write_checksum: sha1:43b1d576afd2da43e8a97f374b09c6a5ed236ec1
+ pristine_git_object: 26bcdb1700c8e8ce52c93da302e7c05e97935091
docs/models/completionfinetuningjob.md:
id: 13c69dd18690
last_write_checksum: sha1:b77e82f00f851034999986ff67aea5b0b558fbd2
@@ -565,8 +647,8 @@ trackedFiles:
pristine_git_object: 9ef7a35154327673593f323a142fe63ec706e799
docs/models/connectorcalltoolv1request.md:
id: a7a294c4280c
- last_write_checksum: sha1:296a9c31a7b9dc7621b871000f17f6ced3d3a288
- pristine_git_object: cdda08cf9a0c30e7b2e8e1017cd633472ea398c7
+ last_write_checksum: sha1:0d8983932c2f4eb6f19e7a81c1e76eeb7ee9bb2f
+ pristine_git_object: dea7083dabf1fe9f5470a5b13d6e65ae234d8aa8
docs/models/connectordeletev1request.md:
id: 8d5621ba6395
last_write_checksum: sha1:881b88b2e0788f7c16938115a6cada5f0ebe144b
@@ -807,6 +889,98 @@ trackedFiles:
id: 7bcc77607afa
last_write_checksum: sha1:ce9e8ffac3f83e08269fbed5d2dffbfeb9f8649a
pristine_git_object: 0a0b69217abb0fbcbe30fad32c239aea070d3740
+ docs/models/customtaskcanceledattributes.md:
+ id: 5a2768dda295
+ last_write_checksum: sha1:0a1df1ffa17cd0857bf3e0ac884406d7dfeb07f0
+ pristine_git_object: 3a5facdd3be90098f0f22bcf05215472cf2d4ec9
+ docs/models/customtaskcanceledrequest.md:
+ id: d6ef1bfd85d9
+ last_write_checksum: sha1:c03f0e55f8a90476dea366bf3246a458cab76734
+ pristine_git_object: 4a313ab15cc92a231b9ae3361a745e1ad0c3f767
+ docs/models/customtaskcanceledresponse.md:
+ id: 79e4f2bbf720
+ last_write_checksum: sha1:c65ddfa80c6bd5ce96fcd0de194f1dbb68423dea
+ pristine_git_object: 0bcee2279755656a624d626acea0a157fa3b381c
+ docs/models/customtaskcompletedattributesrequest.md:
+ id: 841c933a0d4d
+ last_write_checksum: sha1:11ecb1aa3f1be3c38ea6e42e5c47a2c714800d0c
+ pristine_git_object: 43671ef9518e80fde77ee2023c2caf282388b0f9
+ docs/models/customtaskcompletedattributesresponse.md:
+ id: c12961b43e1c
+ last_write_checksum: sha1:772f6aa0610241727b090426384fcf883fa919cb
+ pristine_git_object: a6a136e75a0625d155eb57808fc6f967dc10cc47
+ docs/models/customtaskcompletedrequest.md:
+ id: 7591787e3f54
+ last_write_checksum: sha1:d4d3e2e6f96a194a0988b9f264f739c961d0b03b
+ pristine_git_object: 90ec92a736e9c6edb95871cda81c49b74492e446
+ docs/models/customtaskcompletedresponse.md:
+ id: 81d843216312
+ last_write_checksum: sha1:0439d35a428e5d8d321da8b710ce72c188802085
+ pristine_git_object: 6871184f3d850a628ed71cbef9c98ec3e742dbfe
+ docs/models/customtaskfailedattributes.md:
+ id: cd0cdf6bd8d8
+ last_write_checksum: sha1:71cf249d1b1bd2df1cf3ec4426da9b8c9bb82c37
+ pristine_git_object: f4e3ab2d57ec9d447c3570e6832f911c93c94f96
+ docs/models/customtaskfailedrequest.md:
+ id: 59a1820fceef
+ last_write_checksum: sha1:6d6edbc9e05d3d76a0b42501777d61a367ea5a26
+ pristine_git_object: ddfd79a608ba60d960defd5909492fd941233d1e
+ docs/models/customtaskfailedresponse.md:
+ id: 16c047b0f70f
+ last_write_checksum: sha1:639353633a2fd6f383896673750338235a1c442e
+ pristine_git_object: e14e4afa4b1fe76d10a2f8505ce88e9a828d845e
+ docs/models/customtaskinprogressattributesrequest.md:
+ id: 44d737ed33c5
+ last_write_checksum: sha1:5eca2ef7c833740ac20e48e8e967fa25a8678033
+ pristine_git_object: 6241f13a2b45926044aa71636c4a2c2e18decacb
+ docs/models/customtaskinprogressattributesrequestpayload.md:
+ id: 0452df61c52a
+ last_write_checksum: sha1:b347ebe1758df4bfc98db6e73cca8c44ac596320
+ pristine_git_object: 776898babb4af16af124e4103e17651f9d75d308
+ docs/models/customtaskinprogressattributesresponse.md:
+ id: 8482e22b58c4
+ last_write_checksum: sha1:1716403748b86df8b8c78dd60c34098130789a0b
+ pristine_git_object: 311e4fb5215ae3cbf1617f5049abc3318237e615
+ docs/models/customtaskinprogressattributesresponsepayload.md:
+ id: 95c1fda66b5d
+ last_write_checksum: sha1:48304d42e556419f50e7f294f9124f0e6175194b
+ pristine_git_object: 96e460227d2a9eba0da11aca8d03b944695b94d0
+ docs/models/customtaskinprogressrequest.md:
+ id: 794c9c4840e2
+ last_write_checksum: sha1:fc9d8ed5220d423fb1bb1a6f418e2c50cbd3da28
+ pristine_git_object: 4e32436301d575da50f1c9833f46606b45906b07
+ docs/models/customtaskinprogressresponse.md:
+ id: a181c7602b7f
+ last_write_checksum: sha1:43caaf16e8ef772ab4a4b1f2cd926a59c87b627f
+ pristine_git_object: 72afa91fb2baadd7dd8cf32aad2b2812316a6953
+ docs/models/customtaskstartedattributesrequest.md:
+ id: 402fc0112fce
+ last_write_checksum: sha1:d23c2c717dcad52fc6e2afaa88853df95f7e066f
+ pristine_git_object: ff8d50ece8f2d89fe7158210f5954f7c475a0bf8
+ docs/models/customtaskstartedattributesresponse.md:
+ id: 272cf75ac5d3
+ last_write_checksum: sha1:cd16d650b11898966e3950305b3ca2cf035fc6d5
+ pristine_git_object: 8bf43423f0dbba2d89429e4d98f7ae1ac1c460b1
+ docs/models/customtaskstartedrequest.md:
+ id: 7735ab9faa24
+ last_write_checksum: sha1:c531b566a1bd31ce7f1118043ce20c1616b2eb7c
+ pristine_git_object: aa4dc293d80e71142e1c3594b6a06f3d812aa865
+ docs/models/customtaskstartedresponse.md:
+ id: 6ccd9fe137bc
+ last_write_checksum: sha1:031eac9a01bc24b36a9cf2eb4ea864c89ba709e5
+ pristine_git_object: 80b1b08b02b219ef714e44d0853df4c992db84eb
+ docs/models/customtasktimedoutattributes.md:
+ id: b941e1c031db
+ last_write_checksum: sha1:db82046ed2aa47a5c206a9e5cea7f7ab066a3329
+ pristine_git_object: b302b07666ad0b807990d42d6ef67a3c9c213bc8
+ docs/models/customtasktimedoutrequest.md:
+ id: 45967446735d
+ last_write_checksum: sha1:389f1926ae5168214d774bf9aa915bef1cac2a62
+ pristine_git_object: 5be6c0af5c5363a812fe417f56eeb9527961f0ef
+ docs/models/customtasktimedoutresponse.md:
+ id: b4f8e7f006e7
+ last_write_checksum: sha1:9b167994685d056179122eeed0df39c6b52c9cf4
+ pristine_git_object: 75d497434c80a29a3010c451fb6b94d32d213863
docs/models/dataset.md:
id: 31af4d237e19
last_write_checksum: sha1:073fa9a0b7891ccd9b6ae64ba960d74eeb5c9007
@@ -823,6 +997,10 @@ trackedFiles:
id: 449ed69d3872
last_write_checksum: sha1:55a649fc627e297da420482599680da87989c297
pristine_git_object: dbc7c3d0046af0423eebab4d0d982e55a3ed1d38
+ docs/models/deletebatchjobresponse.md:
+ id: 077483bb3832
+ last_write_checksum: sha1:2789ca6f7663b82731201d9439d949c8dfd1077c
+ pristine_git_object: d0df2d193f18773ba758545127e0322b76a7b6cf
docs/models/deletecampaignv1observabilitycampaignscampaigniddeleterequest.md:
id: "853118278484"
last_write_checksum: sha1:b8dc39c31de60ed218a063efd7e8f07fcbb1c63f
@@ -867,6 +1045,22 @@ trackedFiles:
id: 7307bedc8733
last_write_checksum: sha1:a1211b8cb576ad1358e68983680ee326c3920a5e
pristine_git_object: 8142772d7ea33ad8a75cf9cf822564ba3f630de2
+ docs/models/deploymentdetailresponse.md:
+ id: 712f7bbc8cc0
+ last_write_checksum: sha1:b346373ba31e99b0238582297011c8f2f10d2f13
+ pristine_git_object: e1d84e11532acb21ff4d083e704bbca68385ebf3
+ docs/models/deploymentlistresponse.md:
+ id: 45a11caf1008
+ last_write_checksum: sha1:aa393b5a879506970c6d27342168d8f5f50d700c
+ pristine_git_object: 22868512ffd2c564c8a8ab02fbb9f324d2ecf013
+ docs/models/deploymentresponse.md:
+ id: a9da842850c4
+ last_write_checksum: sha1:073ef3db8e9709464950891a311243f9bf96c28d
+ pristine_git_object: f6f12836457eb150c477d37d7325238dac25ded5
+ docs/models/deploymentworkerresponse.md:
+ id: 07cd9c89237a
+ last_write_checksum: sha1:7da6e9f5df0b2d81ae899b28a3c5cdb9fbd2b0d3
+ pristine_git_object: 9b76278e391626a3a8ab3607def03730f169215b
docs/models/document.md:
id: cd1d2a444370
last_write_checksum: sha1:c10641b02547bedcc982b8997097083dfc562598
@@ -915,6 +1109,10 @@ trackedFiles:
id: 89b078acdc42
last_write_checksum: sha1:e3e9200948f864382e0ecd3e04240b13d013141a
pristine_git_object: 20b50618ac99c63f7cf57fe4377840bfc1f85823
+ docs/models/encodedpayloadoptions.md:
+ id: 033bd880796d
+ last_write_checksum: sha1:49223cf4218c03c0615357040dc31149214fa67a
+ pristine_git_object: 15367ce6382bcfb3bb090620877719fac93438fe
docs/models/encodingformat.md:
id: 066e154e4d43
last_write_checksum: sha1:cc98abdb803d374146f58a6811c9e3f2b58ff5f3
@@ -931,6 +1129,26 @@ trackedFiles:
id: 311c22a8574a
last_write_checksum: sha1:627793d6aed5e378e3f2eeb4087808eb50e948d5
pristine_git_object: 3eebffca874b8614a5be3d75be3cb7b0e52c2339
+ docs/models/eventprogressstatus.md:
+ id: af5980e18511
+ last_write_checksum: sha1:83557558fb755cbdba7b8cfce5610f2f13f69a58
+ pristine_git_object: 02033178750aea0683ffac8323b94d8caa7d07a1
+ docs/models/eventsource.md:
+ id: 0a07100b20e9
+ last_write_checksum: sha1:687dd7c7a4e01c08c13aacc0709e876179f18da3
+ pristine_git_object: ac53ccf7936c807910c335a52a7cde6549982d0f
+ docs/models/eventtype.md:
+ id: bfe6f1477d8b
+ last_write_checksum: sha1:7932d1d96baca16e3858c2be7c35d406fafe9ebf
+ pristine_git_object: 6add793e64b13b0a6adbd78f7ee165ef7d91c26f
+ docs/models/executeworkflowregistrationv1workflowsregistrationsworkflowregistrationidexecutepostrequest.md:
+ id: 33e258603a49
+ last_write_checksum: sha1:f156d7d837872e5b33f19b385a650a547ad39436
+ pristine_git_object: 54de12a2b578e1d81e2b16d6bf27d2fb865014f7
+ docs/models/executeworkflowv1workflowsworkflowidentifierexecutepostrequest.md:
+ id: e4a5c891d180
+ last_write_checksum: sha1:d47a7d1b9d89462c346c54edd7c0dcbbeae25765
+ pristine_git_object: 29c55a770e232a65565b342ea700e53ab36318cb
docs/models/executionconfig.md:
id: c96b31c33dcd
last_write_checksum: sha1:971187596dde6a53f9e7f4c26cb0f37d5cbafb40
@@ -943,6 +1161,10 @@ trackedFiles:
id: 16660f92d7d1
last_write_checksum: sha1:fa22e53a929291e6b057283482ca7871fb6d2062
pristine_git_object: efe4bbb058c168ad531be27cc135f09464d0da13
+ docs/models/failure.md:
+ id: 3f79c7d64eac
+ last_write_checksum: sha1:344f1cea9b786a399a0dd974d4df010714031b2a
+ pristine_git_object: 259d7bcf2d03d3f26e4a760e92eeb5c039e4aee8
docs/models/feedresultchatcompletioneventpreview.md:
id: 5ed9f0e8db01
last_write_checksum: sha1:c5950602d174d49f293cb85047d871d360e6af16
@@ -1069,8 +1291,8 @@ trackedFiles:
pristine_git_object: 36d3db18679569d21e6bacfe13bcd22715372297
docs/models/ftmodelcard.md:
id: 15ed6f94deea
- last_write_checksum: sha1:5873780bc2a828f1164818eb9c64c3f69c6618cc
- pristine_git_object: f65ff6e888a460ac5f3ad3a4556ccdd8366960c0
+ last_write_checksum: sha1:8e1383e26d86fd247fa1762dbb0508ceed932a49
+ pristine_git_object: 0381dd81d0460af3aacdf81a495c304ed9b78451
docs/models/function.md:
id: 416a80fba031
last_write_checksum: sha1:a9485076d430a7753558461ce87bf42d09e34511
@@ -1167,6 +1389,10 @@ trackedFiles:
id: c8c1559b454f
last_write_checksum: sha1:98b580deaae5c4720cf905907db49e966b5e294a
pristine_git_object: 073ab76999d4f1b8b0b6b89feacc5137b156c4e5
+ docs/models/getdeploymentv1workflowsdeploymentsnamegetrequest.md:
+ id: 655b59bc906d
+ last_write_checksum: sha1:2a98869f76afaf5be8d93482613027600f0d4039
+ pristine_git_object: 6af056b16a647cf9e4e541cf433cf5ee2d3a8a80
docs/models/getfileresponse.md:
id: a983b3c8acd6
last_write_checksum: sha1:643a3c92ce26f21a915bd485fc4af7817e79f864
@@ -1179,6 +1405,14 @@ trackedFiles:
id: 5a7a03200f1f
last_write_checksum: sha1:f9bbebd7b36957b6d9807063f2926b4a37c73a7e
pristine_git_object: 154ece82c1932053d4764d7d8fb2ab0f394027b2
+ docs/models/getrunhistoryv1workflowsrunsrunidhistorygetrequest.md:
+ id: d0010e389ace
+ last_write_checksum: sha1:656aedb84d30a4f532ed0e2247f5424023fb4fe2
+ pristine_git_object: 8d4af7322e4ebe359216fb794c7209c3dc624d69
+ docs/models/getrunv1workflowsrunsrunidgetrequest.md:
+ id: ac9de7c67a5f
+ last_write_checksum: sha1:40fa492321f18e77792d49cb9142b0293be7ffe3
+ pristine_git_object: 6b03369064e2bb2fedffa606d8bff20d9fceae00
docs/models/getsignedurlresponse.md:
id: 5539e5d7c3d4
last_write_checksum: sha1:7198474f48bfba6d47326cd436e4a00a8ba70ce3
@@ -1187,6 +1421,14 @@ trackedFiles:
id: 38d58bb7d102
last_write_checksum: sha1:63b80dff98aa4fc959f71d1e41faa0eeec4801aa
pristine_git_object: cf276b5e4297eec4d5d4c6996fde7144d54dd8c0
+ docs/models/getstreameventsv1workflowseventsstreamgetrequest.md:
+ id: c4f4986768d9
+ last_write_checksum: sha1:e3c15d6f54d2d24040eb0d7cf76e435b50d04965
+ pristine_git_object: 9b5ae17446b4b959c7ab47751921313864ddaf2e
+ docs/models/getstreameventsv1workflowseventsstreamgetresponsebody.md:
+ id: 5d958d2ebde1
+ last_write_checksum: sha1:58ffec3d780d35a688634d273588e0209afd4f72
+ pristine_git_object: 18f9b63939e1110145d3201d45b584a0c155a964
docs/models/getvoicesampleaudiov1audiovoicesvoiceidsamplegetrequest.md:
id: fa1b72f3aa6b
last_write_checksum: sha1:10b8897bf884ac876559ef574a76bee6df3b7afb
@@ -1195,6 +1437,54 @@ trackedFiles:
id: ff2e9c3966c2
last_write_checksum: sha1:83baeac8591dbf035dd1c04fbe9c06e0e65afcd8
pristine_git_object: 09caf65c1d4f3415ccf2a1095a54748a7f6ca2df
+ docs/models/getworkfloweventsv1workflowseventslistgetrequest.md:
+ id: cb48ac6b32b0
+ last_write_checksum: sha1:7194b7cd11b927fefdf242e651d8cdc705680f30
+ pristine_git_object: 5594402ce103f05e902b33c465fbaa225a3e5eb6
+ docs/models/getworkflowexecutionhistoryv1workflowsexecutionsexecutionidhistorygetrequest.md:
+ id: 7524ddab7997
+ last_write_checksum: sha1:b47a3b47cee2e582a58d2504511c37266bb3b0b3
+ pristine_git_object: dc63ca0a3d798c3be90f3b126357e239edb34806
+ docs/models/getworkflowexecutiontraceeventsrequest.md:
+ id: 6722ae5f7115
+ last_write_checksum: sha1:2764ba45e55fe648e50f8d397826cebbf3a3dced
+ pristine_git_object: 4a1d8021877186295dcdaedea6a40429267f0668
+ docs/models/getworkflowexecutiontraceotelrequest.md:
+ id: b74d60dc0f0a
+ last_write_checksum: sha1:48145598f7e45876a30bf3018a7da8c1bdfacaab
+ pristine_git_object: 8c4f9be70233a49cd0d74051a44756e030c2bc1a
+ docs/models/getworkflowexecutiontracesummaryrequest.md:
+ id: 692ff7296f7d
+ last_write_checksum: sha1:208b98b98c00754861017b97072bd2a321f1047b
+ pristine_git_object: a4524bbcf83fd9df132fbcb3cf983d3a72e998a1
+ docs/models/getworkflowexecutionv1workflowsexecutionsexecutionidgetrequest.md:
+ id: 24a42f780b20
+ last_write_checksum: sha1:9f3215cad08d84b5888d5cd30011bed0c37adfe3
+ pristine_git_object: f9566f1c0050c4e66f5985761b6cbc56ec0a73cd
+ docs/models/getworkflowmetricsv1workflowsworkflownamemetricsgetrequest.md:
+ id: bf5300598ee8
+ last_write_checksum: sha1:719dbcd2d7c28d159741ed3c64876c31f82c2691
+ pristine_git_object: a6e623dd847551f7d96b4b90ef8e9d504529112a
+ docs/models/getworkflowregistrationsv1workflowsregistrationsgetrequest.md:
+ id: 4b699772bee5
+ last_write_checksum: sha1:db3928c8dd78fd1d16dbd0a7d667a92036785f2e
+ pristine_git_object: ee72444555a1303163849f32eeee568c7118698e
+ docs/models/getworkflowregistrationv1workflowsregistrationsworkflowregistrationidgetrequest.md:
+ id: 597766b7d51b
+ last_write_checksum: sha1:5c58fbed3af8d08dd94eaba0ae8ed41d61fb6c52
+ pristine_git_object: ec43ad56a6698eaec7a89e015f42154a7a97dea8
+ docs/models/getworkflowsv1workflowsgetrequest.md:
+ id: e61c93d9ecc7
+ last_write_checksum: sha1:3285ea1da5f7e05bc3d15885267d25d2968cf930
+ pristine_git_object: c83ed6d1d0028fcb008b46234767e4a62fbd7c20
+ docs/models/getworkflowsv1workflowsgetresponse.md:
+ id: a6375877d4dd
+ last_write_checksum: sha1:26401f0edf76e787a8ca780eb97724a4c6ca37a7
+ pristine_git_object: 6ec4f4e79cbc377f6b1eb4fff720425cda6b731e
+ docs/models/getworkflowv1workflowsworkflowidentifiergetrequest.md:
+ id: ddc86b6b1bf9
+ last_write_checksum: sha1:63f5d6e78e90572125d1b366ccc0c75a897210e7
+ pristine_git_object: 2f5c2b4dc84095a1b00bac6c728c86dd1f80c2b2
docs/models/githubrepository.md:
id: 66c120df624b
last_write_checksum: sha1:045e538dd7faffc1c6c6e6816563c5c8e776a276
@@ -1283,6 +1573,10 @@ trackedFiles:
id: 798cb1ca1385
last_write_checksum: sha1:67e8bda117608aee0e09a702a1ef8a4b03c40b68
pristine_git_object: c19d0241784ff69bc68a11f405437400057d6f62
+ docs/models/jobsapiroutesbatchdeletebatchjobrequest.md:
+ id: 2e077aebc6a1
+ last_write_checksum: sha1:cdc193bac69847c56040e5e2e514578d3bd9a4e1
+ pristine_git_object: 02f1ec244b3751b07149cbd13c681e6f77ef897d
docs/models/jobsapiroutesbatchgetbatchjobrequest.md:
id: e83a7ec84f8a
last_write_checksum: sha1:d661875832b4b9d5f545262216c9fcb9a77c8cd0
@@ -1343,6 +1637,46 @@ trackedFiles:
id: c265a30fd4cf
last_write_checksum: sha1:410c62a884aae902cdfbfcab33779e62487de13b
pristine_git_object: f40350bf9d74d09ca3a2ec6d91d9068bda631ef5
+ docs/models/jsonpatchadd.md:
+ id: 07cac5b1f9b2
+ last_write_checksum: sha1:74f0bea77f7e1dfebdc686e709a6be90bbda7228
+ pristine_git_object: 2a091c8ac8bfb57bfada1ce173c75543c9431fd0
+ docs/models/jsonpatchappend.md:
+ id: 5d68c9614b70
+ last_write_checksum: sha1:c1d5ac89195b6712b8a4b61450450a4f9b0e910e
+ pristine_git_object: 684cc896b4ba80ba1bbc278e119fc9fc1dbbf924
+ docs/models/jsonpatchpayloadrequest.md:
+ id: 581f55578656
+ last_write_checksum: sha1:a8a92eb17cdf8227cc266792e62200fee47b5665
+ pristine_git_object: fb4da675da1092185beb217b8edf72e31e6b1ffd
+ docs/models/jsonpatchpayloadrequestvalue.md:
+ id: 94fb761c9ad9
+ last_write_checksum: sha1:a420f0be2cdcad371a4d9449e58b981910832616
+ pristine_git_object: d58a1323190368d62d563b2cf679cae86aff813e
+ docs/models/jsonpatchpayloadresponse.md:
+ id: fb6daf532ebb
+ last_write_checksum: sha1:bb2d72a81a43a44a183fea86372e037b072ae085
+ pristine_git_object: ce9dbd90491812583b873030d1cc50139769b3fa
+ docs/models/jsonpatchpayloadresponsevalue.md:
+ id: 8c09c68e6201
+ last_write_checksum: sha1:16506ea5571f2f58b488210f08b6b22431d6b248
+ pristine_git_object: 21c7d186ad3eaa1b233d0a640fa8e2c27cc08e83
+ docs/models/jsonpatchremove.md:
+ id: 8a64b68498b3
+ last_write_checksum: sha1:9619b20ac7d2b69e034fd7e549d2469e2369985a
+ pristine_git_object: 735305cf06722f055065e64b23be040f40bfa96f
+ docs/models/jsonpatchreplace.md:
+ id: c7115b07e32f
+ last_write_checksum: sha1:698ad1a5ce80a06bdcd5e7c6dc582f9f05d94e31
+ pristine_git_object: 382b7d93925d0d68af43c4a745144a71d1f61dcf
+ docs/models/jsonpayloadrequest.md:
+ id: 92f7a9ed39ae
+ last_write_checksum: sha1:e1f66388d6c407091bb9af5846e8f3c8602aa368
+ pristine_git_object: 63ec21420a788d7b108fc9dc75ae3c9346d1971d
+ docs/models/jsonpayloadresponse.md:
+ id: ce61786a7c2e
+ last_write_checksum: sha1:369e120be622bd6deb9d2d5161482e018cb8ef42
+ pristine_git_object: 01eaae6160941599a44254091a0b237a90bc77f5
docs/models/jsonschema.md:
id: a6b15ed6fac8
last_write_checksum: sha1:523465666ad3c292252b3fe60f345c7ffb29053f
@@ -1451,6 +1785,10 @@ trackedFiles:
id: f47ad71ec7ca
last_write_checksum: sha1:3b2bf1e4f6069d0c954e1ebf95b575a32c4adeac
pristine_git_object: 6e1e04c39c15a85d96710f8d3a8ed11a22412816
+ docs/models/librarieslistv1request.md:
+ id: eb6047c126d3
+ last_write_checksum: sha1:9079cd35336bd61bcf0a46c1b350671a5e7df9df
+ pristine_git_object: b9f99a17907fad6427542181936fb962ca873ed3
docs/models/librariessharecreatev1request.md:
id: 99e7bb8f7fed
last_write_checksum: sha1:f37578c7882eab83cca3cb2aaf1ac17b9a21934c
@@ -1499,6 +1837,10 @@ trackedFiles:
id: a776dbfbc267
last_write_checksum: sha1:abba75226b6bc439381777ea05d83bc8e910e53c
pristine_git_object: af046696ebac829f4d7f6333075a4e5b26dd855e
+ docs/models/listdeploymentsv1workflowsdeploymentsgetrequest.md:
+ id: 7c9f534b899b
+ last_write_checksum: sha1:5a57963e7633b8ae37c8967dd9613c25995e9136
+ pristine_git_object: c93c0dbbd799f3817fc94c257720e231378d6c37
docs/models/listdocumentsresponse.md:
id: f2091cee0405
last_write_checksum: sha1:335d0ccd3a448e65739d5a0cfa2c67614daec031
@@ -1521,20 +1863,44 @@ trackedFiles:
pristine_git_object: 66883d64d0e040f5eef0976a88c1a83b7a3137ed
docs/models/listlibrariesresponse.md:
id: 87e3bec10745
- last_write_checksum: sha1:00522e685ec71a54f5f272d66b82e650848eaf36
- pristine_git_object: e21b9ced628f6fd5ae891d4a46666ebc94546859
+ last_write_checksum: sha1:7c0a155e2cd02e420745c3739df42b824305e363
+ pristine_git_object: 18cb63c16ab28d9399b6035c36f24a761008f734
docs/models/listmodelsv1modelsgetrequest.md:
id: ade37f6d014a
last_write_checksum: sha1:10d4e1242cdac6cdc7597881e0d25ce06760971f
pristine_git_object: 537269f7e774b31c45ac75c82c096530c0bd2b4e
+ docs/models/listrunsv1workflowsrunsgetrequest.md:
+ id: 132927390b33
+ last_write_checksum: sha1:5ec52640c83f0d560dc8efb39afc50317fa2e31c
+ pristine_git_object: 259876c68b0c9b5684b3c565b3df4bb3430a76b6
+ docs/models/listrunsv1workflowsrunsgetresponse.md:
+ id: da55980ef20d
+ last_write_checksum: sha1:e176049e48a9f78b47be166b6dc8f47bca3e7da7
+ pristine_git_object: 405119b0a2320a2fd6a6a8b43bede0b5e8435da8
+ docs/models/listrunsv1workflowsrunsgetstatus.md:
+ id: 91fb9d0c3512
+ last_write_checksum: sha1:5b6ee877902594eda2f001322c275100981d778d
+ pristine_git_object: e6f0d606f374000f752021bee5216f622e0df405
docs/models/listsharingresponse.md:
id: 165871ba2e7d
last_write_checksum: sha1:2c6c18123e297829dde6e877f3df984ce20aeef3
pristine_git_object: 4c29d4d4fc8087424104ff7d5312177ec4940094
docs/models/listvoicesv1audiovoicesgetrequest.md:
id: 2728f62b4ff4
- last_write_checksum: sha1:3173ded8a9dfea913295ab4cc943e30a8a30052b
- pristine_git_object: f767d3099a2fa8b487f9d92be421623c536f19b7
+ last_write_checksum: sha1:ef9581d0fa6fbabde734174727c9600fd46c9938
+ pristine_git_object: 67c04d7c738ff871aaaeb8252721265f1754b5d1
+ docs/models/listvoicesv1audiovoicesgettype.md:
+ id: 953d5379682a
+ last_write_checksum: sha1:63cdc365642971afbc8e737cde026775860bc3d7
+ pristine_git_object: a44f95b351b2da5ff6a2ab56a72b24478710771e
+ docs/models/listworkfloweventresponse.md:
+ id: 891251fced72
+ last_write_checksum: sha1:a8eceef60570b6fe8d254ce05806a8966e7631af
+ pristine_git_object: 72c51a165a6d7430910db5dc115fad19518bd371
+ docs/models/listworkfloweventresponseevent.md:
+ id: b7e837f50a09
+ last_write_checksum: sha1:f7a335daf57910fbc5bf435d43023271772bfc8f
+ pristine_git_object: 2b8cb1e461c146faeb06a940781803a008ea2173
docs/models/loc.md:
id: b071d5a509cc
last_write_checksum: sha1:09a04749333ab50ae806c3ac6adcaa90d54df0f1
@@ -1671,6 +2037,10 @@ trackedFiles:
id: f5be2d861921
last_write_checksum: sha1:624f30759b7b7de1913b1ef3b8bb2187a95b9570
pristine_git_object: 9d7a00c44e1507edd12f16fbb6b3864f1c0bbd81
+ docs/models/networkencodedinput.md:
+ id: 44842604e405
+ last_write_checksum: sha1:50e09dfa5ed838df4a760358c6bf81c04da4910b
+ pristine_git_object: 71e38615fada1e4f3b10e669eb3d8a6b768aa268
docs/models/oauth2tokenauth.md:
id: f23959dcc4b0
last_write_checksum: sha1:ca434f8479c3f5e868db1269e86efa5e0d9070e2
@@ -1807,6 +2177,26 @@ trackedFiles:
id: 1391892fce0a
last_write_checksum: sha1:4a82fc483f090e4fc09d19a3f2c0f2a2e022cf3f
pristine_git_object: 36b58e9aa80a53dad530cad93fb1d565fdd03b59
+ docs/models/querydefinition.md:
+ id: 4831b7e558f9
+ last_write_checksum: sha1:f4983c0963906b3eda13c69e2852b08d662dd744
+ pristine_git_object: 9d094fdad5d9b33792fc366fe156a91531d771fa
+ docs/models/queryinvocationbody.md:
+ id: b8ceb9c9a70c
+ last_write_checksum: sha1:01de5f28c0358b7051b80ca6467bb00a30bd8941
+ pristine_git_object: 3d4e5441618a02a07ece83fe69e27d68c5cd8ce4
+ docs/models/queryinvocationbodyinput.md:
+ id: e56d939bc2dd
+ last_write_checksum: sha1:e3b7f0a0865843b789f4ddbc849faa9851fbbb8c
+ pristine_git_object: deb0db275d3da722d0cf340f4ce5e3f914b590c4
+ docs/models/queryworkflowexecutionv1workflowsexecutionsexecutionidqueriespostrequest.md:
+ id: 9ab2fbbf8cc3
+ last_write_checksum: sha1:49e01dcdfec3ec5b60fd7c80f2eaa08ebbc482d1
+ pristine_git_object: 0e0d378d22959d5d3ca1e65aae8d9c6c0a780b95
+ docs/models/queryworkflowresponse.md:
+ id: 6f29ca165760
+ last_write_checksum: sha1:1fb91b06fd875a868c0e8d3bb4b86ae31cc17fec
+ pristine_git_object: cc5b08669152c4f3581a6d80a856c0925c40768b
docs/models/realtimetranscriptionerror.md:
id: 4bc5e819565b
last_write_checksum: sha1:c93e4b19a0aa68723ea69973a9f22a581c7b2ff6
@@ -1867,6 +2257,14 @@ trackedFiles:
id: 8857ab6025c4
last_write_checksum: sha1:5634447d01c483713dad3d6d39df182007e6181b
pristine_git_object: b710cc1f394e04ecc93c9fc6daddddec366254ad
+ docs/models/resetinvocationbody.md:
+ id: 4063ca65ac51
+ last_write_checksum: sha1:94c7cd498cba13d4da784c4be76ddbb5b1b64cd9
+ pristine_git_object: 9f306e2d0ce2e3b9311ed55fdcd028a74319c79d
+ docs/models/resetworkflowv1workflowsexecutionsexecutionidresetpostrequest.md:
+ id: 834b0b115e63
+ last_write_checksum: sha1:21bf448e143353577c2bb1cc9333dba89c033f4b
+ pristine_git_object: 418c6c5a041db5bc33b731b112346c671cd136bd
docs/models/resource.md:
id: 94a32a903140
last_write_checksum: sha1:0c141e37497f5b07d2da77a41777617ed3653582
@@ -1883,14 +2281,10 @@ trackedFiles:
id: 583c991c7a30
last_write_checksum: sha1:0791cb4aa4045708ab64d42bf67bd6ab74bc7752
pristine_git_object: ff67925758959b87992b47a1a32c224eeeb599e3
- docs/models/responseconnectorlisttoolsv11.md:
- id: 6a5e4a2ba10b
- last_write_checksum: sha1:9f2f54bdfb137736b24bafbc0d32c9826ffabfee
- pristine_git_object: 9b5c98c6c4b3ba1292e18edccaafd9d78f934e76
- docs/models/responseconnectorlisttoolsv12.md:
- id: c422816d84f9
- last_write_checksum: sha1:7db23fc2c5ade5ea4b04c70ebc629596545ccdb0
- pristine_git_object: 0266f66da4668a6fe33ed2b9a88278c57056576b
+ docs/models/responseconnectorlisttoolsv1.md:
+ id: 11f3f310ef36
+ last_write_checksum: sha1:db8c315f851caf7f36b852bf7a34ab2a0daf63fe
+ pristine_git_object: 18a31c5628f714a7bf1757bc8f79cab9e689bc6a
docs/models/responsedoneevent.md:
id: 38c38c3c065b
last_write_checksum: sha1:4ac3a0fd91d5ebaccce7f4098ae416b56e08416f
@@ -1899,6 +2293,14 @@ trackedFiles:
id: 3e868aa9958d
last_write_checksum: sha1:4711077bf182e4f3406dd12357da49d37d172b4c
pristine_git_object: 4309bdadc323918900cc4ca4fddb18788361d648
+ docs/models/responseexecuteworkflowregistrationv1workflowsregistrationsworkflowregistrationidexecutepost.md:
+ id: 7df3dce048fa
+ last_write_checksum: sha1:f98acd879d2690c2ce02646026242f21b19a0cb5
+ pristine_git_object: 381987b63d5749243a1aad073f0c6fa03062e3b5
+ docs/models/responseexecuteworkflowv1workflowsworkflowidentifierexecutepost.md:
+ id: 052963e8160c
+ last_write_checksum: sha1:bf0b0faed2f7d54fba78ecb89cef07c339b99233
+ pristine_git_object: 368f23184def4b3d4fe1f7c8a06693afb3eac4dd
docs/models/responseformat.md:
id: 50a1e4140614
last_write_checksum: sha1:e877b2e81470ef5eec5675dfb91a47e74d5d3add
@@ -1935,6 +2337,46 @@ trackedFiles:
id: 0e09775cd9d3
last_write_checksum: sha1:b77964a7b39ec1b74f70925a39c30b23fad6ac43
pristine_git_object: 31f26c3c2869a453306fff5b062b40e95aa5f19a
+ docs/models/scalarmetric.md:
+ id: e23b9b990914
+ last_write_checksum: sha1:272bcac17d958e6d045be0e78ad80aa710d35bb7
+ pristine_git_object: 96b932315f7844bc432ce6bacc48d0349203262b
+ docs/models/scalarmetricvalue.md:
+ id: b73292165d87
+ last_write_checksum: sha1:6585beed45b4f6ce8188348145a148fb3380cf74
+ pristine_git_object: 6723310e9e340eebf58f11dfdc2417d030a87a60
+ docs/models/schedulecalendar.md:
+ id: 60c22b27e191
+ last_write_checksum: sha1:6c5457c6c0f2393e41c297e42d25ca95a14f752a
+ pristine_git_object: 4166bef5ded04c323f5644b80305ed5bb4159fb4
+ docs/models/scheduledefinition.md:
+ id: 5a795fa1c041
+ last_write_checksum: sha1:542e8de454c8a75e18de51f08cff031fa2928cc2
+ pristine_git_object: 4f8f8b80b6687092a5fde30366cba91f7cae5f3f
+ docs/models/scheduledefinitionoutput.md:
+ id: e5f8d9d43716
+ last_write_checksum: sha1:d65378cd7873dd8aa5e60d5fc84b1ecefc5ef4e9
+ pristine_git_object: c4937c6969d586f9909bab342a05afa3cf61696f
+ docs/models/scheduleinterval.md:
+ id: 4792384fc173
+ last_write_checksum: sha1:3c73f2b3bcf44494d0ecd943cf15a592fed95493
+ pristine_git_object: e99d552f4d302f1385afbb76a77b7ac5897be62c
+ docs/models/scheduleoverlappolicy.md:
+ id: ced0baf85d76
+ last_write_checksum: sha1:24c5a5f8bccc5204084bf9bd7f6d419c7fbea309
+ pristine_git_object: 1df8c0ea277353267bdf3b024c7482e9ebc3a0ab
+ docs/models/schedulepolicy.md:
+ id: cd43e2c8ebd4
+ last_write_checksum: sha1:148ef1b0166528b0ddc7c502495b9bac713e482b
+ pristine_git_object: 5f0790490888458b468c4dca9cdbfde2c860f405
+ docs/models/schedulerange.md:
+ id: 70268cb45dae
+ last_write_checksum: sha1:cc9778a0389b2255c8ce8433802fa327a1716980
+ pristine_git_object: d6cb09759e5a983434ffd094b77fd3d36ab450c3
+ docs/models/scope.md:
+ id: c7d8338540ba
+ last_write_checksum: sha1:08e688325601e4c4614cc6b7264ea4d2f631da36
+ pristine_git_object: 0dc29aa5d453a82d0366ea003ab6e5710e2b1e3a
docs/models/searchchatcompletioneventidsrequest.md:
id: ace22de1b563
last_write_checksum: sha1:0368a82e5b6399806252670aec01714e5bb78331
@@ -1971,6 +2413,30 @@ trackedFiles:
id: cd53ce3913a5
last_write_checksum: sha1:380d0621a0a8ec9cd4be2b53a6e326b8c9c3d201
pristine_git_object: 21b8ec1f74c1f903aa087cd3b4d13918c0ea9dad
+ docs/models/signaldefinition.md:
+ id: 4627d2840cb7
+ last_write_checksum: sha1:3606a41b67a27350c22aa2bf234d8a8696b23404
+ pristine_git_object: c1974f70ef9676beb9fefae2c124e231da5b1b6b
+ docs/models/signalinvocationbody.md:
+ id: 9723986308b4
+ last_write_checksum: sha1:4c005bb178a814e1f23bcb5112a0ba61df96ad92
+ pristine_git_object: 14dc23e0e9a8d74b7275daf8a492d6a0a83d5e8a
+ docs/models/signalinvocationbodyinput.md:
+ id: db0ac2b1de58
+ last_write_checksum: sha1:edda01f077f43a2527900caab977d4d07f2e8eff
+ pristine_git_object: e9308bcb4b74122eca76aeca31883c160eb15ebd
+ docs/models/signalinvocationbodynetworkencodedinput.md:
+ id: 0a2d313daef1
+ last_write_checksum: sha1:4a2e7fed866136d621b842d58d0cd6964a869bdb
+ pristine_git_object: 09368656b06d42477862d0f4f8dcf5909d52d1c3
+ docs/models/signalworkflowexecutionv1workflowsexecutionsexecutionidsignalspostrequest.md:
+ id: f43e35cd41a6
+ last_write_checksum: sha1:701f949b648221e98bb302c4afff668f4d86fc4d
+ pristine_git_object: 2ca356d85370faff6c573a678a4380c9caf56ad6
+ docs/models/signalworkflowresponse.md:
+ id: 38c985ca6ede
+ last_write_checksum: sha1:f82ac07d517e89bfa8044eebff6f9f7aa56f6c77
+ pristine_git_object: f65d7924388b842863deddb0e52d4376f2e28925
docs/models/source.md:
id: 6541ef7b41e7
last_write_checksum: sha1:00d43797d0155945ec39d4aff068a3ac7bad5ad5
@@ -2015,6 +2481,26 @@ trackedFiles:
id: 6a902241137c
last_write_checksum: sha1:6291d6bc7cbcd7640b83b03ac8ce353b95c1f913
pristine_git_object: 372eafee6f22105d6f5fd31f2e04ea04f5abe641
+ docs/models/streameventssepayload.md:
+ id: c3cdfddd480b
+ last_write_checksum: sha1:6251dc54c4becf826ac475ffd8cc48d18a509aad
+ pristine_git_object: 6ec6726a4464f4535a51398cd042f90e4390f789
+ docs/models/streameventssepayloaddata.md:
+ id: 04bdf412c709
+ last_write_checksum: sha1:6feb3abc7dcb4766216d12093d597c180ab396f1
+ pristine_git_object: 61e6556277ba7ffe5e81b43fbc0c3eb0d21013df
+ docs/models/streameventworkflowcontext.md:
+ id: 32e6c8f9826e
+ last_write_checksum: sha1:d89a4e547d92889b3c88269d8f3c9a870a5058c3
+ pristine_git_object: 098f48fd4cb8dad49298c723081528dcd715c26a
+ docs/models/streamv1workflowsexecutionsexecutionidstreamgetrequest.md:
+ id: 5ca96459c844
+ last_write_checksum: sha1:200e5cf38c1c655c72d856591ebca1c3e85a9b7f
+ pristine_git_object: 19d02a40d06b7066d32f63e0daf6a40434d88754
+ docs/models/streamv1workflowsexecutionsexecutionidstreamgetresponsebody.md:
+ id: 09d5a590ebf1
+ last_write_checksum: sha1:1f22c1e502478d9532b6729bcde30aa8bb5528c4
+ pristine_git_object: e12bb93835a164b665e4cdf4b9862f350d91c850
docs/models/supportedoperator.md:
id: 000f0770e0f9
last_write_checksum: sha1:ea71b0062712f8750234d8bd5313063589de1155
@@ -2039,6 +2525,62 @@ trackedFiles:
id: 58be1f1aef25
last_write_checksum: sha1:2005c0e15f36bd315c17688daa4dd9a8c260b009
pristine_git_object: 2d16da9de0af99ca5cecd7aab3349e13d22c7013
+ docs/models/tempogettraceresponse.md:
+ id: b5700fe45c4d
+ last_write_checksum: sha1:334312483f0365ed3ec20f6fc26b7fdf54fde811
+ pristine_git_object: cc4a4e93da510f5852e5ce2c7254f6f18efe0ca7
+ docs/models/tempotraceattribute.md:
+ id: 6ce2d6cbea6b
+ last_write_checksum: sha1:d296899e9a5fb160925a36382dee30dbe959a106
+ pristine_git_object: 6061df8649c32a4c4daea648223525a9f870bb2a
+ docs/models/tempotraceattributeboolvalue.md:
+ id: 0e2767bf14da
+ last_write_checksum: sha1:6e09bcab345a7cbe376c41fd20955017f54243bc
+ pristine_git_object: 07b77e43cd17be26c620a30a8ec627310d67886f
+ docs/models/tempotraceattributeintvalue.md:
+ id: 55a1d75f3e12
+ last_write_checksum: sha1:2381b0f0f84aeb42d9a0a97fe3764ebaa9dee0d9
+ pristine_git_object: 5c47f1eb1b12d0d1a29f676daf2b79225032c524
+ docs/models/tempotraceattributestringvalue.md:
+ id: ccd801521b60
+ last_write_checksum: sha1:3c0c1e94c6adb62043be5498dc6632079e3ec701
+ pristine_git_object: 61e7eb8ac7f91e718e85fd46b7298095300821f5
+ docs/models/tempotraceattributevalue.md:
+ id: b5a74866dcac
+ last_write_checksum: sha1:30a7d69df2775d18e110e20dcd6956359a065b54
+ pristine_git_object: eb448c3b039475cc18de097e2d90f5700aa136eb
+ docs/models/tempotracebatch.md:
+ id: 1a73bb7f3ee0
+ last_write_checksum: sha1:778e8837929ee817567c02653ae9daca8d564ea7
+ pristine_git_object: 1e7779a2d9c56a5e25d46e26031f69e5ab8410eb
+ docs/models/tempotraceevent.md:
+ id: 07cb7db3504c
+ last_write_checksum: sha1:04a17ed3d22f34bc1a10c135f10ae18785abe42d
+ pristine_git_object: e456fa7c93b66686c0d3ac802da83e2b0b8ffa69
+ docs/models/tempotraceresource.md:
+ id: 9e75b787c050
+ last_write_checksum: sha1:dc195ca7c4331183e5688fc53161208a54495ffe
+ pristine_git_object: 0d0e2ded0e99297f1982e70bc5bb9d14e957d25d
+ docs/models/tempotracescope.md:
+ id: 07e19420387a
+ last_write_checksum: sha1:14ccec8d631703fbee79ac1a1f5e1ac71b5a461f
+ pristine_git_object: f3678a9fb8867a9e42b4ad02ef98dcbb7622fe63
+ docs/models/tempotracescopekind.md:
+ id: 04915d7e8cc2
+ last_write_checksum: sha1:e5ab7e2396f71337a812c298097080a4e6b4b175
+ pristine_git_object: e08cc9be04286dc81cd000422e0b914ad7acd8bb
+ docs/models/tempotracescopespan.md:
+ id: 3fa2689ebf7c
+ last_write_checksum: sha1:474874dac2f2513fb2c8c834aa21cb59db543bb1
+ pristine_git_object: e3b6d58a39dde1cdcab1deb5f53bfad5d14bb64a
+ docs/models/tempotracespan.md:
+ id: b7d1c4f02519
+ last_write_checksum: sha1:e7dc0b212731b706d7e3b475bcd0d41826ff8ae4
+ pristine_git_object: f657acc6870a7a9f024c0763716e4ecea1111adf
+ docs/models/terminateworkflowexecutionv1workflowsexecutionsexecutionidterminatepostrequest.md:
+ id: 197e0d3cf762
+ last_write_checksum: sha1:1ff1f9ac91867332457179b31786f013d6923b5c
+ pristine_git_object: 227066fb06da0b4cd22b923ca466e10087fa5467
docs/models/textchunk.md:
id: 6cd12e0ef110
last_write_checksum: sha1:d9fe94c670c5e0578212752c11a0c405a9da8518
@@ -2059,6 +2601,18 @@ trackedFiles:
id: 07234f8dd364
last_write_checksum: sha1:90c0b34284137712678b0671e9f4bfb319548cbf
pristine_git_object: d9e51d7dc93b24edd807b018393eab38143d46f4
+ docs/models/timeseriesmetric.md:
+ id: 64217edf3511
+ last_write_checksum: sha1:ca9fad09ddf36bb078950120f53d9011a98e0654
+ pristine_git_object: 2b5fc61d3717b4383d4e7b4660ae6a7229774336
+ docs/models/timeseriesmetricvalue1.md:
+ id: 6cb257de9197
+ last_write_checksum: sha1:86c5cf46002459d8f86d1fd197f181bba7b12246
+ pristine_git_object: f1783455276c32478d9c22523a3e2f1d1a58b948
+ docs/models/timeseriesmetricvalue2.md:
+ id: 40045842e667
+ last_write_checksum: sha1:0ca94992e7bbfee401a1bafd03b5fd09940f4633
+ pristine_git_object: c6ba5ecaaf5a26b60415b4292757366e9fa95c20
docs/models/timestampgranularity.md:
id: eb4d5a8e6f08
last_write_checksum: sha1:b423f56d3a6004ed89c356806002b2af3e5c5a56
@@ -2207,14 +2761,18 @@ trackedFiles:
id: f99b41288e88
last_write_checksum: sha1:092cd9ae293706419a86fd69c490cbc6587965d3
pristine_git_object: 2f61a2c4d8be1634ccbb1ef0adacde0c6722a9bd
- docs/models/typeenum.md:
- id: d306d1d601a4
- last_write_checksum: sha1:f0f0bfac75bf5e5fcd972d0cb196961d512d40ad
- pristine_git_object: ec47cf9668f839469c9ab71c010f3e1975be929c
docs/models/unarchivemodelresponse.md:
id: a690f43df567
last_write_checksum: sha1:5c9d4b78c92d30bb4835cb724d1ea22a19bf5327
pristine_git_object: 375962a7110f814288ea9f72323383bd8194e843
+ docs/models/unarchiveworkflowv1workflowsworkflowidentifierunarchiveputrequest.md:
+ id: 68ac0e7701d3
+ last_write_checksum: sha1:3ff86e1e4647f0787841ba5f267a16b730c60351
+ pristine_git_object: e25cd4bcc990fc9b6d583b8d771ce9b0c297d391
+ docs/models/unscheduleworkflowv1workflowsschedulesscheduleiddeleterequest.md:
+ id: 40615902c1fe
+ last_write_checksum: sha1:d42a893a1c281ddfc8db957e168d832311917fe8
+ pristine_git_object: c2bc33738e8e34d66da61ae08a496384b6c623f0
docs/models/updateagentrequest.md:
id: 371bfedd9f89
last_write_checksum: sha1:ffa273db325e85b14ba2554b223a2a2de7ad9b8f
@@ -2251,10 +2809,22 @@ trackedFiles:
id: 4be4c812536f
last_write_checksum: sha1:600212552869daf41c872ff6ec7a4cbff1e00b63
pristine_git_object: 78eae8fb8193b201064416858d719fc28bb964b1
+ docs/models/updatedefinition.md:
+ id: 66f521a5a9f5
+ last_write_checksum: sha1:a5121824ba55698bcb31641641e73e2eeb19136c
+ pristine_git_object: 8cf62617303949d2633773af0268447befb04af2
docs/models/updatedocumentrequest.md:
id: ee4e094a6aa7
last_write_checksum: sha1:4c4d774c67449402eb7e1476b9d0fef5b63f2b99
pristine_git_object: 7e0b41b7be9f559b27a3430f46ed53d0453f6e03
+ docs/models/updateinvocationbody.md:
+ id: d8b3b9cca5ae
+ last_write_checksum: sha1:0fed6d77626ec76d3584e8d38b97ee228218a390
+ pristine_git_object: e810a041e5db77f4d9396075b420600c1d86054d
+ docs/models/updateinvocationbodyinput.md:
+ id: 6c6c00bca06a
+ last_write_checksum: sha1:604242fdb6f47c049b1e5afbe4ddd374a516cb98
+ pristine_git_object: 70665f9ea1871781bc6192b41b1e5b1bff91bf2c
docs/models/updatejudgerequest.md:
id: c220d8a2a289
last_write_checksum: sha1:7c85b0f8b6ca133ca9e4b435322e50f97afd8fbd
@@ -2279,6 +2849,18 @@ trackedFiles:
id: 0b7dfc68573f
last_write_checksum: sha1:53d5d07d53d184d68b57185d251c8d68ff3cf4d4
pristine_git_object: a8ca8e298bab527fe82e3c0dbf076ace0c70da20
+ docs/models/updateworkflowexecutionv1workflowsexecutionsexecutionidupdatespostrequest.md:
+ id: df780faca122
+ last_write_checksum: sha1:d0ebf3489a9b91d85983af31625040583ab0fed4
+ pristine_git_object: 5705af6b2d86aa378f3c54a10e863c731805fa4d
+ docs/models/updateworkflowresponse.md:
+ id: fcf739a23f8a
+ last_write_checksum: sha1:76e12ce365fad9b2d68abbd60b6c54bd30acd01e
+ pristine_git_object: a4e9494e8ec0d6b4cb187c7d1479b3407bd004ba
+ docs/models/updateworkflowv1workflowsworkflowidentifierputrequest.md:
+ id: fc6e47dd1496
+ last_write_checksum: sha1:9184d5f03fa53c41eda416b24ab1e75eef94cc8f
+ pristine_git_object: 51877da9157009f81857f115a84101cc5142c2fa
docs/models/usageinfo.md:
id: ec6fe65028a9
last_write_checksum: sha1:cf71fb9676d870eba7c4d10a69636e1db4054adc
@@ -2339,6 +2921,274 @@ trackedFiles:
id: fc4df52fb9b5
last_write_checksum: sha1:72636dc7ae74264bb5158d284ef6f83da5290b27
pristine_git_object: 4ca7333c412ad819e3e02c61debe402e3f9b0af9
+ docs/models/workflow.md:
+ id: a782201b7327
+ last_write_checksum: sha1:0a6dbea734dc94626fb53ab37c619fef48b9db63
+ pristine_git_object: 4b245e07d66418614e4eadf09bf6203254f0d91e
+ docs/models/workflowarchiveresponse.md:
+ id: e952228d4f02
+ last_write_checksum: sha1:49dbb8dd7c8b3a2442de072bd45244540c79954f
+ pristine_git_object: 6192c2b2c82b3a29e8cefab33067b6e7c23a5d55
+ docs/models/workflowbasicdefinition.md:
+ id: a88b03086ce2
+ last_write_checksum: sha1:7046dd6a11770b56c36e22af07552e28c6cf36f0
+ pristine_git_object: a534ad34da32c4d8eb93f055e1bbf128c5df7e8a
+ docs/models/workflowcodedefinition.md:
+ id: 16ce7d540f36
+ last_write_checksum: sha1:35fce15dd70e7c8eb85b9f54919cca1b0a0e1dcb
+ pristine_git_object: 2034f81fff68465c568b5d189086094c5635bff8
+ docs/models/workfloweventbatchrequest.md:
+ id: 7caeaf762e39
+ last_write_checksum: sha1:28a13d5121fa878814b784ad24dd14c6f2bcaf03
+ pristine_git_object: cf0af15fd4a1f6a977a81b887a06dde3ab2b5d98
+ docs/models/workfloweventbatchrequestevent.md:
+ id: 90eae031de76
+ last_write_checksum: sha1:99a7e986f51ddf240922f5144447ffc0afd021c4
+ pristine_git_object: 98ac66e85eba7b8d979f0fe610f0f4fdd0ac0984
+ docs/models/workfloweventbatchresponse.md:
+ id: 2f360869f319
+ last_write_checksum: sha1:5a8e27b9b398bd298317184f9ad9e5961118b3a0
+ pristine_git_object: 84ab9e3945052b4f675d8468eabcaf118f6b9355
+ docs/models/workfloweventbatchresponsestatus.md:
+ id: 719bafb1fd93
+ last_write_checksum: sha1:84b7cfe727c48142b5ddb921d272171da62e4021
+ pristine_git_object: 47dd74bc0cad72e159d35c8ac74fc58fde59fd95
+ docs/models/workfloweventrequest.md:
+ id: e036e83eefeb
+ last_write_checksum: sha1:384d69360ccf715f947bf8ecea71e0e423a42a2a
+ pristine_git_object: bec821725e8b95714bb668669bed6755ba523162
+ docs/models/workfloweventrequestevent.md:
+ id: ed43510bd1a1
+ last_write_checksum: sha1:f032d827b0ec57bcd023f084b3806f0399d4ebcd
+ pristine_git_object: 2156cd37b95c12d5cce0089bc32d2083b99f8fa8
+ docs/models/workfloweventresponse.md:
+ id: d4bd90a2e937
+ last_write_checksum: sha1:8032460df2a92279008be067f21e15714080a3b2
+ pristine_git_object: e336dfb2a593b739240dfb665aadbcc68c423936
+ docs/models/workfloweventresponsestatus.md:
+ id: 7ffd009a0514
+ last_write_checksum: sha1:4dcecd8af02f1b7142f63ab1e91b2e8792077656
+ pristine_git_object: 684f3fc728a44bb0515dad483d9ea9df32dc78de
+ docs/models/workfloweventtype.md:
+ id: 0ea616e53d9b
+ last_write_checksum: sha1:97e9654b44b627a610b3a30f14b3dc56dab1d725
+ pristine_git_object: ebfe2d86bb5c460219eb9fe9902002817c0f914a
+ docs/models/workflowexecutioncanceledattributes.md:
+ id: 2eb975976d1a
+ last_write_checksum: sha1:63f7d6bbbbca074ebc2988988477d95d2f147c11
+ pristine_git_object: 8c2929eb3de33ac39ebf3b243af978957c6a43f5
+ docs/models/workflowexecutioncanceledrequest.md:
+ id: bcb7b2a53948
+ last_write_checksum: sha1:8ed5c7d5147417a842fc1cc7df18167298d81d29
+ pristine_git_object: e32f3c9d8cdb16de1df9e3aa36dda6b98f6d0da3
+ docs/models/workflowexecutioncanceledresponse.md:
+ id: c56b39b95831
+ last_write_checksum: sha1:3fc990c9fd4b7b3b0368b80840d737cf355a8759
+ pristine_git_object: f9f4c01d263fb222de033128cd43f86c56f2b8cd
+ docs/models/workflowexecutioncompletedattributesrequest.md:
+ id: 073b3acc2ebf
+ last_write_checksum: sha1:47ae4322597198da507a5515c559bd3c32e5e9ef
+ pristine_git_object: 5ab8341c23827306b788e1a351a82594b4736b30
+ docs/models/workflowexecutioncompletedattributesresponse.md:
+ id: ee786dd87f88
+ last_write_checksum: sha1:56f6a37a7e59b6026988373b0d88b09bd6062ee5
+ pristine_git_object: 30a33052d50278e5cb14e08e4631caf68cc8e058
+ docs/models/workflowexecutioncompletedrequest.md:
+ id: 62b0e626688c
+ last_write_checksum: sha1:8a08360f267188b72bbef3ca5f6fb8223c830241
+ pristine_git_object: 02bd2a4452faeb26f7c9b68d90cc33f62ab9864d
+ docs/models/workflowexecutioncompletedresponse.md:
+ id: c1888ba90ec6
+ last_write_checksum: sha1:491656749133e6b0d7714d2a9cbc8de8aa9c92d7
+ pristine_git_object: a4d40dc52ead7c8a525cfd838fc0aa336570fb3b
+ docs/models/workflowexecutioncontinuedasnewattributesrequest.md:
+ id: eb5c1bb4f634
+ last_write_checksum: sha1:561a29856a9f90badf31b2c634e94a6335171d06
+ pristine_git_object: c85a7b3049137295f02b1e9e0c5769f2c98eddb1
+ docs/models/workflowexecutioncontinuedasnewattributesresponse.md:
+ id: 30eef6da0d67
+ last_write_checksum: sha1:10f85d5ec5f9fbb442d3d69319b651604baf9a0b
+ pristine_git_object: b49a25964981bb48a5563b230413cb2ff6c5bd70
+ docs/models/workflowexecutioncontinuedasnewrequest.md:
+ id: a18d2f94b134
+ last_write_checksum: sha1:75368c1e435af748ced2a483799452b2903817d9
+ pristine_git_object: f83f7b516019c4420c5534db9aa7e8b8b8ebebd6
+ docs/models/workflowexecutioncontinuedasnewresponse.md:
+ id: 1ea81bc536b4
+ last_write_checksum: sha1:e65c01b128029322249b28e4152cd60af2443b18
+ pristine_git_object: e6a7212a856f0b89b18c16e7b9f6e93ddbb2e527
+ docs/models/workflowexecutionfailedattributes.md:
+ id: b5ddf561c50e
+ last_write_checksum: sha1:097719213eeb253420f6bc7167f96c58d00bb5b4
+ pristine_git_object: 9fa14fa42e0d11148df4ceb227e35d808b3b7909
+ docs/models/workflowexecutionfailedrequest.md:
+ id: 1f34d50f9f72
+ last_write_checksum: sha1:74d90184e36d11a269d668337c3b5e823dbe5d38
+ pristine_git_object: 160cfd1a8f28adebaa55ce6cc941dde5e467fc2a
+ docs/models/workflowexecutionfailedresponse.md:
+ id: 1c88150815b1
+ last_write_checksum: sha1:92a46ae195053013887496ae827cfd8853e864c5
+ pristine_git_object: 222fbfd75c79bf9868b04affab9d2708f5942f64
+ docs/models/workflowexecutionlistresponse.md:
+ id: a66878b279ba
+ last_write_checksum: sha1:fcdc0fa6d385ee938df2127c7026a93634bccacb
+ pristine_git_object: 3fe61f60d02bb479666a3e589c27c358eb2d4e03
+ docs/models/workflowexecutionprogresstraceevent.md:
+ id: 143ceb0718a6
+ last_write_checksum: sha1:9878b118897957dd4b92163966ea4199c29bc5ff
+ pristine_git_object: ac50a894e5290f07d8531e25663baae090cf533e
+ docs/models/workflowexecutionrequest.md:
+ id: 44a6d9ef046a
+ last_write_checksum: sha1:78e3aeb9a9b13ab01906e9c2d423ac4925332dc4
+ pristine_git_object: 553119b16dbc695eff102eae11e80c4426612e01
+ docs/models/workflowexecutionresponse.md:
+ id: 73595cfc443f
+ last_write_checksum: sha1:3861cb1972cca17c595d1eb23518bfa54c382e89
+ pristine_git_object: e414c47eb7a01e677cef6cc3ebda77fd28566450
+ docs/models/workflowexecutionstartedattributesrequest.md:
+ id: 40b21129ca0e
+ last_write_checksum: sha1:920ab1f7115f6e16d744e450f3210aa37514c468
+ pristine_git_object: a65df97ada042def878557af930d5bdc66143f62
+ docs/models/workflowexecutionstartedattributesresponse.md:
+ id: 384d48b3e104
+ last_write_checksum: sha1:be35194c22fa533235fe930bf9aa83acd5b8ca40
+ pristine_git_object: c28de680eb4e49295610414f052164833e28265a
+ docs/models/workflowexecutionstartedrequest.md:
+ id: 049cff94a306
+ last_write_checksum: sha1:ab51a3214f8083dc79d6e9bfb21216528d40d5ec
+ pristine_git_object: c2bce40e3b280a7a2aa09e812c9aa187a1dabcf3
+ docs/models/workflowexecutionstartedresponse.md:
+ id: 64174669ff20
+ last_write_checksum: sha1:8eee734d26442d9c6e6693386c78eb204a932847
+ pristine_git_object: 7c370d468250ef29319f365957f8eca504f40d19
+ docs/models/workflowexecutionstatus.md:
+ id: 1e988cdad165
+ last_write_checksum: sha1:9478f494d16651522d373f36d0537c0fca703038
+ pristine_git_object: 1be6d741ec3dbda33b167ee7e6fd6ae4295084d8
+ docs/models/workflowexecutionsyncresponse.md:
+ id: de066d2e3d26
+ last_write_checksum: sha1:f3bcd9c25419d4da5c307d2f201517c7b48b5126
+ pristine_git_object: 88cbc21caa9762cfa485366f7e98681f8e482986
+ docs/models/workflowexecutiontraceevent.md:
+ id: 0c99f0388dcf
+ last_write_checksum: sha1:387058f5e007dd8aef7bed906e394bb748149dbf
+ pristine_git_object: b6bb388ca18523ca9b682e5bc4d4cf1c95100fa5
+ docs/models/workflowexecutiontraceeventsresponse.md:
+ id: 4242880e71fc
+ last_write_checksum: sha1:52585c82f6588ee57520355320c156348f694695
+ pristine_git_object: a0e6e4867f9d3e98b1f34a4f7c9462bd90ae7ced
+ docs/models/workflowexecutiontraceeventsresponseevent.md:
+ id: 40012f3fc374
+ last_write_checksum: sha1:b7b2a4544e04bda359d226efc6ae4247678bda9a
+ pristine_git_object: 81c4f573a5af75f2ea8356f8bf23da676700ab8d
+ docs/models/workflowexecutiontraceotelresponse.md:
+ id: 358cf8c2556f
+ last_write_checksum: sha1:b0a9f0bf2236d86d1a6863c2bff41bf58058fa47
+ pristine_git_object: 6218877661371cfe62fb4d24fd3a56fb21c77591
+ docs/models/workflowexecutiontracesummaryattributesvalues.md:
+ id: 50e4fbfd4cf6
+ last_write_checksum: sha1:0486340bda63ec19ddc32dfc49520a4946847321
+ pristine_git_object: c01a122f81fd7bcd6c00a746bc5705c66261bdc6
+ docs/models/workflowexecutiontracesummaryresponse.md:
+ id: 5ac68ea4a1a4
+ last_write_checksum: sha1:3db91df0b8a7404d5fd0ca7e43554cc7e330ac8b
+ pristine_git_object: 3c1ab0e51584fae9398d6d9fb64dbc03149f2f5b
+ docs/models/workflowexecutiontracesummaryspan.md:
+ id: 3851de52d638
+ last_write_checksum: sha1:528371799a9c90c9ddaf5315c2d33b89320688b8
+ pristine_git_object: 8ee487a2091161e674db28a7919d0d3d4cb57e32
+ docs/models/workflowexecutionwithoutresultresponse.md:
+ id: 612e2645e3a3
+ last_write_checksum: sha1:42a987056a79fccc0bf262e46214f5c2f875736a
+ pristine_git_object: f74414a66874be98f9a7c3ddaf938a0d9c0a598c
+ docs/models/workflowgetresponse.md:
+ id: aa8fca163259
+ last_write_checksum: sha1:eba9585e94ebc141c43d39689c31997083dcb615
+ pristine_git_object: ef9baa44ce44b31d7cba47e1d1ce88ffef29c539
+ docs/models/workflowlistresponse.md:
+ id: 00dfc68e54c1
+ last_write_checksum: sha1:ca7ce66386b7cf011d9a906ecd6b70626c5417e4
+ pristine_git_object: d36e48c68f379ac55a9b8f91916e7f7fbd465c63
+ docs/models/workflowmetadata.md:
+ id: 3ae926974727
+ last_write_checksum: sha1:de9267bafa22597a6cbc7f52cf8b4c1744d4b141
+ pristine_git_object: c69cb08b384e5659f6884fa5f1d364ea848be02d
+ docs/models/workflowmetrics.md:
+ id: a8fe312f060c
+ last_write_checksum: sha1:211ed95c429aa5db0b074caaec7b5e1962a03be6
+ pristine_git_object: 4936382b48d568309c80ba1c4c25703ef3b69659
+ docs/models/workflowregistration.md:
+ id: 1d0720b1355b
+ last_write_checksum: sha1:2d4585a731a6c2b3ac693c21da4582c1a70d2984
+ pristine_git_object: aad6831aa880c56f7001a7dfbe2a41bf5e137068
+ docs/models/workflowregistrationgetresponse.md:
+ id: f2658cb29bf4
+ last_write_checksum: sha1:ef021920be1f1cf2016887fb1e3fb7f583c3596f
+ pristine_git_object: c524fc68d9034604c7ab756ee401223b93287f32
+ docs/models/workflowregistrationlistresponse.md:
+ id: 343e0dd93827
+ last_write_checksum: sha1:79c56fad05442fb760af09584fcf6e42b742c002
+ pristine_git_object: e37b36713c38a44b35d0aea4fb96cc8a44658156
+ docs/models/workflowregistrationwithworkerstatus.md:
+ id: a558e858a7a9
+ last_write_checksum: sha1:9eb21c738b1d1ef8421cea8145691943f2d90355
+ pristine_git_object: 625871c171c9a7c6d37e03daf00274155af85218
+ docs/models/workflowschedulelistresponse.md:
+ id: 4c425808aa60
+ last_write_checksum: sha1:d8e8aad7a91d7f8e398f7716109d436df6c4fc86
+ pristine_git_object: 7142185b8081d076cb3b8fcd592234d49e3e0e5a
+ docs/models/workflowschedulerequest.md:
+ id: 5a6b71dbc1a7
+ last_write_checksum: sha1:d053fd9a98d217986d09f941bb3429a9b9123e39
+ pristine_git_object: a6255c75646dc93a333c51ffdb92d80a4ad0ac8f
+ docs/models/workflowscheduleresponse.md:
+ id: ef0b813976ad
+ last_write_checksum: sha1:2da7d2a8519fe915554fdcebc25915ad11fbb203
+ pristine_git_object: 96d7ced717099cc1bbff4ac38d60e7552e9c6204
+ docs/models/workflowtaskfailedattributes.md:
+ id: 7f329c44d7fb
+ last_write_checksum: sha1:432c70ec990e81e2de79c14efbbb6161b60e1b3e
+ pristine_git_object: c6515f84fb66f59e6b35a4524dbd1f3edc73a7ad
+ docs/models/workflowtaskfailedrequest.md:
+ id: 44b5e84af2b2
+ last_write_checksum: sha1:91ea900d446b03f2233227458b6a9b41a4131412
+ pristine_git_object: a39cd97c861a47bc022b3cb4b2e9ec43bcb2a337
+ docs/models/workflowtaskfailedresponse.md:
+ id: 9e6bdbc8ec6d
+ last_write_checksum: sha1:3c83e4760483063bed3f48ec1babddd0b53e9a22
+ pristine_git_object: 731ed219c64f5024884887bbdbd67857b3658707
+ docs/models/workflowtasktimedoutattributes.md:
+ id: e9ab5a25e976
+ last_write_checksum: sha1:52fb7ed9cfce65338b381616afac1dfc1e676d16
+ pristine_git_object: 7f298ea251febc1a35510655d14a84c37172c899
+ docs/models/workflowtasktimedoutrequest.md:
+ id: 23e590babe17
+ last_write_checksum: sha1:70c24971218bc0edb067fc6b8b12765115d0689b
+ pristine_git_object: 6c87b8cef524b65107a6cc60d6f0415e025a3fc4
+ docs/models/workflowtasktimedoutresponse.md:
+ id: f53d1e6e4141
+ last_write_checksum: sha1:8c3355239dce116da33ebc288363b20ddbf3a335
+ pristine_git_object: ea3edade3558a0d95faa58c6d7c592928aae0fd9
+ docs/models/workflowtype.md:
+ id: fdb669e25dee
+ last_write_checksum: sha1:1cb6c274e8bd8942a7064a46a183f402935b0526
+ pristine_git_object: 93aa31f16f086c84fe097f0d0f306f13d5803d06
+ docs/models/workflowunarchiveresponse.md:
+ id: fd8377932c7c
+ last_write_checksum: sha1:50ffff371e778ae77c66128b1673f096157415e3
+ pristine_git_object: 667c93871f80c652ab1f61ce4fbd62252efc19a7
+ docs/models/workflowupdaterequest.md:
+ id: 4dfd044f6694
+ last_write_checksum: sha1:138ba57f1972c97dc1ac9f99b43e0f5c5b09626c
+ pristine_git_object: f8ce41edac51832df67aa8e18c13b43e1694fbab
+ docs/models/workflowupdateresponse.md:
+ id: 61dc42aa3dcc
+ last_write_checksum: sha1:0e9fc12bd0148cc59fd6222e22c3f923f305089d
+ pristine_git_object: 68faf7c0932990ae52eb0ff627544bd76bcb1545
+ docs/models/workflowwithworkerstatus.md:
+ id: 81265aadaf0d
+ last_write_checksum: sha1:f426a9a3a9ba4ded4ab8f330509b12c7a13e0408
+ pristine_git_object: ac6efb9c006c95ffcd50a72710c86140ace4d2c4
docs/sdks/accesses/README.md:
id: 2ea167c2eff2
last_write_checksum: sha1:663516c8c94ca324b938a5d5bd1196663cb1de88
@@ -2349,8 +3199,8 @@ trackedFiles:
pristine_git_object: 46fb60ef4f92d61b2959f1b67e5f07e5bae55ba7
docs/sdks/batchjobs/README.md:
id: a3b8043c6336
- last_write_checksum: sha1:b4b3123ff210545048e2b0c729f2b7e5f7460f4e
- pristine_git_object: 3633fe4ee136c1ac90f9446425f62a0d68fa4f90
+ last_write_checksum: sha1:ecf3800c83f9455471766e0f20a07192e76a736e
+ pristine_git_object: 3e082340c7c8740bd6acd6143dc35a940145ccb6
docs/sdks/betaagents/README.md:
id: 5df79b1612d8
last_write_checksum: sha1:b01e307ad49bb22962062b1c1d3dd58563dea71b
@@ -2373,8 +3223,8 @@ trackedFiles:
pristine_git_object: dc0f4984380b5b137266421e87a1505af5260e89
docs/sdks/connectors/README.md:
id: 7633a87d946d
- last_write_checksum: sha1:d236c2895bbc9f1797817578d3ad8530798af3bb
- pristine_git_object: 99892188389f4d53ddb45459f641475954a1ee03
+ last_write_checksum: sha1:fca5196bf515725bb1a75bbc1db99dfc414f62d6
+ pristine_git_object: f4a0a09713bf1f1e4d3e7fb7c4479415f79a9dc0
docs/sdks/conversations/README.md:
id: e22a9d2c5424
last_write_checksum: sha1:e0b78791f14b76d486688c5aa829877d58ae36da
@@ -2383,6 +3233,10 @@ trackedFiles:
id: deb5d90f4faf
last_write_checksum: sha1:cf41aa56b5fe1296961ddb769b96cb0f451ed2f2
pristine_git_object: c04ced0cf5e5f7774ba9b1d25722085b92b8f0c2
+ docs/sdks/deployments/README.md:
+ id: e7c5559ab768
+ last_write_checksum: sha1:b6f43ed5be5071a808499af105191d2d79e9f81d
+ pristine_git_object: 941b5538a57b3b693914d6b0d2acad2e8bb47611
docs/sdks/documents/README.md:
id: 9758e88a0a9d
last_write_checksum: sha1:a77fb7acf2be6e18a3017855b30f5ad58576698f
@@ -2391,6 +3245,14 @@ trackedFiles:
id: 15b5b04486c1
last_write_checksum: sha1:4a279bf9bcd84a9878ef979c78b8b75af3d52f02
pristine_git_object: cb207d8be2ca86b00dc797fc06eabd1498adb770
+ docs/sdks/events/README.md:
+ id: cf45a4390b9b
+ last_write_checksum: sha1:9305fbb844865a70722fc3d4354537a5e2939e1c
+ pristine_git_object: 1de37e5c9aad042cacde48d885f16210ea08b817
+ docs/sdks/executions/README.md:
+ id: 401745b17323
+ last_write_checksum: sha1:74e97aa7308bebb4865de66110386f3d863049f2
+ pristine_git_object: a2f4aeb06e6b7e609a0def9347864201122a40f0
docs/sdks/fields/README.md:
id: fdb6c4f3bd69
last_write_checksum: sha1:ea6dea75f85d25fd0ccdd2c659ecec43d0e5242d
@@ -2413,8 +3275,12 @@ trackedFiles:
pristine_git_object: 34d167398ac81ce78d50c1498c01f4c74917ce7f
docs/sdks/libraries/README.md:
id: df9a982905a3
- last_write_checksum: sha1:e3eb0e9efb3f758fdf830aa1752c942d59a4f72b
- pristine_git_object: 7df1ef4e26449af572412f052ee7ad189039544f
+ last_write_checksum: sha1:4d37c4de9e0d6e8693ed2f19e6512412eae06932
+ pristine_git_object: 1ae444f1ffa634c276c4695d9d978a6db7c59dc1
+ docs/sdks/metrics/README.md:
+ id: a8545d964e21
+ last_write_checksum: sha1:0c559bd570e162c994c6125058f3712e24bc576b
+ pristine_git_object: 08a5ccb9418788b3f333dcbeddd25e51f71ca104
docs/sdks/models/README.md:
id: b35bdf4bc7ed
last_write_checksum: sha1:ca04fe883c5440abf402640cf26a1a0e9799a55f
@@ -2427,6 +3293,14 @@ trackedFiles:
id: db86bab024d3
last_write_checksum: sha1:2ef5b164016e9b40c27d8a4915aeb8d3d2ed42c7
pristine_git_object: ce8f1f689512a9eac118c05ec1e9acf17e931556
+ docs/sdks/runs/README.md:
+ id: 4598fd39b715
+ last_write_checksum: sha1:e049a4caff6e27e6fc62b44f372c0dd4877c79dc
+ pristine_git_object: bd7865446b10d0ce2728a763a535d919dc2b4446
+ docs/sdks/schedules/README.md:
+ id: 2f28c809a225
+ last_write_checksum: sha1:70cf78f4dd201a70da97aa86a1f5d936dfabbe13
+ pristine_git_object: dd1565e04c21206cb8498f6cf5cea719ee0856ec
docs/sdks/speech/README.md:
id: d5924688d48c
last_write_checksum: sha1:39e91f6d26c0d04212b24de47dbe5b5d5487a208
@@ -2437,8 +3311,16 @@ trackedFiles:
pristine_git_object: 97703c9b4dc942385ee04ae96cbd100c3f632a17
docs/sdks/voices/README.md:
id: 4f1a657c8f68
- last_write_checksum: sha1:31c15c54de42204fcfb348c7ba05494e10305639
- pristine_git_object: 3383e617c3d3733243e28497396b15589c4552b1
+ last_write_checksum: sha1:8ddfebe91ff50ea863d4c0bd85c75458a482667e
+ pristine_git_object: 3b4d135f39bdfc3237c36c03367952eae54bdd48
+ docs/sdks/workflows/README.md:
+ id: 80c76ce944c0
+ last_write_checksum: sha1:572ef0ec9cadcd850e3966dad909a3d2e901280a
+ pristine_git_object: e65383ca453849b3bab58f921f80f5b8e7df7839
+ docs/sdks/workflowsevents/README.md:
+ id: 514b42269280
+ last_write_checksum: sha1:819db9d9561cc4802f139bbd1dec8045b718866f
+ pristine_git_object: bae0cb908bf2dcc0ddabfc340d929eddaea38c97
py.typed:
id: 258c3ed47ae4
last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60
@@ -2465,8 +3347,8 @@ trackedFiles:
pristine_git_object: 036d44b8cfc51599873bd5c401a6aed30450536c
src/mistralai/client/_version.py:
id: cc807b30de19
- last_write_checksum: sha1:4119527b41471c1a6571b0a33c6f5368dbae4077
- pristine_git_object: 68da4e561a187ec72d7a25b962d05112688779ee
+ last_write_checksum: sha1:b98fd6b10eefa6ebd5e051393980c124cd65058d
+ pristine_git_object: 5cebd13059e236d30d9d91cd3fdd5a7da81c42b5
src/mistralai/client/accesses.py:
id: 76fc53bfcf59
last_write_checksum: sha1:4b1cf5d760f690d35582f9037df44c97c11e7e14
@@ -2489,8 +3371,8 @@ trackedFiles:
pristine_git_object: 7e36fd0d73ebeb873f74f4109896a6cf3bb7d2ba
src/mistralai/client/batch_jobs.py:
id: 3423fec25840
- last_write_checksum: sha1:34de0e986e7c0e4377f70125d319e522280c565f
- pristine_git_object: 0e135b30cd122d1a813ee67bf2f9037953448e73
+ last_write_checksum: sha1:cd6fa64476a43c23709383118c39c307acdcd64f
+ pristine_git_object: dc378cde31e59b39f2569e71496b9e8abf8a5056
src/mistralai/client/beta.py:
id: 981417f45147
last_write_checksum: sha1:0971bda6a9024dcbdf8b4aaad1086417b01ee40f
@@ -2517,8 +3399,8 @@ trackedFiles:
pristine_git_object: 67199b601e38dff6fc6a4317eb845fbde6c25de0
src/mistralai/client/connectors.py:
id: 39da03126050
- last_write_checksum: sha1:60d5d04e3650f48487df3cbb682c5646bf3c3a81
- pristine_git_object: e0c4793e40098f9055735edcfce4a1168f99918b
+ last_write_checksum: sha1:61a434ec297f20aea468fe6c34cca60ad5f36bec
+ pristine_git_object: 238f927703af44c9449a1d0ae90eefedbfc43c12
src/mistralai/client/conversations.py:
id: 40692a878064
last_write_checksum: sha1:f5f2e6d1d78b75177d6b831b69b212956926eede
@@ -2527,6 +3409,10 @@ trackedFiles:
id: e5a6ae2a2d85
last_write_checksum: sha1:2483bd56b90599039573c2c152dcbffa8ba8b3b8
pristine_git_object: 48ecbdd8c1b2f42fa77a033aa0e4b4f49d20f088
+ src/mistralai/client/deployments.py:
+ id: a874b267fcb9
+ last_write_checksum: sha1:0ef6db65bcea6d41d204d925281ad0ba6763263f
+ pristine_git_object: a428c61cff07e1d789ec9dfca2a795b2ab4c544a
src/mistralai/client/documents.py:
id: bcc17286c31c
last_write_checksum: sha1:29e7edd6f45d8a1a7ec8e9c9734bebe56ff97e7e
@@ -2563,6 +3449,14 @@ trackedFiles:
id: c489ffe1e9ca
last_write_checksum: sha1:f708168e46c2960dd51896083aee75ccdb36f9dd
pristine_git_object: 25b87255a51021079f8ba5cc60b43509e12f9a4d
+ src/mistralai/client/events.py:
+ id: ac9b961cc70d
+ last_write_checksum: sha1:2b128064a9cf9c1b99552fd2d7ec3e81f9aa6160
+ pristine_git_object: d44c0692278f07df2a1a21bc77f55cfe0ddc65c4
+ src/mistralai/client/executions.py:
+ id: 974004d347a2
+ last_write_checksum: sha1:6e2247d95aa58732722c58b9ce57cfbf9e669c15
+ pristine_git_object: 7112dc1a94cb55a753246f5c7a83ff6a4d469bcc
src/mistralai/client/fields.py:
id: 862335210b20
last_write_checksum: sha1:789ac221e3aec61d0d31abd1761e368e383fba88
@@ -2593,12 +3487,72 @@ trackedFiles:
pristine_git_object: b0eb1ea0965f63efb681d8364ac456b96f1e3983
src/mistralai/client/libraries.py:
id: d43a5f78045f
- last_write_checksum: sha1:6440b3df71fe557ecba5c23768d115efd4ceb26f
- pristine_git_object: b8728362b87349118ac6f163f50613dd18c43340
+ last_write_checksum: sha1:d4bde8bbd8ec4200c119e2c27a2e1f5eb32741ef
+ pristine_git_object: 84624c4dae3b51b1a58537f54bb05902e1edea8c
+ src/mistralai/client/metrics.py:
+ id: 937cb03f8130
+ last_write_checksum: sha1:f218d4230e66e0aebe25291bba5d76df2ce78d5d
+ pristine_git_object: 3df1ca56638d59e5a8bd36fd762e49fd1c5df7b9
src/mistralai/client/models/__init__.py:
id: e0e8dad92725
- last_write_checksum: sha1:299d59da943dc58f2bc4bf694d76d7172cbbd30e
- pristine_git_object: 3ace7584ab83c9ee6d7fddfb5b0211fba63763b8
+ last_write_checksum: sha1:767af34246fb87d24793c8235c2e7a8f3ebdb6bf
+ pristine_git_object: a6ac62a6b71432a3b7b2de1678458f4e7a24a4bd
+ src/mistralai/client/models/activitytaskcompletedattributesrequest.py:
+ id: a9e5ef17794f
+ last_write_checksum: sha1:c9862308564483a8da1e373ab2d94f0eb798a5a9
+ pristine_git_object: 560310cf389f15c34b8d83934caae9975be949c7
+ src/mistralai/client/models/activitytaskcompletedattributesresponse.py:
+ id: 8174941767cc
+ last_write_checksum: sha1:8a22b80fbd7e5ea9a72a34016e68fdb4a375ed75
+ pristine_git_object: 899acb62afc89955ccb4ccb7a4e4d4bb8a4ce424
+ src/mistralai/client/models/activitytaskcompletedrequest.py:
+ id: d051525d65ba
+ last_write_checksum: sha1:e08a49cae43cd75716dc086f608b8e7d2ab6a7db
+ pristine_git_object: fae82f6611122ae9795ffb1a6b006f3fbcffa894
+ src/mistralai/client/models/activitytaskcompletedresponse.py:
+ id: 28373f0a2c9e
+ last_write_checksum: sha1:93e29f246bdade11ed0bcd55e5781fe86d352f89
+ pristine_git_object: b324066b94687faa020c9019721eedb2a1aa6998
+ src/mistralai/client/models/activitytaskfailedattributes.py:
+ id: 12635cd17417
+ last_write_checksum: sha1:d9f8dbbc30720e66e0d3da8046dbdc3a6a350198
+ pristine_git_object: 37749d4a7fe8f6c3dfa007a2a6db3cf480761663
+ src/mistralai/client/models/activitytaskfailedrequest.py:
+ id: 1d4b5b52ae29
+ last_write_checksum: sha1:5a7b26a18786cc170996a412dca46b5fe33ac547
+ pristine_git_object: 1d1e9226f2df4edb1c86f1a6e553e45d45d0d4af
+ src/mistralai/client/models/activitytaskfailedresponse.py:
+ id: b739e8fc9b44
+ last_write_checksum: sha1:a4624cf0d6177e940004e180e1ed372d43cb3534
+ pristine_git_object: 3410ea840d9c740ef8dd74cdee6abd9f2d6c9a6f
+ src/mistralai/client/models/activitytaskretryingattributes.py:
+ id: 9aaa9eecac09
+ last_write_checksum: sha1:811a7631992abea7dbcabc4a33d43b135ca1c4ce
+ pristine_git_object: b0ee11d8ae7005e3c1fb7cb403602841632ad1c6
+ src/mistralai/client/models/activitytaskretryingrequest.py:
+ id: 6f2e394e1e75
+ last_write_checksum: sha1:dd456e903ef879559d6adf5e8920f14f7224b009
+ pristine_git_object: 8c4d84f999647a6bcaa308646d95be2c3e943dbf
+ src/mistralai/client/models/activitytaskretryingresponse.py:
+ id: 3a4ff1e01fa1
+ last_write_checksum: sha1:770c9b60721c8ba1f60f193ec1c4f8ac10a4e075
+ pristine_git_object: cf054a27d05dc1683275094a40336cc6f4164eb5
+ src/mistralai/client/models/activitytaskstartedattributesrequest.py:
+ id: 51b3e5622e0b
+ last_write_checksum: sha1:cd3ba7db9a44cdecf5cb2638d02f6e52b826ea92
+ pristine_git_object: dfa3fa04747a7cd33b764724760baa8d920fba1e
+ src/mistralai/client/models/activitytaskstartedattributesresponse.py:
+ id: 3a365e2c2942
+ last_write_checksum: sha1:7a125f9f9cb1a2eb14bb929742981b488686ab81
+ pristine_git_object: 100626d49be3d849b35e0e041d02f7eb9c7964a2
+ src/mistralai/client/models/activitytaskstartedrequest.py:
+ id: aa5813a6903a
+ last_write_checksum: sha1:8f007fc54025d262d5d5e72b179742d5de5eee08
+ pristine_git_object: 503b3998373b2a35da365f52b7714432297ac318
+ src/mistralai/client/models/activitytaskstartedresponse.py:
+ id: d01ef9cb3955
+ last_write_checksum: sha1:1f1dda76127bdb08549b0dd94f68dafd1d6a71fc
+ pristine_git_object: 75186e027475280fefb1750beb1933bdac4d76cb
src/mistralai/client/models/agent.py:
id: 1336849c84fb
last_write_checksum: sha1:2574866d9855cb0b6314ac94ca288d59f3196c73
@@ -2719,6 +3673,10 @@ trackedFiles:
id: f55510f5ff82
last_write_checksum: sha1:995d3fcf007b0f10eea18bb6745478e7eeb6bce2
pristine_git_object: 10cbaf589e2a4bc1e63b3a02689e76f4d7f1e88a
+ src/mistralai/client/models/archive_workflow_v1_workflows_workflow_identifier_archive_putop.py:
+ id: 0be575ead94b
+ last_write_checksum: sha1:1a28b0b1fc741af5fc2830363f6acbd713a9c0f0
+ pristine_git_object: 8582aaf5a704186cd72f67487db55b90ffd081fd
src/mistralai/client/models/archivemodelresponse.py:
id: 2d22c644df64
last_write_checksum: sha1:e55af09d03586d14e2a52292be9a0d6729f9e1e7
@@ -2761,8 +3719,8 @@ trackedFiles:
pristine_git_object: 1f5a6639b3fae3a5c96cf35bc4417d5d5151a37f
src/mistralai/client/models/basefielddefinition.py:
id: ffa42818fea3
- last_write_checksum: sha1:7d7b08ba19500836193c593bacc1dd6982c9ef71
- pristine_git_object: b57ff2cab05c7a2a7f10292ff1c017eb58a10f84
+ last_write_checksum: sha1:a323bc4117cecf4ffcb228cf896e7ceae02f9b6d
+ pristine_git_object: acf9cf8d6fc3220f736b7c6cfe32d42425febd2a
src/mistralai/client/models/basemodelcard.py:
id: 556ebdc33276
last_write_checksum: sha1:1a1d261bad5394f01bbad562e8eee941014b7d9e
@@ -2775,6 +3733,18 @@ trackedFiles:
id: 1563e2a576ec
last_write_checksum: sha1:3bb5b8a9f479b888efc37cf1ba9731e5efc3d8d0
pristine_git_object: a0add5ad71b1732849fe89de72227e846b3e3b1b
+ src/mistralai/client/models/batchexecutionbody.py:
+ id: 6cfbee9c4ec7
+ last_write_checksum: sha1:b59b61a0a9b3f678918d939448b9a95d07abde7b
+ pristine_git_object: 7d9895e5a7e8bfebc77c2a36cbe190dbbc7eb164
+ src/mistralai/client/models/batchexecutionresponse.py:
+ id: 37e6d5d616fe
+ last_write_checksum: sha1:3118fd4f7196e446101b017ec77667a439da09f6
+ pristine_git_object: a0c1f06e72e744cb74ad495d8a906c4723fcee0a
+ src/mistralai/client/models/batchexecutionresult.py:
+ id: ca6840204f22
+ last_write_checksum: sha1:d8628b48ae4198b487312b70cc425dc810b0ea8c
+ pristine_git_object: ebe41e0110c69a856461fa033ce698cd324e2db0
src/mistralai/client/models/batchjob.py:
id: 85cd28932cc7
last_write_checksum: sha1:d9aab4f4058332de7f8e05ddc0719dbc9b054993
@@ -2799,6 +3769,10 @@ trackedFiles:
id: c91d862fb405
last_write_checksum: sha1:770d93cc4d9b8e23ea781592888be5b811533f21
pristine_git_object: 62427fa0336eab3448064fc1a7a02427d44e309b
+ src/mistralai/client/models/cancel_workflow_execution_v1_workflows_executions_execution_id_cancel_postop.py:
+ id: e26fc5a228af
+ last_write_checksum: sha1:cc7b1b2bb13a1ee8f88f5a485c3f4c7ef7e32875
+ pristine_git_object: 8ba7176cb0213275c463d39fe8fc3ad9b9e5a0ad
src/mistralai/client/models/chatclassificationrequest.py:
id: afd9cdc71834
last_write_checksum: sha1:a29088359142ebd6409f45569168b2096014119e
@@ -2853,8 +3827,8 @@ trackedFiles:
pristine_git_object: 6c7d6231d211977332100112900ea0f8cdf5d84c
src/mistralai/client/models/classifierfinetunedmodel.py:
id: 5a9a7a0153c8
- last_write_checksum: sha1:64ba0eff2b87654e569f6a561ce09eb1d7377437
- pristine_git_object: 7c2bfc80c5881319692a54a13a78a479aa47604a
+ last_write_checksum: sha1:bfd6ffd8323a7b62199c84e64077b26b4c9c72d5
+ pristine_git_object: 35d557aa6c122efe7fd300233f5837e69302fc33
src/mistralai/client/models/classifierfinetuningjob.py:
id: a244d5f2afc5
last_write_checksum: sha1:b65b4bab42100c8e00821e6f81e03d2efe039dde
@@ -2897,8 +3871,8 @@ trackedFiles:
pristine_git_object: 3b90ab0c1ecac12f90e0ae3946a6b61410247e4f
src/mistralai/client/models/completionfinetunedmodel.py:
id: f08c10d149f5
- last_write_checksum: sha1:12ffb210273040ce9aa057a85d348643622c6b59
- pristine_git_object: e75b8d2f0cdaf38c8e1bc583f9b19278fdf04f86
+ last_write_checksum: sha1:da9ba4911148929c202368ec95f5a5bd55f6da21
+ pristine_git_object: 9b420ed73a496a0bfd0309422992178332ec6b48
src/mistralai/client/models/completionfinetuningjob.py:
id: c242237efe9b
last_write_checksum: sha1:653d253e02c252ece805d0172d7e214c474fd074
@@ -2921,8 +3895,8 @@ trackedFiles:
pristine_git_object: 55730274eaa98e7aa53d04ba4b9006a61e427740
src/mistralai/client/models/connector_call_tool_v1op.py:
id: 7948899b3068
- last_write_checksum: sha1:09dfd8f2d560f33fb12cba74cadcd505831d2389
- pristine_git_object: df5783d0a78128863d32c86230e6413bdf80ead9
+ last_write_checksum: sha1:c4388ac1594641c36f97daaedcf42b0e94753be7
+ pristine_git_object: 9c77123e18a86efaa7c0055e0c2a1a7226e7ffd9
src/mistralai/client/models/connector_delete_v1op.py:
id: a377930b1435
last_write_checksum: sha1:2c9a501ab2e2b05829f2e3fd838f88a610d56781
@@ -2937,8 +3911,8 @@ trackedFiles:
pristine_git_object: 780afac9281bf78c4263c7bd32faa04a718695d2
src/mistralai/client/models/connector_list_tools_v1op.py:
id: 4c6ad704479b
- last_write_checksum: sha1:cc6b39ad84831a838e0a70748c31c7acae946cee
- pristine_git_object: eb5de3213915c8d748648796bd5f582acd2d24e3
+ last_write_checksum: sha1:3d509db411bf04de04d21360fd2c98cd7adce855
+ pristine_git_object: 030ffb2c8f8423d840147bbc7f45f623ccfe3731
src/mistralai/client/models/connector_list_v1op.py:
id: 5ec0889995f5
last_write_checksum: sha1:17c0a5965d058a4aacb891d37db3481a907ff091
@@ -3087,6 +4061,90 @@ trackedFiles:
id: 14f3643f7703
last_write_checksum: sha1:2162c7e4b3a9c747c5da88e72315d138f28dea5d
pristine_git_object: f3a1e2ffb1a29e8798a2ac8c9c636ac3bada7413
+ src/mistralai/client/models/customtaskcanceledattributes.py:
+ id: c20b8be67b8c
+ last_write_checksum: sha1:94e8aa92de645553991a5a0940aee90c19a6adc1
+ pristine_git_object: cea2137e5356e636b44a3b1107aa9e29a69fd504
+ src/mistralai/client/models/customtaskcanceledrequest.py:
+ id: 4b4f6b8c8ffd
+ last_write_checksum: sha1:db9c0e11711bd0c973c94ff19bf516cedcd5df90
+ pristine_git_object: 0cb82aea95d197e7f39de308e806ef1f5503d25a
+ src/mistralai/client/models/customtaskcanceledresponse.py:
+ id: 0eeb9d6cf409
+ last_write_checksum: sha1:99a1b1ce86f1636abd7f00e746d183cbe8e5c665
+ pristine_git_object: 7873b3ca5fdb61c6c4d1205894727cd09e8d939d
+ src/mistralai/client/models/customtaskcompletedattributesrequest.py:
+ id: 9ec058aedb96
+ last_write_checksum: sha1:f7fc636cc3e19b9a48b357d87509929164649e72
+ pristine_git_object: 5e344a32379026246738dc5fa98bc507ca67bb93
+ src/mistralai/client/models/customtaskcompletedattributesresponse.py:
+ id: 0b9fb891f354
+ last_write_checksum: sha1:d0c1d0c181c3b0d2c9e73a09a40cd84dd5e49b85
+ pristine_git_object: 8b28d1058d3ffc20d9c86c1cbac03ea343d0947a
+ src/mistralai/client/models/customtaskcompletedrequest.py:
+ id: ca7326e72707
+ last_write_checksum: sha1:0eb76ca446cfa3ad883d80df2129db147915d60a
+ pristine_git_object: eee10e8e20c317b02d30657110d070121c765ea5
+ src/mistralai/client/models/customtaskcompletedresponse.py:
+ id: 719b68571f4c
+ last_write_checksum: sha1:4d8b84868e4d94be8ca9190e29dc8dbf37c442fe
+ pristine_git_object: 5d2a8686342100c46426bc1212868129e54835bd
+ src/mistralai/client/models/customtaskfailedattributes.py:
+ id: ff33698df363
+ last_write_checksum: sha1:dbbd423357a0d653ba20e94737126e9919bf2ff9
+ pristine_git_object: b66d7d3c973307e23a73f45a1e654e375f55140c
+ src/mistralai/client/models/customtaskfailedrequest.py:
+ id: 844b7e2b33de
+ last_write_checksum: sha1:0c194c3b7711b6c27ad8d5e7aec834c637362cc9
+ pristine_git_object: abf2d74f1b1a8f6babb27cf8b0f75d1c6191f608
+ src/mistralai/client/models/customtaskfailedresponse.py:
+ id: 1cde7920833f
+ last_write_checksum: sha1:faa16c185064c28a2c58a4ac9ece342fdf4e1d52
+ pristine_git_object: 1f9835ec2cd05a55d2f545b23a223b0039609805
+ src/mistralai/client/models/customtaskinprogressattributesrequest.py:
+ id: d003c9954634
+ last_write_checksum: sha1:fa48517b90b04ac689a852dfe6b0b3cc32705630
+ pristine_git_object: 5737a4e1995572770b558a3823d3c1474d8f2e0b
+ src/mistralai/client/models/customtaskinprogressattributesresponse.py:
+ id: 895147a1a6a8
+ last_write_checksum: sha1:b78742124f3c95c0b542cb5756aa39e466538a19
+ pristine_git_object: 9b5fd20a3c379c4f06edea49f2a853c745e185be
+ src/mistralai/client/models/customtaskinprogressrequest.py:
+ id: d62648fe4f1e
+ last_write_checksum: sha1:a7dfd8c6465029c70dc72c1cb627c2b87a3c612f
+ pristine_git_object: c2c2faab83cb4b9fb16aaf9d472f1f989d199de7
+ src/mistralai/client/models/customtaskinprogressresponse.py:
+ id: 9d012ecb7626
+ last_write_checksum: sha1:ed0824ea2641df6e5284030f0906ee4d14ce7bde
+ pristine_git_object: 33c126f716d9b63fef593498a2442bfa1637d5dc
+ src/mistralai/client/models/customtaskstartedattributesrequest.py:
+ id: 23ea4ebe9e0b
+ last_write_checksum: sha1:011c21a19451f90ec3177ac7cde70b912f80f900
+ pristine_git_object: db1ecbb6829f22b061688fb8b4a524af72003c3c
+ src/mistralai/client/models/customtaskstartedattributesresponse.py:
+ id: 6b8946c77018
+ last_write_checksum: sha1:25f404c0770dac0b612c67d3445b40fd8659fb59
+ pristine_git_object: 71bed58f6bb9b88110a79dbff38bb32f9fde6090
+ src/mistralai/client/models/customtaskstartedrequest.py:
+ id: 39792cc12bde
+ last_write_checksum: sha1:b2dc31635070213fee4f5db093ada888b8d23d3c
+ pristine_git_object: c4bb6c2ae25bcb34f2db11cf11a82291c1f3215d
+ src/mistralai/client/models/customtaskstartedresponse.py:
+ id: 02b330a5292e
+ last_write_checksum: sha1:676b985755499c4691c1ac615d04fb35481d9a0f
+ pristine_git_object: 59d11fa8319c601ad80482944e6e3aef55257a66
+ src/mistralai/client/models/customtasktimedoutattributes.py:
+ id: 9cc865098add
+ last_write_checksum: sha1:229037a374553cf8523d2c0e0f0aa017adb8790b
+ pristine_git_object: 47517ab1924cfce787805e9a528b04b1d4766a19
+ src/mistralai/client/models/customtasktimedoutrequest.py:
+ id: 75499a6e7c0e
+ last_write_checksum: sha1:b32297b801a02fc119f7d749011e8fbd61700744
+ pristine_git_object: def540f444e6a8b27d8aac76189382f14ff2b405
+ src/mistralai/client/models/customtasktimedoutresponse.py:
+ id: 2dbbc78b85d1
+ last_write_checksum: sha1:da1f0487cccc52c38a8132124efe085d3eeb334f
+ pristine_git_object: 7f274a535271c9e62974eedde427e3df577d6e2e
src/mistralai/client/models/dataset.py:
id: cbf14670ee00
last_write_checksum: sha1:012f85ebfbdb286436621efc3b5b1adb150871f3
@@ -3127,6 +4185,10 @@ trackedFiles:
id: a84ce12a8251
last_write_checksum: sha1:4c0a37f3516ff7198dfe13c0ed04fa4cf48b35fb
pristine_git_object: 3ce383648b1de95099b59b1f4c015a595f24060b
+ src/mistralai/client/models/deletebatchjobresponse.py:
+ id: 79a43aab6cf9
+ last_write_checksum: sha1:9d4916ba4733e777f745c54dd20e71a6a8f4fdd7
+ pristine_git_object: 958f0c4f4dbd989b9a95f0c8d52a279c1d448f0e
src/mistralai/client/models/deletedatasetrecordsrequest.py:
id: e7ef16596e54
last_write_checksum: sha1:971e81be9064277df5dd656e912b9f319474e742
@@ -3143,6 +4205,22 @@ trackedFiles:
id: 68f53d67a140
last_write_checksum: sha1:f1b774b3febc9388ea8175d266f585aa2954af55
pristine_git_object: 3823651b3030944520aad12c7e6f6b4e8bbf2a8c
+ src/mistralai/client/models/deploymentdetailresponse.py:
+ id: 7f4a17a1c7ca
+ last_write_checksum: sha1:c0d41bf5e5fbe06811aa3a6ddeb3b1cfab2ab8b6
+ pristine_git_object: 82f021f187b86ad79d826166d7fe6fd91b915b58
+ src/mistralai/client/models/deploymentlistresponse.py:
+ id: 1f0b404ba621
+ last_write_checksum: sha1:abf42a7e10930c6fa908aece4903f5464f88aef2
+ pristine_git_object: 7926cde725d80b9055401de46ab947c98e8c5a3e
+ src/mistralai/client/models/deploymentresponse.py:
+ id: ea15ddd64402
+ last_write_checksum: sha1:a3dcc989ef05cade526d30b7fd23d3d0737e9f1b
+ pristine_git_object: 7f8ed3a346fa92bf7c7ddfc72984a51442e58d0c
+ src/mistralai/client/models/deploymentworkerresponse.py:
+ id: b11a9947bd19
+ last_write_checksum: sha1:e57ad0af89e80ac9b2b40f9dbe1f6e6708efde3e
+ pristine_git_object: 0adcb4d6ee081b1bb699c1ebb2b8a4cec7bcb771
src/mistralai/client/models/document.py:
id: fbbf7428328c
last_write_checksum: sha1:bc290f10562aaf9c24bd14ab3d2a31e62f535042
@@ -3179,6 +4257,10 @@ trackedFiles:
id: 6d6ead6f3803
last_write_checksum: sha1:b0daee4bd21a441a2e8e47cf8ca8be32d2179869
pristine_git_object: 03b23a7cba53c27ef02f3bc025f38f10d7b98e2d
+ src/mistralai/client/models/encodedpayloadoptions.py:
+ id: 97955ebc2eb9
+ last_write_checksum: sha1:464d35eea8a12a0a1a7a58ec66ffaada14f07bb9
+ pristine_git_object: 5c369046204ed55ac2c5dd63befd7598b7fd35ac
src/mistralai/client/models/encodingformat.py:
id: b51ec296cc92
last_write_checksum: sha1:ea907f86b00323d99df37f7ff45d582aace798e7
@@ -3191,6 +4273,26 @@ trackedFiles:
id: e5a68ac2dd57
last_write_checksum: sha1:0711bc7d48998b1c01e7708e29ae5889f9bea7bc
pristine_git_object: 01157dfab89756ecfb587722270ef3cef8722807
+ src/mistralai/client/models/eventprogressstatus.py:
+ id: 48623263df72
+ last_write_checksum: sha1:e2fd3c6e145e07d247c1ec41adf67996c86696ba
+ pristine_git_object: 6fde1c3913c670a60d3eead5ba0d27f77a281799
+ src/mistralai/client/models/eventsource.py:
+ id: 8b926028b7b2
+ last_write_checksum: sha1:688d7fe14b17595e121bab41319c3630860b0a60
+ pristine_git_object: d0f4d5e3f55ca89569ae90dc96eb036d01bbc8e0
+ src/mistralai/client/models/eventtype.py:
+ id: f70686df1fa5
+ last_write_checksum: sha1:a9f66d0772a83a6f83968d7a6a4877ddcf5e4728
+ pristine_git_object: a85321b2603057de07d343b380e86476f1b70adb
+ src/mistralai/client/models/execute_workflow_registration_v1_workflows_registrations_workflow_registration_id_execute_postop.py:
+ id: 3e2249825144
+ last_write_checksum: sha1:d177be670203df65ed11fb73210d4a5afed1a566
+ pristine_git_object: 2e79ef25f7117584df49c6f1f925a3386360d49b
+ src/mistralai/client/models/execute_workflow_v1_workflows_workflow_identifier_execute_postop.py:
+ id: 5da876c66fc5
+ last_write_checksum: sha1:2a474dbbbd10132f2530e3f09d6a5a87ec9e4aa1
+ pristine_git_object: bd9496a2d969927634c9cea1c294c9d26fc3a87b
src/mistralai/client/models/executionconfig.py:
id: 14518c40a13b
last_write_checksum: sha1:351fb4a74622cb70969b728ac65b62ca670fc7e5
@@ -3203,6 +4305,10 @@ trackedFiles:
id: 22cc29d258db
last_write_checksum: sha1:83a787b852101f1862f0aabc1638f68753b5d607
pristine_git_object: f1600cac74e1d50979277d3eb6830ea5e84b69ec
+ src/mistralai/client/models/failure.py:
+ id: 596e38493eaa
+ last_write_checksum: sha1:5077e5660c7192e2123fc26059f1786f9d75e273
+ pristine_git_object: 85ca77a5713f0fc3d51c5c7fdaa3f06066ac4dbc
src/mistralai/client/models/feedresultchatcompletioneventpreview.py:
id: 19109368b436
last_write_checksum: sha1:2dfbc8aa7110a57f892ad80002cc01bc4c94589d
@@ -3313,8 +4419,8 @@ trackedFiles:
pristine_git_object: ccb0f21b5a69f91119bec9db6e9f3d876e4c35af
src/mistralai/client/models/ftmodelcard.py:
id: c4f15eed2ca2
- last_write_checksum: sha1:1c3ab1bda3fa9e2df33b47a49d5e9b9e2d82a701
- pristine_git_object: bb7c52c881ceadccdeb3590104e774f3d2ad94d7
+ last_write_checksum: sha1:4382bdc5744ed2199c0aefcf32d8d9e0d02c42f4
+ pristine_git_object: 922667b0f7726824d0de4b18eac2ff86b271bccb
src/mistralai/client/models/function.py:
id: 32275a9d8fee
last_write_checksum: sha1:c01c1ed54b3086859cde0dd3c3c466c78d8323ae
@@ -3403,6 +4509,10 @@ trackedFiles:
id: 3e4f4e2447ac
last_write_checksum: sha1:c58e1c3e04892b5bfcadeec7c640c37c8d3da017
pristine_git_object: 1d79f2d59fc1d0f38c66d85cf266bc9700087c37
+ src/mistralai/client/models/get_deployment_v1_workflows_deployments_name_getop.py:
+ id: 3fae92e2573a
+ last_write_checksum: sha1:2257627743f50ff58107762a69aae40ef5528214
+ pristine_git_object: 1f699bed56b6a40166274492d7b2ca4ff84a8f61
src/mistralai/client/models/get_judge_by_id_v1_observability_judges_judge_id_getop.py:
id: 4201c3c5a891
last_write_checksum: sha1:44c9a7d21ea727e0f849cfc5dc4a02220bcf6e74
@@ -3411,10 +4521,22 @@ trackedFiles:
id: fa04e3db7781
last_write_checksum: sha1:d59740c9021cd891db4b81a7f4c6aeeecb9d6958
pristine_git_object: b5d0980e197c19eaa70ac26f3b118a8e60364e3e
+ src/mistralai/client/models/get_run_history_v1_workflows_runs_run_id_history_getop.py:
+ id: 9d566ab77998
+ last_write_checksum: sha1:de2b5f449d510a05c9c8805e35f43085e5592e1d
+ pristine_git_object: d974f981d5b3e9055c787fec446e55f642c8823a
+ src/mistralai/client/models/get_run_v1_workflows_runs_run_id_getop.py:
+ id: 60463c59ff01
+ last_write_checksum: sha1:a851312a3e0c91273ad444c6e2cc81c8c541c9cd
+ pristine_git_object: d9b6758f1622c76ad41092f4dbb9c6cf392b299c
? src/mistralai/client/models/get_similar_chat_completion_events_v1_observability_chat_completion_events_event_id_similar_events_getop.py
: id: d651bdc06c1b
last_write_checksum: sha1:25331ac322d230cb7d9fc2a6aff2d7db561fdf2f
pristine_git_object: 7689415dd70da6eec8d0b416cbf020c4b9adeecf
+ src/mistralai/client/models/get_stream_events_v1_workflows_events_stream_getop.py:
+ id: 8dd6ce0e8d66
+ last_write_checksum: sha1:021765a807c5b127c3bfabbf227e952e10f5be88
+ pristine_git_object: 442a7a940b03c27b173281626d612b650a1ade2d
src/mistralai/client/models/get_voice_sample_audio_v1_audio_voices_voice_id_sample_getop.py:
id: a5838063aee4
last_write_checksum: sha1:457e62a76bd229aecf1725c5f813de63e35d8b7e
@@ -3423,6 +4545,50 @@ trackedFiles:
id: b6ca849d5005
last_write_checksum: sha1:ef189175dacf5a9be4295782eebfead3f40ce137
pristine_git_object: bd81442ec3758ba0833ce8d3ad54fbc803748bc5
+ src/mistralai/client/models/get_workflow_events_v1_workflows_events_list_getop.py:
+ id: 9d6d093835d6
+ last_write_checksum: sha1:143b69eded03282526de2164ca95f51915be6a10
+ pristine_git_object: 186c55486ef2f9ba01b4c5ace5b91a416b0a6bcb
+ src/mistralai/client/models/get_workflow_execution_history_v1_workflows_executions_execution_id_history_getop.py:
+ id: 8d636c8cad1e
+ last_write_checksum: sha1:026e7390d2df0f6f67bbd62fe6aa9ebaa0142f76
+ pristine_git_object: 66c6fb065dd395fbcd02031c968d49c757cb8da2
+ src/mistralai/client/models/get_workflow_execution_trace_eventsop.py:
+ id: 75438195bf19
+ last_write_checksum: sha1:3fc47c8b7c301c1029f6a10e30298d8bc776d140
+ pristine_git_object: a60fb536b9e4dd1ab8a2256d1cc28235752c34f2
+ src/mistralai/client/models/get_workflow_execution_trace_otelop.py:
+ id: f24457d6ea21
+ last_write_checksum: sha1:33ddf619eb30b950d5e2120b432a8dc1b3acb10a
+ pristine_git_object: 0abdd75fc1650a95aee925ec26bfd1af8a6e2632
+ src/mistralai/client/models/get_workflow_execution_trace_summaryop.py:
+ id: e44e62e3c444
+ last_write_checksum: sha1:818f5bd1151bd77c9c200f2799e352ae5eab73ab
+ pristine_git_object: 54b080e0b03bd406c805874666e81ee32967bc76
+ src/mistralai/client/models/get_workflow_execution_v1_workflows_executions_execution_id_getop.py:
+ id: 25bc5d3fec8d
+ last_write_checksum: sha1:e7145925a38e3e33c9615e61a880e222eac8a20b
+ pristine_git_object: c99260545f162e2a93c50d26b9a6a07c102e80c8
+ src/mistralai/client/models/get_workflow_metrics_v1_workflows_workflow_name_metrics_getop.py:
+ id: 449550c7f76a
+ last_write_checksum: sha1:93b1174df5a303444fe0d99c881ddec4cac6c788
+ pristine_git_object: 1d9c8989416bf2090476f3ad839c1ad011e522b7
+ src/mistralai/client/models/get_workflow_registration_v1_workflows_registrations_workflow_registration_id_getop.py:
+ id: a6b7d0f559ef
+ last_write_checksum: sha1:881422f47653ae2962f935b039a9c77d347c7b7f
+ pristine_git_object: fe7d639da434356114d8cd5829c0d3fffa776515
+ src/mistralai/client/models/get_workflow_registrations_v1_workflows_registrations_getop.py:
+ id: 822f256b2372
+ last_write_checksum: sha1:364bd7662bed7045624c9a135ba145e55aa5c24c
+ pristine_git_object: 478ee3fb1d86e9421c3219668f8095637a3352ed
+ src/mistralai/client/models/get_workflow_v1_workflows_workflow_identifier_getop.py:
+ id: 097af37374fd
+ last_write_checksum: sha1:962b823e10fb6cc838ecf2b02b0cc284252ee908
+ pristine_git_object: 20d0b6dd4e74c9c39a8b95cb9f9ec590d146574f
+ src/mistralai/client/models/get_workflows_v1_workflows_getop.py:
+ id: a128585aee76
+ last_write_checksum: sha1:b2580366fd3a247b68b1037ed80d7158cd061e51
+ pristine_git_object: 6cbad5e4fdbe4bd86426bec5edbb7a899d091f3c
src/mistralai/client/models/getfileresponse.py:
id: 81919086e371
last_write_checksum: sha1:a116c2fdef65748b5015804fc0eb9860fd2bc3b2
@@ -3503,6 +4669,10 @@ trackedFiles:
id: b56cb6c17c95
last_write_checksum: sha1:21b5794f110c53691654d7195201f9a4b7793f21
pristine_git_object: de2e63472ac53809cfeae200bd7d2f3dcbb70034
+ src/mistralai/client/models/jobs_api_routes_batch_delete_batch_jobop.py:
+ id: 8c43af108342
+ last_write_checksum: sha1:06244690f3de161dc1ba5648070b0212cb2c38d7
+ pristine_git_object: 9fc4a7e77cd756b47cdd1a0c4817cd458fd6a68a
src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobop.py:
id: 36b5a6b3ceee
last_write_checksum: sha1:449dc131e1514edb32107ae1c699544732b313fb
@@ -3543,6 +4713,38 @@ trackedFiles:
id: 6d9dc624aafd
last_write_checksum: sha1:fbacb171b9c75f1fe45406f542a958d10c15fae2
pristine_git_object: 296070b426900305fe4596f03a3c9f081cdb2dcf
+ src/mistralai/client/models/jsonpatchadd.py:
+ id: fb2a1e58a6a5
+ last_write_checksum: sha1:a323e406accd1b25f4e094a54647dd140aab43d5
+ pristine_git_object: d4242f11b063f795b8d677abdd8c19ab0419a0b7
+ src/mistralai/client/models/jsonpatchappend.py:
+ id: 61801f21f4b0
+ last_write_checksum: sha1:f0400cf42fa6c776042487285cf18c1970e17ad6
+ pristine_git_object: 7181ddac1956f1c29b815231f30c3405d4f50754
+ src/mistralai/client/models/jsonpatchpayloadrequest.py:
+ id: 3f10ecfda228
+ last_write_checksum: sha1:93efb55ddcc2cc77f9d70596586061f9d112ea36
+ pristine_git_object: 96a6d689c531f366704cdbd67b36f0c9be8dd9d3
+ src/mistralai/client/models/jsonpatchpayloadresponse.py:
+ id: 1b39f46f529f
+ last_write_checksum: sha1:bf48ca0dd13e374dec8c2ee082fe9b78cab391e5
+ pristine_git_object: 345e6d14ce3244ff4234c80502e553de81ac8dcf
+ src/mistralai/client/models/jsonpatchremove.py:
+ id: e472e5b752ec
+ last_write_checksum: sha1:2dec39f883b362968f8d55b9b7daef6fb40094e4
+ pristine_git_object: 5bcedf879eae1921fd55bc76ba5b5532a2e9641f
+ src/mistralai/client/models/jsonpatchreplace.py:
+ id: 816f9df2f3c9
+ last_write_checksum: sha1:9448e1181ad7913305712966d0984ebad3a9dc4b
+ pristine_git_object: 3dd7aee65f0c4f0a2218762a9b9ea8cd571c406d
+ src/mistralai/client/models/jsonpayloadrequest.py:
+ id: 0d49a02162ea
+ last_write_checksum: sha1:304afed33d57eb0177012ef2d4c08f669cd59014
+ pristine_git_object: 252b8dacd6cdc4b07a46f1feae2fd67f961e5bee
+ src/mistralai/client/models/jsonpayloadresponse.py:
+ id: ba3265f85453
+ last_write_checksum: sha1:9103c6d30a65222458b9bc9b5b15cc88126f4284
+ pristine_git_object: 038ea329340d2e2ecc5d2db503e367e2a279cd1c
src/mistralai/client/models/jsonschema.py:
id: e1fc1d8a434a
last_write_checksum: sha1:f6e3ddb37e55ff27b795389fce6d4f433e7d5639
@@ -3647,6 +4849,10 @@ trackedFiles:
id: d493f39e7ebb
last_write_checksum: sha1:25b3c2c1040cd73ebd6b988b8b27708831affefd
pristine_git_object: 7a51d6053aa2cf2e6524a80487fe9549eec3dfa1
+ src/mistralai/client/models/libraries_list_v1op.py:
+ id: 2d9b1b4deeb0
+ last_write_checksum: sha1:7f853d8cd4a4c26ddb2ff2b6e7c0f35d2a2e540e
+ pristine_git_object: 088a41b2cf773965583fa980f9067381ca40cca8
src/mistralai/client/models/libraries_share_create_v1op.py:
id: feaacfd46dd3
last_write_checksum: sha1:cdb7e60f1aceb2c7aa54fe2b9ba5dafc2bb70995
@@ -3667,14 +4873,22 @@ trackedFiles:
id: 028a34b08f9c
last_write_checksum: sha1:3ac8546f2ada4f9a24d8296e03b9ad61d5f2e372
pristine_git_object: c26710ff4818c9719e08b67d147ee9bded3dc46d
+ src/mistralai/client/models/list_deployments_v1_workflows_deployments_getop.py:
+ id: 0c6586ffcab0
+ last_write_checksum: sha1:c7d7be66560be5bb813316f958a3fecdf7813156
+ pristine_git_object: 066b8db154651681af532e557c60587eddf15536
src/mistralai/client/models/list_models_v1_models_getop.py:
id: 1843a7aa68e5
last_write_checksum: sha1:018c32019aab6c4995285ace8abba54d88f41c0b
pristine_git_object: 1a42d9b0828a2e826948641a7b8be3e5d80e94db
+ src/mistralai/client/models/list_runs_v1_workflows_runs_getop.py:
+ id: 2f1b225158c3
+ last_write_checksum: sha1:50ac1b78fd3397604cb8d90118c16bb17f6da798
+ pristine_git_object: e61140cf55f1284db81359eabc9d9860e2477ea9
src/mistralai/client/models/list_voices_v1_audio_voices_getop.py:
id: 6b3ce5be1294
- last_write_checksum: sha1:8345bd6cbae4c66cbab2ba7c4407451bcdcb4be5
- pristine_git_object: 16ae81c7f0319cc2c6f7744faa4f582c6ba07faa
+ last_write_checksum: sha1:f11a5135ce79d1913db87f9db7d05b4266630b20
+ pristine_git_object: 30161b334339ffb310506b8ab87c40990478b483
src/mistralai/client/models/listbatchjobsresponse.py:
id: 99d94c86a871
last_write_checksum: sha1:01e5fd232c654b3c4a9d86c09f89cfef6f51805f
@@ -3721,12 +4935,16 @@ trackedFiles:
pristine_git_object: 0284cb99375d1205339a5de99a527c212ac2dfbd
src/mistralai/client/models/listlibrariesresponse.py:
id: df556a618365
- last_write_checksum: sha1:55afb46b1fa797bc46574e5256cd063574c6fcbf
- pristine_git_object: 337fe105731d8f3ced1f8f1299ff4081b9d5bfbe
+ last_write_checksum: sha1:f90b903fb4a1ca5504e63350c5353c6eab505761
+ pristine_git_object: be4125f2125eab43cf2682c852f48400a56051d5
src/mistralai/client/models/listsharingresponse.py:
id: 487c6addf089
last_write_checksum: sha1:cebb9e6ab7db8c067a3403211765ebfffec0190a
pristine_git_object: f3e6dc8714311989d1e6c7275c8944e228f3f0c5
+ src/mistralai/client/models/listworkfloweventresponse.py:
+ id: 20a423148117
+ last_write_checksum: sha1:bdf75230150022794f0268453c9f59952580c524
+ pristine_git_object: be99ccea41922c73c7209fbc78393c7ae47e1a3e
src/mistralai/client/models/mcpservericon.py:
id: a5b508a322d7
last_write_checksum: sha1:0dd9bac0684864bf6264ddded7f27c8c88b804f5
@@ -3835,6 +5053,10 @@ trackedFiles:
id: 06bab279cb31
last_write_checksum: sha1:b9158e575276c1e0a510c129347b9a98c5a70567
pristine_git_object: a8a8ec3d8d8a58deb3c1f8358c6dce5a9734f89c
+ src/mistralai/client/models/networkencodedinput.py:
+ id: 6dc5321dbe77
+ last_write_checksum: sha1:5a99ca47003b7bcfee48753dd4683a94eeee1fe1
+ pristine_git_object: 70d9241431ae75d58435965be03089ab8419d32f
src/mistralai/client/models/oauth2tokenauth.py:
id: 167c3b8a104e
last_write_checksum: sha1:146c4ce3a29c0bedcb575cf7fa32cb97aa375c5b
@@ -3951,6 +5173,22 @@ trackedFiles:
id: e6be33f2cd2d
last_write_checksum: sha1:aa71bf6de6316b8754fb18f897b30051a2d4c70e
pristine_git_object: 2c5186c02f6b463c12fd7819637b52c8c7ee4cd7
+ src/mistralai/client/models/query_workflow_execution_v1_workflows_executions_execution_id_queries_postop.py:
+ id: 73826dbd3f5b
+ last_write_checksum: sha1:5105618697e031bae45cf9f6d7c5227d9c08f155
+ pristine_git_object: 5598c4b6f24b0c1a7cc24c6a71b7732aa18e89b2
+ src/mistralai/client/models/querydefinition.py:
+ id: 9648273c8b7d
+ last_write_checksum: sha1:f76a8ac3af27ab1a8ceab6b6befb3af4b4ec919d
+ pristine_git_object: bc9b4f20c754f5e29b2193682376a044186a5a10
+ src/mistralai/client/models/queryinvocationbody.py:
+ id: d78662a1a9bf
+ last_write_checksum: sha1:002769d41d021a34f2adfe0bc7383f02142aaf24
+ pristine_git_object: 550e1d7b584213d84fa9fc367711f3af1f8f0d62
+ src/mistralai/client/models/queryworkflowresponse.py:
+ id: 30281fcedce6
+ last_write_checksum: sha1:fbcc1e870c0a81ff165311519981bd179b4bffec
+ pristine_git_object: aed982d5bb0939985e3966bf54a5a1a43e9b634d
src/mistralai/client/models/realtimetranscriptionerror.py:
id: 8c2267378f48
last_write_checksum: sha1:8bfdc2564fc416b0e1a4cc2e2a3a5437b26cea85
@@ -4003,6 +5241,14 @@ trackedFiles:
id: 3f2774d9e609
last_write_checksum: sha1:1ce68530a46793968f1122d29df722f0a5c9d267
pristine_git_object: fc4433cb4e657b06aa6a4c078094c2df342810e2
+ src/mistralai/client/models/reset_workflow_v1_workflows_executions_execution_id_reset_postop.py:
+ id: 3e9d229cd8ba
+ last_write_checksum: sha1:ef6d5e37d26d9488c657f0fc27e3e3fd4ec34e02
+ pristine_git_object: eda20e536e92f38bf5ffc115da6d7c748c70a4fd
+ src/mistralai/client/models/resetinvocationbody.py:
+ id: 3c0a985a5a77
+ last_write_checksum: sha1:bef8afbe543404fdd79d1b40ea7611f4ac7de3e0
+ pristine_git_object: 26c3389f988818090bc60e17ffb9a65ce992d33c
src/mistralai/client/models/resourcelink.py:
id: 4251cc3c7797
last_write_checksum: sha1:fad9dd6a100450397ee358842a31804c3552ca06
@@ -4043,6 +5289,38 @@ trackedFiles:
id: a9309422fed7
last_write_checksum: sha1:86a61340a647696f6c35a82d945509b1c85aa6f7
pristine_git_object: dfec7cce1e22ab607b6a9e947fa940284426086d
+ src/mistralai/client/models/scalarmetric.py:
+ id: ae8eb1017da6
+ last_write_checksum: sha1:c85c3825e8f60eac8776e912425ed4f43b387ad6
+ pristine_git_object: b9c70a7712022ef4f2f0b9d2e2cdeb3399f79621
+ src/mistralai/client/models/schedulecalendar.py:
+ id: 76d72d187023
+ last_write_checksum: sha1:8f732c72e3b29748d8d913990fe95dadf6a14cc6
+ pristine_git_object: 2cb179eb22e9ac10e820ed276187cd863fc7a440
+ src/mistralai/client/models/scheduledefinition.py:
+ id: 6ea58a356f77
+ last_write_checksum: sha1:d1d24eb6d844bae8c1c31962bf0d60f9d21601d5
+ pristine_git_object: dc622c3a26bd3341f17c95ed6cab5948be5c0fae
+ src/mistralai/client/models/scheduledefinitionoutput.py:
+ id: 69dc15b9a0d6
+ last_write_checksum: sha1:65b865908446dcf7f9a1d45f4b2491157dda644c
+ pristine_git_object: f40470fa3b05672e7817cb996d5ef6bbeb3b961d
+ src/mistralai/client/models/scheduleinterval.py:
+ id: 1d89c2043566
+ last_write_checksum: sha1:8119be5607d6ccad371b83f93b22da6e08c21504
+ pristine_git_object: c01cf8522f743b4b744f02603da7df15311de406
+ src/mistralai/client/models/scheduleoverlappolicy.py:
+ id: a729c26f9c43
+ last_write_checksum: sha1:f39e47b552d05c3bd9534d1c061f811d47d71191
+ pristine_git_object: 13db394715c184da98432af6a971393424bc5197
+ src/mistralai/client/models/schedulepolicy.py:
+ id: f326afe63958
+ last_write_checksum: sha1:47f6665bb77b023658794ef1eac2013d92f3d349
+ pristine_git_object: 9a507656a4da020ea5c7a2d22b81d912ead32022
+ src/mistralai/client/models/schedulerange.py:
+ id: f9d442a062b5
+ last_write_checksum: sha1:763d3229958d166028203998b964abaf73d42a2c
+ pristine_git_object: e30eed16bde2b71981b7b15e9d88a044910c6c3b
src/mistralai/client/models/searchchatcompletioneventidsrequest.py:
id: cabc8ef82d67
last_write_checksum: sha1:3290793dcf229bffc16b16d32d2c599c9a54bf4a
@@ -4079,6 +5357,22 @@ trackedFiles:
id: 2439b732dfae
last_write_checksum: sha1:05e36cba36203bcc579a3b02538aed570cf22352
pristine_git_object: 7c2d435c363bcd98aba97e120954ec1357524e34
+ src/mistralai/client/models/signal_workflow_execution_v1_workflows_executions_execution_id_signals_postop.py:
+ id: 16c54f54e60f
+ last_write_checksum: sha1:57437b8a9d284f7e77459c319908ad0b8db33720
+ pristine_git_object: c61ed019456f82a15c53944dcf0cf122edbdbc0e
+ src/mistralai/client/models/signaldefinition.py:
+ id: f1c1ac98a427
+ last_write_checksum: sha1:d60f213a8d1783df73214a3117761ffdab92d564
+ pristine_git_object: 434e12309e5e3232bc7ece9ec2351e75cf105d6e
+ src/mistralai/client/models/signalinvocationbody.py:
+ id: 0fd96a7c058b
+ last_write_checksum: sha1:13c77816c7846edfe1fbf013c2864c20e9775c1b
+ pristine_git_object: 3b7a1ff1d590cf2a6b4df68016c56a01677808ba
+ src/mistralai/client/models/signalworkflowresponse.py:
+ id: e1844a7da20b
+ last_write_checksum: sha1:c891e5a904c040bcf2a5d46fe9851575890ae8d3
+ pristine_git_object: 2d9ea356fb09d47e53274453476bd04eb6403e54
src/mistralai/client/models/source.py:
id: fcee60a4ea0d
last_write_checksum: sha1:4d4277d75f7ce001780a069898b38afa7c8addc0
@@ -4111,6 +5405,18 @@ trackedFiles:
id: 1733e4765106
last_write_checksum: sha1:3c79fc7c43cd018fba4950ba013ed15899b82ebf
pristine_git_object: 0add960bc93f53df5ddda94892543a0857f32dd6
+ src/mistralai/client/models/stream_v1_workflows_executions_execution_id_stream_getop.py:
+ id: 793a9301522f
+ last_write_checksum: sha1:c05e4bedbc131352b0b275e2bef7eb2f541433b0
+ pristine_git_object: 5282e52aeb9c3dc5729b5cd54502802bc9c514e1
+ src/mistralai/client/models/streameventssepayload.py:
+ id: c6becbbd80bc
+ last_write_checksum: sha1:c54a8dc9c54e62937b61b52e2b5ce3ff3c422ce7
+ pristine_git_object: 2c662a65bc6e419c3402e42755c0365cc30d9541
+ src/mistralai/client/models/streameventworkflowcontext.py:
+ id: 14c00c79de78
+ last_write_checksum: sha1:ea0a2d8e68a44783e47dc532307c7a2b1f717805
+ pristine_git_object: f24de860f5ad2ca115bac68b494caba4cd6ee800
src/mistralai/client/models/systemmessage.py:
id: 500ef6e85ba1
last_write_checksum: sha1:a88de3fc70adab47943f867336659b3a1a6cdae0
@@ -4119,6 +5425,58 @@ trackedFiles:
id: 297e8905d5af
last_write_checksum: sha1:e5695ca0ebdb0f02f3a0c527015df154a0c52b7f
pristine_git_object: d480a219e935aaea91adc320de0003b562c0bbb5
+ src/mistralai/client/models/tempogettraceresponse.py:
+ id: 8bb3c013aa76
+ last_write_checksum: sha1:ff35ba718f008554d79defc41dc02af5115c7ab0
+ pristine_git_object: 8575b95430e10a9b8d7e1df0f0e131cda68bb685
+ src/mistralai/client/models/tempotraceattribute.py:
+ id: 7d0ec3402dc0
+ last_write_checksum: sha1:6d75f1b8a97ca30fbd0d4c56a0e9a42a15c0df5a
+ pristine_git_object: 71c1b1f2ebb6a8211693d7b58d83a7fee9854cc4
+ src/mistralai/client/models/tempotraceattributeboolvalue.py:
+ id: c7c383a6c05f
+ last_write_checksum: sha1:a40945504a6845a3e43a80d5f901af9f5c5ee96b
+ pristine_git_object: 72fb79d022533d8a7af27c5d7e91258e7cf3a3b5
+ src/mistralai/client/models/tempotraceattributeintvalue.py:
+ id: c4659ad7a2a5
+ last_write_checksum: sha1:471273678346f841dd718fce1899a6ae8ad7cd1d
+ pristine_git_object: 6ffe7efe56d3b050b0ecd3db8f6d24d2d37b4854
+ src/mistralai/client/models/tempotraceattributestringvalue.py:
+ id: 2dcdd05a5115
+ last_write_checksum: sha1:67aea0de94db0eb1127f22f0eeafe28bb8a125be
+ pristine_git_object: f4dea6395300960e835d56aa47d4ead96536c9af
+ src/mistralai/client/models/tempotracebatch.py:
+ id: 969acd9d6220
+ last_write_checksum: sha1:8e865d710af9d3a7627423163857a7bf19e7f60e
+ pristine_git_object: 7f50804747fd972ba45bf70bf621d92031e2ef0b
+ src/mistralai/client/models/tempotraceevent.py:
+ id: 98dd1b838524
+ last_write_checksum: sha1:c0153772618cf57c4c3f7830519ca7e7310fc5af
+ pristine_git_object: bda2115aac42c4b0b195143abe2ed9d610053db4
+ src/mistralai/client/models/tempotraceresource.py:
+ id: cc8a7ff3feea
+ last_write_checksum: sha1:3fb070d82116606740a3b6433b97b65ac80ffd79
+ pristine_git_object: 42c4d56ade5183234c30a88ba3cac010bdd6ee50
+ src/mistralai/client/models/tempotracescope.py:
+ id: a2da1a3b8198
+ last_write_checksum: sha1:ea6ee30b2e080440e72fa9a4eb3c592d2bf52fe9
+ pristine_git_object: 3e302f81252b0628eb0046777825e8bde71ea98d
+ src/mistralai/client/models/tempotracescopekind.py:
+ id: 40c697c1e617
+ last_write_checksum: sha1:b0298aaf269df98bba2bae72e8d47ada6aaa2433
+ pristine_git_object: a26d5c38c5dbf1605baaa96bb29ec3d1c3ca6c29
+ src/mistralai/client/models/tempotracescopespan.py:
+ id: cb248e9c0a00
+ last_write_checksum: sha1:7f3742ddf7d4baf8e91182d920206ae3e505c4ee
+ pristine_git_object: 362f3c9974a8183cbe5f59a42f164f6f2d1d5542
+ src/mistralai/client/models/tempotracespan.py:
+ id: f36568c83a96
+ last_write_checksum: sha1:a533d06b305ca4076ff40faaa9ce5546cec50537
+ pristine_git_object: 51b181ba48d4040e0c48c41960b9076b57f95e4b
+ src/mistralai/client/models/terminate_workflow_execution_v1_workflows_executions_execution_id_terminate_postop.py:
+ id: 458eee7d2603
+ last_write_checksum: sha1:51c88c99243e964d8b044a1e25a78dff85145e7c
+ pristine_git_object: 771c165035c62d9963b6b9c71a00e8e948ac6661
src/mistralai/client/models/textchunk.py:
id: 9c96fb86a9ab
last_write_checksum: sha1:40fa1ea5dcbca9f4c534b58f7cb65bbaa5084521
@@ -4135,6 +5493,10 @@ trackedFiles:
id: 294bfce193a4
last_write_checksum: sha1:4563b1759e8b64a9ed1de3843ea73ffb8f2e3285
pristine_git_object: ae084a179c3227351d0ca8b10c926c6a61d21221
+ src/mistralai/client/models/timeseriesmetric.py:
+ id: 7f91751795ac
+ last_write_checksum: sha1:e7871d8fa64b59e8448f0ae3be46f02c4d02eac8
+ pristine_git_object: a0eb0c9ca86791401aeb1df0d851b3a42be1bef7
src/mistralai/client/models/timestampgranularity.py:
id: 68ddf8d702ea
last_write_checksum: sha1:64e7b198a75f026590e26758112651d31984076f
@@ -4251,10 +5613,18 @@ trackedFiles:
id: cdb07c3837f7
last_write_checksum: sha1:c2339efbf6dac10bf5e3f42491fa971982d4a998
pristine_git_object: bcd4d5f2da000bcc4af9c41bf7e92efdaf99b039
+ src/mistralai/client/models/unarchive_workflow_v1_workflows_workflow_identifier_unarchive_putop.py:
+ id: 9df426343c2c
+ last_write_checksum: sha1:12d529f6703695653004ac5692f13daab13bf060
+ pristine_git_object: 3dc55512ab45c4b7617dd0cf2e464a3f53921c8b
src/mistralai/client/models/unarchivemodelresponse.py:
id: 22e2ccbb0c80
last_write_checksum: sha1:8ebdd49f2bef3c5934395808bf71701b46e512be
pristine_git_object: 5cab4c2ef093236cacdc2a9e24b27c1a6c643ee9
+ src/mistralai/client/models/unschedule_workflow_v1_workflows_schedules_schedule_id_deleteop.py:
+ id: 114f67717003
+ last_write_checksum: sha1:f80651b63d89ef19a070c1ae7901b021914a0ddd
+ pristine_git_object: eac992d5936dd5dc935623babfa22177f0e0adb7
src/mistralai/client/models/update_dataset_record_payload_v1_observability_dataset_records_dataset_record_id_payload_putop.py:
id: fa5d55a9d6cf
last_write_checksum: sha1:5cf094a6617b1900745deece2e34edfb1b040905
@@ -4275,6 +5645,14 @@ trackedFiles:
id: 399da05bd75e
last_write_checksum: sha1:aca7b6539571a8111e8f1abb7f90dde1e461d4bc
pristine_git_object: a11b626a9b4ec3b1761bb383dda2cead843ee634
+ src/mistralai/client/models/update_workflow_execution_v1_workflows_executions_execution_id_updates_postop.py:
+ id: 5799cc4ab66e
+ last_write_checksum: sha1:ce9516f04804a7aa692ce5525e0c094bfc8167c9
+ pristine_git_object: 30ff2baef59c3acdcd3b9e3dda3fd15d01490338
+ src/mistralai/client/models/update_workflow_v1_workflows_workflow_identifier_putop.py:
+ id: c537bd5a9dd1
+ last_write_checksum: sha1:2475d631b74ec49cf8420227151e13083a6ffdb0
+ pristine_git_object: 9ee9b6d2db84a56c401c91e9484cea378ecc38dd
src/mistralai/client/models/updateagentrequest.py:
id: 914b4b2be67a
last_write_checksum: sha1:0917571c77c739ae6c158b2adcd5df7ebc332f4f
@@ -4295,10 +5673,18 @@ trackedFiles:
id: bbb067caa23f
last_write_checksum: sha1:4ae8e1ea4a6a9ec5ed559bef8fe21d11af2b0ed3
pristine_git_object: ddbaad1237d2056476c4fe7f8a6caba06a65f744
+ src/mistralai/client/models/updatedefinition.py:
+ id: 143f97683a02
+ last_write_checksum: sha1:932dbdf03be9a321795c14142ddf3729c1e0f111
+ pristine_git_object: e878c8764774e4420a85145196cfb93c32d108fa
src/mistralai/client/models/updatedocumentrequest.py:
id: a8cfda07d337
last_write_checksum: sha1:57b5a2c77ddcf2e09a8eead6b4f5e5c694e21844
pristine_git_object: 12d9c89b9ab67f6d7e69c26c31da76e7aaf1e817
+ src/mistralai/client/models/updateinvocationbody.py:
+ id: b8558eff0be0
+ last_write_checksum: sha1:ea486ca10c1165b9f3dd40c005cc98bc6b0e937f
+ pristine_git_object: a300bd20073a2ddfc9b3bd4844aae9f3915e2f9c
src/mistralai/client/models/updatejudgerequest.py:
id: f6ad6fb901a0
last_write_checksum: sha1:0cc5d951aa36d1ba6cf82020d9ade4ac85bc3a94
@@ -4311,6 +5697,10 @@ trackedFiles:
id: fe649967751e
last_write_checksum: sha1:b5ce56ef430768ee806b45d9c2e4c405764a1ef5
pristine_git_object: 0179ba140d7d598a4a363ddfab67197e56857964
+ src/mistralai/client/models/updateworkflowresponse.py:
+ id: f5dcf717a0a1
+ last_write_checksum: sha1:f0da842336a6aef26bab3fab2f0c742deda2653e
+ pristine_git_object: 699bef98d0eddd6d7b4e776c3aaa2896174bc223
src/mistralai/client/models/usageinfo.py:
id: 54adb9a3af16
last_write_checksum: sha1:108b7cd220312ff96d298981e7ada5bc05921cc9
@@ -4359,6 +5749,254 @@ trackedFiles:
id: 26b0903423e5
last_write_checksum: sha1:e486f50095cc7540ce828fff7571c6aa6748bf2f
pristine_git_object: f82b6ec1c89cae83ea21d9bae12a1984679262ae
+ src/mistralai/client/models/workflow.py:
+ id: 1548cd73984e
+ last_write_checksum: sha1:14e1db57af8784d77633d7dc1d22198f2377909b
+ pristine_git_object: 5edf326ce15c6496607fd15f6362ca7aa1936de7
+ src/mistralai/client/models/workflowarchiveresponse.py:
+ id: 64c479b7f9da
+ last_write_checksum: sha1:7e14d02314fcc8dab22a6a908f89122491449151
+ pristine_git_object: 18eeccf267343acdff7cb10db729d38e167ab5a5
+ src/mistralai/client/models/workflowbasicdefinition.py:
+ id: 34623036478d
+ last_write_checksum: sha1:0516152c52c443b8b94c830f81f591798f08ac92
+ pristine_git_object: d2f3db1fb851353d21ce5bbec5e8d2d4b9afabfa
+ src/mistralai/client/models/workflowcodedefinition.py:
+ id: 36fd5b898ddd
+ last_write_checksum: sha1:70e771dc21affc971eb01de18d3c439c1f3cc8c5
+ pristine_git_object: f71b9ff1885114e764665d2620e5c3fc01862ff8
+ src/mistralai/client/models/workfloweventbatchrequest.py:
+ id: c0c0986a6b07
+ last_write_checksum: sha1:b45258b26ecf5b5f6578eeaf3e6fddd9264c753b
+ pristine_git_object: fba4a160d5490e4ae70eda660557ef7a36fda417
+ src/mistralai/client/models/workfloweventbatchresponse.py:
+ id: 4cafe49944be
+ last_write_checksum: sha1:7d4b3a64acf8cc98b32b418adcb7a5141f731d9f
+ pristine_git_object: 0aa842a34158f4413232c76edc78a00b3e6fbbc9
+ src/mistralai/client/models/workfloweventrequest.py:
+ id: f41edbb269a4
+ last_write_checksum: sha1:bb6092cf910c2b77bb186dd62040d055bce5ac6b
+ pristine_git_object: 0a2c757947e654d257c5733f7f9e4a22e77bfe0a
+ src/mistralai/client/models/workfloweventresponse.py:
+ id: e1a984989f65
+ last_write_checksum: sha1:0724ac05bed376ec722f376bc57ec8091ae70c6e
+ pristine_git_object: 4649da8096cc06140c7a910bce2cc16ad7d77101
+ src/mistralai/client/models/workfloweventtype.py:
+ id: b4aeeb03b57a
+ last_write_checksum: sha1:451a78dadc17941f397d73ff83c14d552f7150d7
+ pristine_git_object: 8c386b01b02e61266dd43f53c003571d5f3adcc8
+ src/mistralai/client/models/workflowexecutioncanceledattributes.py:
+ id: c0802a5de5e5
+ last_write_checksum: sha1:954a39fa24c0f24e603d66532fb18f31f9b06eec
+ pristine_git_object: 6c06bfa93bd601b4c2b6f5358d24045dc6fc3d5b
+ src/mistralai/client/models/workflowexecutioncanceledrequest.py:
+ id: 7bdfdeddd5c4
+ last_write_checksum: sha1:08dd298674384fb349c1747dca0f386cba3bad06
+ pristine_git_object: 09873446f6c24e77a6022a3120c9e3ea955338b5
+ src/mistralai/client/models/workflowexecutioncanceledresponse.py:
+ id: bcc392d67222
+ last_write_checksum: sha1:6fcd8350bcc05ff4eb87a3587bb8d2eefa11d227
+ pristine_git_object: ee1c4e71acecff7f4a94078aae1560ca43c36e31
+ src/mistralai/client/models/workflowexecutioncompletedattributesrequest.py:
+ id: 9d69c2f471c1
+ last_write_checksum: sha1:34cda256b1ebf0c40adbe926e1c45ec7882e145d
+ pristine_git_object: fb48a0fccf1ab5aabbb501e696d92aba0f5128a5
+ src/mistralai/client/models/workflowexecutioncompletedattributesresponse.py:
+ id: f831331b0eb1
+ last_write_checksum: sha1:113cbedfc242da04a708e29b23a55742bc0de084
+ pristine_git_object: 3afc8dcf36f4f5fea19263095c14b245b32b76f9
+ src/mistralai/client/models/workflowexecutioncompletedrequest.py:
+ id: 54326e5805ed
+ last_write_checksum: sha1:bf8e762ca99b13534e3f5f1202b4a99b183f5916
+ pristine_git_object: 81bde0f107733777aaa8330320aaa1c02ee57945
+ src/mistralai/client/models/workflowexecutioncompletedresponse.py:
+ id: a1bec20dfb0e
+ last_write_checksum: sha1:c6c891ed6fdae6204c47d425e0391e2163f20a2e
+ pristine_git_object: 17716668a30d68aad23a98d312e965171ca71306
+ src/mistralai/client/models/workflowexecutioncontinuedasnewattributesrequest.py:
+ id: 8517ed95b5c7
+ last_write_checksum: sha1:cdd83d80b7ce55f4b1258da0a47ef7b86ecfcc16
+ pristine_git_object: 1aba37ae6d2b7b64e2b223143f7b18ba55ab3572
+ src/mistralai/client/models/workflowexecutioncontinuedasnewattributesresponse.py:
+ id: 2dd61dcd7b48
+ last_write_checksum: sha1:e7737a3bb65c1ca887c0f28a1e18102d80755e04
+ pristine_git_object: 943e5ebc979c9381d36e92e1e94d818bb17a83bf
+ src/mistralai/client/models/workflowexecutioncontinuedasnewrequest.py:
+ id: d1057d583b8c
+ last_write_checksum: sha1:6a114eb3f253654d038fe1d15712560f05527f91
+ pristine_git_object: f8c9460423dcc8ca16e91542fe853f20930a7ae9
+ src/mistralai/client/models/workflowexecutioncontinuedasnewresponse.py:
+ id: be26cd87dcb3
+ last_write_checksum: sha1:4947f8cbfa73eae9607e252e85236894a34123d5
+ pristine_git_object: 0f60a5be6324f370319e05b4a573e5cd157c0450
+ src/mistralai/client/models/workflowexecutionfailedattributes.py:
+ id: dbb7fb36a4fd
+ last_write_checksum: sha1:d2922a566ccc93b7979cb9411ef46882606b76c7
+ pristine_git_object: 1e61f3444594307656141cc999cf19f4d66763fa
+ src/mistralai/client/models/workflowexecutionfailedrequest.py:
+ id: 873155c8e314
+ last_write_checksum: sha1:d050e259874c24a6812a0ee45f869b19b3d588ff
+ pristine_git_object: 5c4e445a6cc05804b7ce0c905c088922a1121d12
+ src/mistralai/client/models/workflowexecutionfailedresponse.py:
+ id: a0836009f9de
+ last_write_checksum: sha1:07ea35829fb0411f39c019b42d17807d0aec02c2
+ pristine_git_object: 687d33a9199612bbb8b5db5f6d2be2421469aeb5
+ src/mistralai/client/models/workflowexecutionlistresponse.py:
+ id: c2b2e1ab4821
+ last_write_checksum: sha1:e264605252d0dc594cafea0e4713015cfa7ca925
+ pristine_git_object: 442ed9728bcf2c09fb051b892a7a226b7921e993
+ src/mistralai/client/models/workflowexecutionprogresstraceevent.py:
+ id: 6ea7078aaca3
+ last_write_checksum: sha1:5af64f5a2d76bde2a1a51e3b543db894c2045a44
+ pristine_git_object: ffebdb82a1a80d1d086b919da9b33e2f090388c4
+ src/mistralai/client/models/workflowexecutionrequest.py:
+ id: 806340497ed4
+ last_write_checksum: sha1:43851e98d4c14ebdda88c5ab4412ae3a7a1d0002
+ pristine_git_object: bf6a5fa0cddf979efb8ad32df77affac7f7156e8
+ src/mistralai/client/models/workflowexecutionresponse.py:
+ id: 758786637be5
+ last_write_checksum: sha1:2544bf94e8d64a9bd33f37ff4737f320fcd84733
+ pristine_git_object: 84398375dde630f5aa2dd6363bb3fff43c1766f3
+ src/mistralai/client/models/workflowexecutionstartedattributesrequest.py:
+ id: ee480cd77d79
+ last_write_checksum: sha1:14e5ef6b80a358bf042e8ec64d2be5e78e214a90
+ pristine_git_object: ff74345f2b305e6db8737bfa3991f00769462b90
+ src/mistralai/client/models/workflowexecutionstartedattributesresponse.py:
+ id: d2276919a895
+ last_write_checksum: sha1:5861816b3b992c9a7e2dd044b56307d677f2fc10
+ pristine_git_object: 74e55c2977c6357077d414f799c59c7854fa2d08
+ src/mistralai/client/models/workflowexecutionstartedrequest.py:
+ id: 15e73dddf8c4
+ last_write_checksum: sha1:294047bce91d82283e0d5c929b169f280e3eb55d
+ pristine_git_object: 0ce8bbce27d48a88bfec92181eb190e5d1f29def
+ src/mistralai/client/models/workflowexecutionstartedresponse.py:
+ id: e1a597c911ea
+ last_write_checksum: sha1:9c761698883844061bf5cae76e3bc6444941124e
+ pristine_git_object: fa1e46a07b931f35b46324d9b2e5c111b359889d
+ src/mistralai/client/models/workflowexecutionstatus.py:
+ id: 56a16810d5de
+ last_write_checksum: sha1:71560f9e7a0222734e176a362d3188e703dfbbe2
+ pristine_git_object: 611f3aa1ad149e30b577e5949be1e948c979d833
+ src/mistralai/client/models/workflowexecutionsyncresponse.py:
+ id: ef77c54a11b5
+ last_write_checksum: sha1:e89046ec323523aae1ef4fb9be7e19328ff84f01
+ pristine_git_object: 6ed0f038c27085bbb8896b570dcc3302db137b90
+ src/mistralai/client/models/workflowexecutiontraceevent.py:
+ id: 584e0a98082a
+ last_write_checksum: sha1:5de35701fd0bd92ec997603d1a6cb30c7597dd17
+ pristine_git_object: f74cd5ab6aeadf662e90a46b394621a631602372
+ src/mistralai/client/models/workflowexecutiontraceeventsresponse.py:
+ id: 94d92762ccb7
+ last_write_checksum: sha1:2cf2e1cf393c7295af7b32194211b5ff41c6946e
+ pristine_git_object: 0c24a1eaeb338b14da62ed40a4a7e67e631e1589
+ src/mistralai/client/models/workflowexecutiontraceotelresponse.py:
+ id: 7531bf461dc2
+ last_write_checksum: sha1:eff6ffecd0fb5643747ef9f87c4c4a6a499c0035
+ pristine_git_object: b4320b8324c712380acda8edbc1fedbe49954c73
+ src/mistralai/client/models/workflowexecutiontracesummaryattributesvalues.py:
+ id: 5e3448a39a40
+ last_write_checksum: sha1:b9c8999af86024aaa6533aed348315326658354e
+ pristine_git_object: e288146db9dcaf3a7216c7e780f9c98ce75e85a3
+ src/mistralai/client/models/workflowexecutiontracesummaryresponse.py:
+ id: 2ea199810f5f
+ last_write_checksum: sha1:9746a523ac969574417442960b8d814e04403e25
+ pristine_git_object: d2cc7c6bdc90ed15e9091cf038e98762430d33af
+ src/mistralai/client/models/workflowexecutiontracesummaryspan.py:
+ id: 750353cbd052
+ last_write_checksum: sha1:ba4f7ecc22fdd36aa3c11af5c1e434bb974d5786
+ pristine_git_object: e80db90fe890726c75f711ea2b52b3008ef949ca
+ src/mistralai/client/models/workflowexecutionwithoutresultresponse.py:
+ id: dd70ba8def79
+ last_write_checksum: sha1:38435a2f9f21037122a13e01d7f036fcfe266e5b
+ pristine_git_object: 082653c8e7fb5a6830e1457d629991cb9a9d4d56
+ src/mistralai/client/models/workflowgetresponse.py:
+ id: 230f55a36ebf
+ last_write_checksum: sha1:935cfce50db82d85ac0e246736c8939bfb015da0
+ pristine_git_object: ef1391c9367ed9adf371131c86045246fcee676d
+ src/mistralai/client/models/workflowlistresponse.py:
+ id: b255b05e7395
+ last_write_checksum: sha1:194172c7c5140f090aa259608cd2d9ab8fd9cdf9
+ pristine_git_object: a6497e825ed2dbdffde6072dc8e708e327c77129
+ src/mistralai/client/models/workflowmetadata.py:
+ id: 0435707d6944
+ last_write_checksum: sha1:141a99ae47e987fdfd9bc01c72170c9ad22fb412
+ pristine_git_object: 597525395aac5d0704b45e09d8552366773a0b92
+ src/mistralai/client/models/workflowmetrics.py:
+ id: 471fb1e10716
+ last_write_checksum: sha1:0835e03836dc9f9bc0eeb953eb0fc42e25445183
+ pristine_git_object: d80bb3db6cbc69404e22007a78bbef209c03c658
+ src/mistralai/client/models/workflowregistration.py:
+ id: 2b937728c88b
+ last_write_checksum: sha1:1695e971dc6aeb546f3a35a8e47252aaa492fb1b
+ pristine_git_object: e23f9fb2987490d8ec3df97ec94f1190141c90c3
+ src/mistralai/client/models/workflowregistrationgetresponse.py:
+ id: f063656f22ae
+ last_write_checksum: sha1:cc7b4d3b71ea8bb8ae452c6fd3e4acc710a18321
+ pristine_git_object: 3105856d933d7fad615fa786f2c29a0d5705ac1a
+ src/mistralai/client/models/workflowregistrationlistresponse.py:
+ id: 3752e5b776db
+ last_write_checksum: sha1:ae185c4823954b7a7c8a43fbf4fd441ed02b3bd9
+ pristine_git_object: e66f219b28849d7a49af7c764389e0d1d1ee7f37
+ src/mistralai/client/models/workflowregistrationwithworkerstatus.py:
+ id: 23b661b9496f
+ last_write_checksum: sha1:a6b74946adb241437f4c2f40056a4b030ded1fc8
+ pristine_git_object: c0d9a69e33992cd94755efa06df8a74406d9076f
+ src/mistralai/client/models/workflowschedulelistresponse.py:
+ id: e5247c5183bb
+ last_write_checksum: sha1:37cea2f0f3220331105f9454169f1ea49ed1b51a
+ pristine_git_object: 3c9eb3d7f445a0ec010340aeae788cf47e034628
+ src/mistralai/client/models/workflowschedulerequest.py:
+ id: 2d8256704c6b
+ last_write_checksum: sha1:bac5dd140544747ebcd15a21da1df52303adaa17
+ pristine_git_object: 2ded66b981775a0d60d04ca568bdc497373d2c04
+ src/mistralai/client/models/workflowscheduleresponse.py:
+ id: c77172c4a9f6
+ last_write_checksum: sha1:dd347319557279367f8ccde9158dec6c4586f528
+ pristine_git_object: d74ba1dae81bccffa812df2a64babad0a61af787
+ src/mistralai/client/models/workflowtaskfailedattributes.py:
+ id: c4c09c4d5ea7
+ last_write_checksum: sha1:b1faee3ed43b88829ca68f5cd642f24d8f1fe9f5
+ pristine_git_object: 49444347a46acf8f24c17a0f2b87c961c79efaca
+ src/mistralai/client/models/workflowtaskfailedrequest.py:
+ id: d26184215fe3
+ last_write_checksum: sha1:bb5f93ed8c401aa0b285998d7c3791b0faa2a1c1
+ pristine_git_object: 9075b0bcb3d3d34c044702a530834049fabc3654
+ src/mistralai/client/models/workflowtaskfailedresponse.py:
+ id: a02b01867b7f
+ last_write_checksum: sha1:47813f0c9b611079c346d5d11a80fbf4a51a3164
+ pristine_git_object: 154145e3604b09a202f51b60101ef166ed69a3ce
+ src/mistralai/client/models/workflowtasktimedoutattributes.py:
+ id: 43369570cb96
+ last_write_checksum: sha1:dd30ece2ca4bd84318494f4f49ca63446677f6d4
+ pristine_git_object: 1824990e1e9bf911e0dc422743268a97091576a7
+ src/mistralai/client/models/workflowtasktimedoutrequest.py:
+ id: 70f37007e50f
+ last_write_checksum: sha1:f47efad477bcea8f80e03f26707c0c193d1b9b54
+ pristine_git_object: 86a6bcdb3f18d2ee420783ac26c9eeb0d97fdb59
+ src/mistralai/client/models/workflowtasktimedoutresponse.py:
+ id: 8b7540c47083
+ last_write_checksum: sha1:6ecfe5907b5cdb05054b8e107977527a584e1b08
+ pristine_git_object: 2c3350e09b75f1ebb212909f252cc48124a9997f
+ src/mistralai/client/models/workflowtype.py:
+ id: 01f37d193b17
+ last_write_checksum: sha1:ec2cf5f7adb51eca3b7bbfc7cef89d4da473b11f
+ pristine_git_object: 67858c88fc20add975c7b7d6ae46a5f19a220ad8
+ src/mistralai/client/models/workflowunarchiveresponse.py:
+ id: 30bdd050feac
+ last_write_checksum: sha1:b2cdd8c427160afb82a0137b3da1e8d6da0b6d0e
+ pristine_git_object: 16717856cc8c70d912af14920b4264b89f0ffc5c
+ src/mistralai/client/models/workflowupdaterequest.py:
+ id: 8f3878d3c7c7
+ last_write_checksum: sha1:d2aaea5e39fbdb61036b65f2c8a303786f62cf83
+ pristine_git_object: 480fa47dee918e3fc7b49246322aa258b791ca05
+ src/mistralai/client/models/workflowupdateresponse.py:
+ id: 0ae165cc7a82
+ last_write_checksum: sha1:21b4509ca190bc1abf18b318ae502faf24408184
+ pristine_git_object: 3336e448baf9f21cf5bfd1369e6268ea3a7d5e24
+ src/mistralai/client/models/workflowwithworkerstatus.py:
+ id: e1055203af7d
+ last_write_checksum: sha1:57d7ac9a4e19ddb50d8dad4c2d68695c1c2acbb7
+ pristine_git_object: 7f469007b8ceb86f2d4faa09d3335ac874a6fe24
src/mistralai/client/models_.py:
id: 1d277958a843
last_write_checksum: sha1:f68fc105aca375b135a00026dbbec818cd55cd73
@@ -4379,10 +6017,18 @@ trackedFiles:
id: 10f90c990bd8
last_write_checksum: sha1:47018c027eb2c9d9235f399b939073e396bd52ad
pristine_git_object: ceb8de4fe3edfbd818f6002381c365ea8437ac2c
+ src/mistralai/client/runs.py:
+ id: 4297d58aeb21
+ last_write_checksum: sha1:779390755335dda530d2e563bbcf38e5ea161009
+ pristine_git_object: 01b667cf9daf9ea37064f8e489fb45f84c87f203
+ src/mistralai/client/schedules.py:
+ id: d3b4fe452390
+ last_write_checksum: sha1:74a1455c5ba17253d2e878f638b7314edca66e21
+ pristine_git_object: d6f2e5ff1a4c985acc48e1ea540537fbcf4bf92d
src/mistralai/client/sdk.py:
id: 48edbcb38d7e
- last_write_checksum: sha1:3520e3487aeacdaf5abd24839e76bb98f1e20b93
- pristine_git_object: 52fc5d9ae1a15a241bd8cf76c29a616bffcb36cd
+ last_write_checksum: sha1:a606046063cd946633918efa1662846c987c9b7e
+ pristine_git_object: 03338f46bd3d32f1770ed9b6e4d30f9d707867df
src/mistralai/client/sdkconfiguration.py:
id: b7dd68a0235e
last_write_checksum: sha1:c6944f12c6fdc992d43db943b24c8c90854cde5e
@@ -4481,8 +6127,16 @@ trackedFiles:
pristine_git_object: 2469a9f310a37a7170b54853715274f13d38901c
src/mistralai/client/voices.py:
id: ab76b1377d79
- last_write_checksum: sha1:3acbc4b91444895e83e96fea25cac00b3d327a5b
- pristine_git_object: 68d4bb5bde46804664ecdc8866621236af089002
+ last_write_checksum: sha1:a71af619fe7d44cb4e537b739cf3ae71d1f8da68
+ pristine_git_object: 2d57185746c0b2deee7c614c69a528c4a6565ee7
+ src/mistralai/client/workflows.py:
+ id: e2a0381191f6
+ last_write_checksum: sha1:95e516c32702cfcf121f8278fcaf1bf6f5847c76
+ pristine_git_object: e2de6524a730c7c0d1877e51dac901e354c4c201
+ src/mistralai/client/workflows_events.py:
+ id: 6d4f674ce8ef
+ last_write_checksum: sha1:3c9d60fa7c49e547a25ae62b941cae7a5437c21c
+ pristine_git_object: 03df3f78288f2b3263bf1ed5ce7aa3a8d138d0f0
examples:
list_models_v1_models_get:
speakeasy-default-list-models-v1-models-get:
@@ -5013,9 +6667,15 @@ examples:
application/json: {"pages": [{"index": 1, "markdown": "# LEVERAGING UNLABELED DATA TO PREDICT OUT-OF-DISTRIBUTION PERFORMANCE\nSaurabh Garg*
Carnegie Mellon University
sgarg2@andrew.cmu.edu
Sivaraman Balakrishnan
Carnegie Mellon University
sbalakri@andrew.cmu.edu
Zachary C. Lipton
Carnegie Mellon University
zlipton@andrew.cmu.edu\n## Behnam Neyshabur\nGoogle Research, Blueshift team
neyshabur@google.com\nHanie Sedghi
Google Research, Brain team
hsedghi@google.com\n#### Abstract\nReal-world machine learning deployments are characterized by mismatches between the source (training) and target (test) distributions that may cause performance drops. In this work, we investigate methods for predicting the target domain accuracy using only labeled source data and unlabeled target data. We propose Average Thresholded Confidence (ATC), a practical method that learns a threshold on the model's confidence, predicting accuracy as the fraction of unlabeled examples for which model confidence exceeds that threshold. ATC outperforms previous methods across several model architectures, types of distribution shifts (e.g., due to synthetic corruptions, dataset reproduction, or novel subpopulations), and datasets (WILDS, ImageNet, BREEDS, CIFAR, and MNIST). In our experiments, ATC estimates target performance $2-4 \\times$ more accurately than prior methods. We also explore the theoretical foundations of the problem, proving that, in general, identifying the accuracy is just as hard as identifying the optimal predictor and thus, the efficacy of any method rests upon (perhaps unstated) assumptions on the nature of the shift. Finally, analyzing our method on some toy distributions, we provide insights concerning when it works ${ }^{1}$.\n## 1 INTRODUCTION\nMachine learning models deployed in the real world typically encounter examples from previously unseen distributions. While the IID assumption enables us to evaluate models using held-out data from the source distribution (from which training data is sampled), this estimate is no longer valid in presence of a distribution shift. Moreover, under such shifts, model accuracy tends to degrade (Szegedy et al., 2014; Recht et al., 2019; Koh et al., 2021). Commonly, the only data available to the practitioner are a labeled training set (source) and unlabeled deployment-time data which makes the problem more difficult. 
In this setting, detecting shifts in the distribution of covariates is known to be possible (but difficult) in theory (Ramdas et al., 2015), and in practice (Rabanser et al., 2018). However, producing an optimal predictor using only labeled source and unlabeled target data is well-known to be impossible absent further assumptions (Ben-David et al., 2010; Lipton et al., 2018).\nTwo vital questions that remain are: (i) the precise conditions under which we can estimate a classifier's target-domain accuracy; and (ii) which methods are most practically useful. To begin, the straightforward way to assess the performance of a model under distribution shift would be to collect labeled (target domain) examples and then to evaluate the model on that data. However, collecting fresh labeled data from the target distribution is prohibitively expensive and time-consuming, especially if the target distribution is non-stationary. Hence, instead of using labeled data, we aim to use unlabeled data from the target distribution, that is comparatively abundant, to predict model performance. Note that in this work, our focus is not to improve performance on the target but, rather, to estimate the accuracy on the target for a given classifier.\n[^0]: Work done in part while Saurabh Garg was interning at Google ${ }^{1}$ Code is available at [https://github.com/saurabhgarg1996/ATC_code](https://github.com/saurabhgarg1996/ATC_code).\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 2, "markdown": "\nFigure 1: Illustration of our proposed method ATC. Left: using source domain validation data, we identify a threshold on a score (e.g. negative entropy) computed on model confidence such that fraction of examples above the threshold matches the validation set accuracy. ATC estimates accuracy on unlabeled target data as the fraction of examples with the score above the threshold. 
Interestingly, this threshold yields accurate estimates on a wide set of target distributions resulting from natural and synthetic shifts. Right: Efficacy of ATC over previously proposed approaches on our testbed with a post-hoc calibrated model. To obtain errors on the same scale, we rescale all errors with Average Confidence (AC) error. Lower estimation error is better. See Table 1 for exact numbers and comparison on various types of distribution shift. See Sec. 5 for details on our testbed.\nRecently, numerous methods have been proposed for this purpose (Deng & Zheng, 2021; Chen et al., 2021b; Jiang et al., 2021; Deng et al., 2021; Guillory et al., 2021). These methods either require calibration on the target domain to yield consistent estimates (Jiang et al., 2021; Guillory et al., 2021) or additional labeled data from several target domains to learn a linear regression function on a distributional distance that then predicts model performance (Deng et al., 2021; Deng & Zheng, 2021; Guillory et al., 2021). However, methods that require calibration on the target domain typically yield poor estimates since deep models trained and calibrated on source data are not, in general, calibrated on a (previously unseen) target domain (Ovadia et al., 2019). Besides, methods that leverage labeled data from target domains rely on the fact that unseen target domains exhibit strong linear correlation with seen target domains on the underlying distance measure and, hence, can be rendered ineffective when such target domains with labeled data are unavailable (in Sec. 5.1 we demonstrate such a failure on a real-world distribution shift problem). Therefore, throughout the paper, we assume access to labeled source data and only unlabeled data from target domain(s).\nIn this work, we first show that absent assumptions on the source classifier or the nature of the shift, no method of estimating accuracy will work generally (even in non-contrived settings). 
To estimate accuracy on target domain perfectly, we highlight that even given perfect knowledge of the labeled source distribution (i.e., $p_{s}(x, y)$ ) and unlabeled target distribution (i.e., $p_{t}(x)$ ), we need restrictions on the nature of the shift such that we can uniquely identify the target conditional $p_{t}(y \\mid x)$. Thus, in general, identifying the accuracy of the classifier is as hard as identifying the optimal predictor.\nSecond, motivated by the superiority of methods that use maximum softmax probability (or logit) of a model for Out-Of-Distribution (OOD) detection (Hendrycks & Gimpel, 2016; Hendrycks et al., 2019), we propose a simple method that leverages softmax probability to predict model performance. Our method, Average Thresholded Confidence (ATC), learns a threshold on a score (e.g., maximum confidence or negative entropy) of model confidence on validation source data and predicts target domain accuracy as the fraction of unlabeled target points that receive a score above that threshold. ATC selects a threshold on validation source data such that the fraction of source examples that receive the score above the threshold match the accuracy of those examples. Our primary contribution in ATC is the proposal of obtaining the threshold and observing its efficacy on (practical) accuracy estimation. Importantly, our work takes a step forward in positively answering the question raised in Deng & Zheng (2021); Deng et al. 
(2021) about a practical strategy to select a threshold that enables accuracy prediction with thresholded model confidence.\n", "images": [{"id": "img-0.jpeg", "top_left_x": 292, "top_left_y": 217, "bottom_right_x": 1405, "bottom_right_y": 649, "image_base64": ""}], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 3, "markdown": "", "images": [], "dimensions": {"dpi": 539192, "height": 944919, "width": 247256}}, {"index": 27, "markdown": "\nFigure 9: Scatter plot of predicted accuracy versus (true) OOD accuracy for vision datasets except MNIST with a ResNet50 model. Results reported by aggregating MAE numbers over 4 different seeds.\n", "images": [{"id": "img-8.jpeg", "top_left_x": 290, "top_left_y": 226, "bottom_right_x": 1405, "bottom_right_y": 1834, "image_base64": ""}], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 28, "markdown": "| Dataset | Shift | IM | | AC | | DOC | | GDE | ATC-MC (Ours) | | ATC-NE (Ours) | | | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | | | | Pre T | Post T | Pre T | Post T | Pre T | Post T | Post T | Pre T | Post T | Pre T | Post T | | CIFAR10 | Natural | 6.60 | 5.74 | 9.88 | 6.89 | 7.25 | 6.07 | 4.77 | 3.21 | 3.02 | 2.99 | 2.85 | | | | (0.35) | (0.30) | (0.16) | (0.13) | (0.15) | (0.16) | (0.13) | (0.49) | (0.40) | (0.37) | (0.29) | | | Synthetic | 12.33 | 10.20 | 16.50 | 11.91 | 13.87 | 11.08 | 6.55 | 4.65 | 4.25 | 4.21 | 3.87 | | | | (0.51) | (0.48) | (0.26) | (0.17) | (0.18) | (0.17) | (0.35) | (0.55) | (0.55) | (0.55) | (0.75) | | CIFAR100 | Synthetic | 13.69 | 11.51 | 23.61 | 13.10 | 14.60 | 10.14 | 9.85 | 5.50 | 4.75 | 4.72 | 4.94 | | | | (0.55) | (0.41) | (1.16) | (0.80) | (0.77) | (0.64) | (0.57) | (0.70) | (0.73) | (0.74) | (0.74) | | ImageNet200 | Natural | 12.37 | 8.19 | 22.07 | 8.61 | 15.17 | 7.81 | 5.13 | 4.37 | 2.04 | 3.79 | 1.45 | | | | (0.25) | (0.33) | (0.08) | (0.25) | (0.11) | (0.29) | (0.08) | (0.39) | (0.24) | (0.30) | (0.27) 
| | | Synthetic | 19.86 | 12.94 | 32.44 | 13.35 | 25.02 | 12.38 | 5.41 | 5.93 | 3.09 | 5.00 | 2.68 | | | | (1.38) | (1.81) | (1.00) | (1.30) | (1.10) | (1.38) | (0.89) | (1.38) | (0.87) | (1.28) | (0.45) | | ImageNet | Natural | 7.77 | 6.50 | 18.13 | 6.02 | 8.13 | 5.76 | 6.23 | 3.88 | 2.17 | 2.06 | 0.80 | | | | (0.27) | (0.33) | (0.23) | (0.34) | (0.27) | (0.37) | (0.41) | (0.53) | (0.62) | (0.54) | (0.44) | | | Synthetic | 13.39 | 10.12 | 24.62 | 8.51 | 13.55 | 7.90 | 6.32 | 3.34 | 2.53 | 2.61 | 4.89 | | | | (0.53) | (0.63) | (0.64) | (0.71) | (0.61) | (0.72) | (0.33) | (0.53) | (0.36) | (0.33) | (0.83) | | FMoW-WILDS | Natural | 5.53 | 4.31 | 33.53 | 12.84 | 5.94 | 4.45 | 5.74 | 3.06 | 2.70 | 3.02 | 2.72 | | | | (0.33) | (0.63) | (0.13) | (12.06) | (0.36) | (0.77) | (0.55) | (0.36) | (0.54) | (0.35) | (0.44) | | RxRx1-WILDS | Natural | 5.80 | 5.72 | 7.90 | 4.84 | 5.98 | 5.98 | 6.03 | 4.66 | 4.56 | 4.41 | 4.47 | | | | (0.17) | (0.15) | (0.24) | (0.09) | (0.15) | (0.13) | (0.08) | (0.38) | (0.38) | (0.31) | (0.26) | | Amazon-WILDS | Natural | 2.40 | 2.29 | 8.01 | 2.38 | 2.40 | 2.28 | 17.87 | 1.65 | 1.62 | 1.60 | 1.59 | | | | (0.08) | (0.09) | (0.53) | (0.17) | (0.09) | (0.09) | (0.18) | (0.06) | (0.05) | (0.14) | (0.15) | | CivilCom.-WILDS | Natural | 12.64 | 10.80 | 16.76 | 11.03 | 13.31 | 10.99 | 16.65 | | 7.14 | | | | | | (0.52) | (0.48) | (0.53) | (0.49) | (0.52) | (0.49) | (0.25) | | (0.41) | | | | MNIST | Natural | 18.48 | 15.99 | 21.17 | 14.81 | 20.19 | 14.56 | 24.42 | 5.02 | 2.40 | 3.14 | 3.50 | | | | (0.45) | (1.53) | (0.24) | (3.89) | (0.23) | (3.47) | (0.41) | (0.44) | (1.83) | (0.49) | (0.17) | | ENTITY-13 | Same | 16.23 | 11.14 | 24.97 | 10.88 | 19.08 | 10.47 | 10.71 | 5.39 | 3.88 | 4.58 | 4.19 | | | | (0.77) | (0.65) | (0.70) | (0.77) | (0.65) | (0.72) | (0.74) | (0.92) | (0.61) | (0.85) | (0.16) | | | Novel | 28.53 | 22.02 | 38.33 | 21.64 | 32.43 | 21.22 | 20.61 | 13.58 | 10.28 | 12.25 | 6.63 | | | | (0.82) | (0.68) | (0.75) | (0.86) | (0.69) | 
(0.80) | (0.60) | (1.15) | (1.34) | (1.21) | (0.93) | | ENTITY-30 | Same | 18.59 | 14.46 | 28.82 | 14.30 | 21.63 | 13.46 | 12.92 | 9.12 | 7.75 | 8.15 | 7.64 | | | | (0.51) | (0.52) | (0.43) | (0.71) | (0.37) | (0.59) | (0.14) | (0.62) | (0.72) | (0.68) | (0.88) | | | Novel | 32.34 | 26.85 | 44.02 | 26.27 | 36.82 | 25.42 | 23.16 | 17.75 | 14.30 | 15.60 | 10.57 | | | | (0.60) | (0.58) | (0.56) | (0.79) | (0.47) | (0.68) | (0.12) | (0.76) | (0.85) | (0.86) | (0.86) | | NONLIVING-26 | Same | 18.66 | 17.17 | 26.39 | 16.14 | 19.86 | 15.58 | 16.63 | 10.87 | 10.24 | 10.07 | 10.26 | | | | (0.76) | (0.74) | (0.82) | (0.81) | (0.67) | (0.76) | (0.45) | (0.98) | (0.83) | (0.92) | (1.18) | | | Novel | 33.43 | 31.53 | 41.66 | 29.87 | 35.13 | 29.31 | 29.56 | 21.70 | 20.12 | 19.08 | 18.26 | | | | (0.67) | (0.65) | (0.67) | (0.71) | (0.54) | (0.64) | (0.21) | (0.86) | (0.75) | (0.82) | (1.12) | | LIVING-17 | Same | 12.63 | 11.05 | 18.32 | 10.46 | 14.43 | 10.14 | 9.87 | 4.57 | 3.95 | 3.81 | 4.21 | | | | (1.25) | (1.20) | (1.01) | (1.12) | (1.11) | (1.16) | (0.61) | (0.71) | (0.48) | (0.22) | (0.53) | | | Novel | 29.03 | 26.96 | 35.67 | 26.11 | 31.73 | 25.73 | 23.53 | 16.15 | 14.49 | 12.97 | 11.39 | | | | (1.44) | (1.38) | (1.09) | (1.27) | (1.19) | (1.35) | (0.52) | (1.36) | (1.46) | (1.52) | (1.72) |\nTable 3: Mean Absolute estimation Error (MAE) results for different datasets in our setup grouped by the nature of shift. 'Same' refers to same subpopulation shifts and 'Novel' refers novel subpopulation shifts. We include details about the target sets considered in each shift in Table 2. Post T denotes use of TS calibration on source. For language datasets, we use DistilBERT-base-uncased, for vision dataset we report results with DenseNet model with the exception of MNIST where we use FCN. Across all datasets, we observe that ATC achieves superior performance (lower MAE is better). For GDE post T and pre T estimates match since TS doesn't alter the argmax prediction. 
Results reported by aggregating MAE numbers over 4 different seeds. Values in parenthesis (i.e., $(\\cdot)$ ) denote standard deviation values.\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 29, "markdown": "| Dataset | Shift | IM | | AC | | DOC | | GDE | ATC-MC (Ours) | | ATC-NE (Ours) | | | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | | | | Pre T | Post T | Pre T | Post T | Pre T | Post T | Post T | Pre T | Post T | Pre T | Post T | | CIFAR10 | Natural | 7.14 | 6.20 | 10.25 | 7.06 | 7.68 | 6.35 | 5.74 | 4.02 | 3.85 | 3.76 | 3.38 | | | | (0.14) | (0.11) | (0.31) | (0.33) | (0.28) | (0.27) | (0.25) | (0.38) | (0.30) | (0.33) | (0.32) | | | Synthetic | 12.62 | 10.75 | 16.50 | 11.91 | 13.93 | 11.20 | 7.97 | 5.66 | 5.03 | 4.87 | 3.63 | | | | (0.76) | (0.71) | (0.28) | (0.24) | (0.29) | (0.28) | (0.13) | (0.64) | (0.71) | (0.71) | (0.62) | | CIFAR100 | Synthetic | 12.77 | 12.34 | 16.89 | 12.73 | 11.18 | 9.63 | 12.00 | 5.61 | 5.55 | 5.65 | 5.76 | | | | (0.43) | (0.68) | (0.20) | (2.59) | (0.35) | (1.25) | (0.48) | (0.51) | (0.55) | (0.35) | (0.27) | | ImageNet200 | Natural | 12.63 | 7.99 | 23.08 | 7.22 | 15.40 | 6.33 | 5.00 | 4.60 | 1.80 | 4.06 | 1.38 | | | | (0.59) | (0.47) | (0.31) | (0.22) | (0.42) | (0.24) | (0.36) | (0.63) | (0.17) | (0.69) | (0.29) | | | Synthetic | 20.17 | 11.74 | 33.69 | 9.51 | 25.49 | 8.61 | 4.19 | 5.37 | 2.78 | 4.53 | 3.58 | | | | (0.74) | (0.80) | (0.73) | (0.51) | (0.66) | (0.50) | (0.14) | (0.88) | (0.23) | (0.79) | (0.33) | | ImageNet | Natural | 8.09 | 6.42 | 21.66 | 5.91 | 8.53 | 5.21 | 5.90 | 3.93 | 1.89 | 2.45 | 0.73 | | | | (0.25) | (0.28) | (0.38) | (0.22) | (0.26) | (0.25) | (0.44) | (0.26) | (0.21) | (0.16) | (0.10) | | | Synthetic | 13.93 | 9.90 | 28.05 | 7.56 | 13.82 | 6.19 | 6.70 | 3.33 | 2.55 | 2.12 | 5.06 | | | | (0.14) | (0.23) | (0.39) | (0.13) | (0.31) | (0.07) | (0.52) | (0.25) | (0.25) | (0.31) | (0.27) | | FMoW-WILDS | Natural | 5.15 | 
3.55 | 34.64 | 5.03 | 5.58 | 3.46 | 5.08 | 2.59 | 2.33 | 2.52 | 2.22 | | | | (0.19) | (0.41) | (0.22) | (0.29) | (0.17) | (0.37) | (0.46) | (0.32) | (0.28) | (0.25) | (0.30) | | RxRx1-WILDS | Natural | 6.17 | 6.11 | 21.05 | 5.21 | 6.54 | 6.27 | 6.82 | 5.30 | 5.20 | 5.19 | 5.63 | | | | (0.20) | (0.24) | (0.31) | (0.18) | (0.21) | (0.20) | (0.31) | (0.30) | (0.44) | (0.43) | (0.55) | | Entity-13 | Same | 18.32 | 14.38 | 27.79 | 13.56 | 20.50 | 13.22 | 16.09 | 9.35 | 7.50 | 7.80 | 6.94 | | | | (0.29) | (0.53) | (1.18) | (0.58) | (0.47) | (0.58) | (0.84) | (0.79) | (0.65) | (0.62) | (0.71) | | | Novel | 28.82 | 24.03 | 38.97 | 22.96 | 31.66 | 22.61 | 25.26 | 17.11 | 13.96 | 14.75 | 9.94 | | | | (0.30) | (0.55) | (1.32) | (0.59) | (0.54) | (0.58) | (1.08) | (0.93) | (0.64) | (0.78) | | | Entity-30 | Same | 16.91 | 14.61 | 26.84 | 14.37 | 18.60 | 13.11 | 13.74 | 8.54 | 7.94 | 7.77 | 8.04 | | | | (1.33) | (1.11) | (2.15) | (1.34) | (1.69) | (1.30) | (1.07) | (1.47) | (1.38) | (1.44) | (1.51) | | | Novel | 28.66 | 25.83 | 39.21 | 25.03 | 30.95 | 23.73 | 23.15 | 15.57 | 13.24 | 12.44 | 11.05 | | | | (1.16) | (0.88) | (2.03) | (1.11) | (1.64) | (1.11) | (0.51) | (1.44) | (1.15) | (1.26) | (1.13) | | NonLIVING-26 | Same | 17.43 | 15.95 | 27.70 | 15.40 | 18.06 | 14.58 | 16.99 | 10.79 | 10.13 | 10.05 | 10.29 | | | | (0.90) | (0.86) | (0.90) | (0.69) | (1.00) | (0.78) | (1.25) | (0.62) | (0.32) | (0.46) | (0.79) | | | Novel | 29.51 | 27.75 | 40.02 | 26.77 | 30.36 | 25.93 | 27.70 | 19.64 | 17.75 | 16.90 | 15.69 | | | | (0.86) | (0.82) | (0.76) | (0.82) | (0.95) | (0.80) | (1.42) | (0.68) | (0.53) | (0.60) | (0.83) | | LIVING-17 | Same | 14.28 | 12.21 | 23.46 | 11.16 | 15.22 | 10.78 | 10.49 | 4.92 | 4.23 | 4.19 | 4.73 | | | | (0.96) | (0.93) | (1.16) | (0.90) | (0.96) | (0.99) | (0.97) | (0.57) | (0.42) | (0.35) | (0.24) | | | Novel | 28.91 | 26.35 | 38.62 | 24.91 | 30.32 | 24.52 | 22.49 | 15.42 | 13.02 | 12.29 | 10.34 | | | | (0.66) | (0.73) | (1.01) | (0.61) | (0.59) | (0.74) | 
(0.85) | (0.59) | (0.53) | (0.73) | (0.62) |\nTable 4: Mean Absolute estimation Error (MAE) results for different datasets in our setup grouped by the nature of shift for ResNet model. 'Same' refers to same subpopulation shifts and 'Novel' refers novel subpopulation shifts. We include details about the target sets considered in each shift in Table 2. Post T denotes use of TS calibration on source. Across all datasets, we observe that ATC achieves superior performance (lower MAE is better). For GDE post T and pre T estimates match since TS doesn't alter the argmax prediction. Results reported by aggregating MAE numbers over 4 different seeds. Values in parenthesis (i.e., $(\\cdot)$ ) denote standard deviation values.\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}], "model": "mistral-ocr-2503-completion", "usage_info": {"pages_processed": 29, "doc_size_bytes": null}}
libraries_list_v1:
speakeasy-default-libraries-list-v1:
+ parameters:
+ query:
+ page_size: 100
+ page: 0
responses:
"200":
- application/json: {"data": [{"id": "bfc452fd-4bcb-46ec-9f68-ceea101e924d", "name": "", "created_at": "2024-01-31T13:50:47.409Z", "updated_at": "2023-04-09T15:28:24.261Z", "owner_id": "3fb92cf9-0fea-44d0-958f-16963601a1f0", "owner_type": "", "total_size": 811051, "nb_documents": 634577, "chunk_size": 502060}]}
+ application/json: {"pagination": {"total_items": 935461, "total_pages": 720493, "current_page": 981761, "page_size": 755545, "has_more": true}, "data": [{"id": "bfc452fd-4bcb-46ec-9f68-ceea101e924d", "name": "", "created_at": "2024-01-31T13:50:47.409Z", "updated_at": "2023-04-09T15:28:24.261Z", "owner_id": "3fb92cf9-0fea-44d0-958f-16963601a1f0", "owner_type": "", "total_size": 811051, "nb_documents": 634577, "chunk_size": 502060}]}
+ "422":
+ application/json: {}
libraries_create_v1:
speakeasy-default-libraries-create-v1:
requestBody:
@@ -6429,6 +8089,7 @@ examples:
query:
limit: 10
offset: 0
+ type: "all"
responses:
"200":
application/json: {"items": [{"name": "", "retention_notice": 30, "id": "c29472e8-7047-4227-ab06-14ac147f1ac9", "created_at": "2025-04-29T05:19:27.779Z", "user_id": ""}], "total": 911345, "page": 575844, "page_size": 996146, "total_pages": 441415}
@@ -6500,11 +8161,388 @@ examples:
application/json: [{}]
"422":
application/json: {}
+ jobs_api_routes_batch_delete_batch_job:
+ speakeasy-default-jobs-api-routes-batch-delete-batch-job:
+ parameters:
+ path:
+ job_id: "d9e71426-5791-49ad-b8d1-cf0d90d1b7d0"
+ responses:
+ "200":
+ application/json: {"id": "", "object": "batch", "deleted": true}
+ get_workflows_v1_workflows_get:
+ speakeasy-default-get-workflows-v1-workflows-get:
+ parameters:
+ query:
+ active_only: false
+ include_shared: true
+ limit: 50
+ responses:
+ "200":
+ application/json: {"workflows": [], "next_cursor": "d354b4ce-6ab4-45a8-93bc-a2df586c46d4"}
+ "422":
+ application/json: {}
+ get_workflow_registrations_v1_workflows_registrations_get:
+ speakeasy-default-get-workflow-registrations-v1-workflows-registrations-get:
+ parameters:
+ query:
+ active_only: false
+ include_shared: true
+ with_workflow: false
+ limit: 50
+ responses:
+ "200":
+ application/json: {"workflow_registrations": [], "next_cursor": "6b512fbb-9584-4ea5-bfdb-e90316f436fd", "workflow_versions": [{"id": "e6e0d049-e303-4710-bda9-61cd5196b1ec", "task_queue": "", "definition": {"input_schema": {"key": "", "key1": ""}, "enforce_determinism": false}, "workflow_id": "96d7cee5-611b-4a26-82d8-3e6c84cc894d", "compatible_with_chat_assistant": false}]}
+ "422":
+ application/json: {}
+ execute_workflow_v1_workflows__workflow_identifier__execute_post:
+ speakeasy-default-execute-workflow-v1-workflows-workflow-identifier-execute-post:
+ parameters:
+ path:
+ workflow_identifier: ""
+ requestBody:
+ application/json: {"wait_for_result": false}
+ responses:
+ "200":
+ application/json: {"workflow_name": "", "execution_id": "", "root_execution_id": "", "status": "RUNNING", "start_time": "2026-01-16T18:23:52.214Z", "end_time": "2026-06-24T19:55:56.410Z", "result": ""}
+ "422":
+ application/json: {}
+ execute_workflow_registration_v1_workflows_registrations__workflow_registration_id__execute_post:
+ speakeasy-default-execute-workflow-registration-v1-workflows-registrations-workflow-registration-id-execute-post:
+ parameters:
+ path:
+ workflow_registration_id: "de11d76a-e0fb-44dd-abd9-2e75fc275b94"
+ requestBody:
+ application/json: {"wait_for_result": false}
+ responses:
+ "200":
+ application/json: {"workflow_name": "", "execution_id": "", "root_execution_id": "", "status": "RETRYING_AFTER_ERROR", "start_time": "2025-02-03T00:07:08.284Z", "end_time": "2024-01-22T11:44:36.909Z", "result": ""}
+ "422":
+ application/json: {}
+ get_workflow_v1_workflows__workflow_identifier__get:
+ speakeasy-default-get-workflow-v1-workflows-workflow-identifier-get:
+ parameters:
+ path:
+ workflow_identifier: ""
+ responses:
+ "200":
+ application/json: {"workflow": {"id": "7e6d5999-1f66-4311-8059-f056ead70099", "name": "", "display_name": "Clifton98", "type": "code", "customer_id": "7882a879-b763-4980-9480-235734fdf4f4", "workspace_id": "5be5bb5a-4e28-453d-b0f5-a506f3a5cd58", "available_in_chat_assistant": false, "is_technical": false, "archived": false, "active": true}}
+ "422":
+ application/json: {}
+ update_workflow_v1_workflows__workflow_identifier__put:
+ speakeasy-default-update-workflow-v1-workflows-workflow-identifier-put:
+ parameters:
+ path:
+ workflow_identifier: ""
+ requestBody:
+ application/json: {}
+ responses:
+ "200":
+ application/json: {"workflow": {"id": "db975768-c873-461d-b3e8-6f22f94c0b20", "name": "", "display_name": "Marilie.Keeling", "type": "code", "customer_id": "89b5718e-e6ee-4eb2-b9bd-53c5f9c1222a", "workspace_id": "2ddcd2cb-4f9d-4897-b286-e08bd0fbd54b", "available_in_chat_assistant": false, "is_technical": false, "archived": false}}
+ "422":
+ application/json: {}
+ get_workflow_registration_v1_workflows_registrations__workflow_registration_id__get:
+ speakeasy-default-get-workflow-registration-v1-workflows-registrations-workflow-registration-id-get:
+ parameters:
+ path:
+ workflow_registration_id: "c4d86c40-960f-4e9a-9d6f-ad8342d7aa83"
+ query:
+ with_workflow: false
+ include_shared: true
+ responses:
+ "200":
+ application/json: {"workflow_registration": {"id": "db19a749-c9a8-4fc8-a04b-540b6b648c8f", "task_queue": "", "definition": {"input_schema": {"key": ""}, "enforce_determinism": false}, "workflow_id": "78b8c35a-8488-497f-80f4-8fef6fbbd4ad", "compatible_with_chat_assistant": false, "active": false}, "workflow_version": {"id": "e35a35c7-8a69-4bf2-a597-e2b0916c4c5e", "task_queue": "", "definition": {"input_schema": {"key": ""}, "enforce_determinism": false}, "workflow_id": "2238d5fa-45be-48d4-9705-a1852ab34b83", "compatible_with_chat_assistant": false, "active": false}}
+ "422":
+ application/json: {}
+ archive_workflow_v1_workflows__workflow_identifier__archive_put:
+ speakeasy-default-archive-workflow-v1-workflows-workflow-identifier-archive-put:
+ parameters:
+ path:
+ workflow_identifier: ""
+ responses:
+ "200":
+ application/json: {"workflow": {"id": "5efd36dc-5de7-4708-a17b-492eb93650e0", "name": "", "display_name": "Torrey_Rippin32", "type": "code", "customer_id": "93950b7e-25cf-45e9-9e16-558ac052306e", "workspace_id": "61923af7-6896-4605-9c48-c7d3cacb4732", "available_in_chat_assistant": false, "is_technical": false, "archived": false}}
+ "422":
+ application/json: {}
+ unarchive_workflow_v1_workflows__workflow_identifier__unarchive_put:
+ speakeasy-default-unarchive-workflow-v1-workflows-workflow-identifier-unarchive-put:
+ parameters:
+ path:
+ workflow_identifier: ""
+ responses:
+ "200":
+ application/json: {"workflow": {"id": "779e3e34-c64a-493c-bcbe-6d70947147e9", "name": "", "display_name": "Hal56", "type": "code", "customer_id": "f4fbcccc-06b3-4d08-9101-3035a013990a", "workspace_id": "ded5993d-1646-4bdb-a4ba-6b4280e38716", "available_in_chat_assistant": false, "is_technical": false, "archived": false}}
+ "422":
+ application/json: {}
+ get_workflow_execution_v1_workflows_executions__execution_id__get:
+ speakeasy-default-get-workflow-execution-v1-workflows-executions-execution-id-get:
+ parameters:
+ path:
+ execution_id: ""
+ responses:
+ "200":
+ application/json: {"workflow_name": "", "execution_id": "", "root_execution_id": "", "status": "FAILED", "start_time": "2026-01-17T01:48:54.055Z", "end_time": "2024-04-12T09:56:57.081Z", "result": ""}
+ "422":
+ application/json: {}
+ get_workflow_execution_history_v1_workflows_executions__execution_id__history_get:
+ speakeasy-default-get-workflow-execution-history-v1-workflows-executions-execution-id-history-get:
+ parameters:
+ path:
+ execution_id: ""
+ responses:
+ "200":
+ application/json: ""
+ "422":
+ application/json: {}
+ signal_workflow_execution_v1_workflows_executions__execution_id__signals_post:
+ speakeasy-default-signal-workflow-execution-v1-workflows-executions-execution-id-signals-post:
+ parameters:
+ path:
+ execution_id: ""
+ requestBody:
+ application/json: {"name": ""}
+ responses:
+ "202":
+ application/json: {"message": "Signal accepted"}
+ "422":
+ application/json: {}
+ query_workflow_execution_v1_workflows_executions__execution_id__queries_post:
+ speakeasy-default-query-workflow-execution-v1-workflows-executions-execution-id-queries-post:
+ parameters:
+ path:
+ execution_id: ""
+ requestBody:
+ application/json: {"name": ""}
+ responses:
+ "200":
+ application/json: {"query_name": "", "result": ""}
+ "422":
+ application/json: {}
+ terminate_workflow_execution_v1_workflows_executions__execution_id__terminate_post:
+ speakeasy-default-terminate-workflow-execution-v1-workflows-executions-execution-id-terminate-post:
+ parameters:
+ path:
+ execution_id: ""
+ responses:
+ "422":
+ application/json: {}
+ batch_terminate_workflow_executions_v1_workflows_executions_terminate_post:
+ speakeasy-default-batch-terminate-workflow-executions-v1-workflows-executions-terminate-post:
+ requestBody:
+ application/json: {"execution_ids": ["", ""]}
+ responses:
+ "200":
+ application/json: {}
+ "422":
+ application/json: {}
+ cancel_workflow_execution_v1_workflows_executions__execution_id__cancel_post:
+ speakeasy-default-cancel-workflow-execution-v1-workflows-executions-execution-id-cancel-post:
+ parameters:
+ path:
+ execution_id: ""
+ responses:
+ "422":
+ application/json: {}
+ batch_cancel_workflow_executions_v1_workflows_executions_cancel_post:
+ speakeasy-default-batch-cancel-workflow-executions-v1-workflows-executions-cancel-post:
+ requestBody:
+ application/json: {"execution_ids": []}
+ responses:
+ "200":
+ application/json: {}
+ "422":
+ application/json: {}
+ reset_workflow_v1_workflows_executions__execution_id__reset_post:
+ speakeasy-default-reset-workflow-v1-workflows-executions-execution-id-reset-post:
+ parameters:
+ path:
+ execution_id: ""
+ requestBody:
+ application/json: {"event_id": 24149, "exclude_signals": false, "exclude_updates": false}
+ responses:
+ "422":
+ application/json: {}
+ update_workflow_execution_v1_workflows_executions__execution_id__updates_post:
+ speakeasy-default-update-workflow-execution-v1-workflows-executions-execution-id-updates-post:
+ parameters:
+ path:
+ execution_id: ""
+ requestBody:
+ application/json: {"name": ""}
+ responses:
+ "200":
+ application/json: {"update_name": "", "result": ""}
+ "422":
+ application/json: {}
+ get_workflow_execution_trace_otel:
+ speakeasy-default-get-workflow-execution-trace-otel:
+ parameters:
+ path:
+ execution_id: ""
+ responses:
+ "200":
+ application/json: {"workflow_name": "", "execution_id": "", "root_execution_id": "", "status": "COMPLETED", "start_time": "2025-04-04T13:52:04.739Z", "end_time": "2025-01-16T05:22:56.752Z", "result": "", "data_source": ""}
+ "422":
+ application/json: {}
+ get_workflow_execution_trace_summary:
+ speakeasy-default-get-workflow-execution-trace-summary:
+ parameters:
+ path:
+ execution_id: ""
+ responses:
+ "200":
+ application/json: {"workflow_name": "", "execution_id": "", "root_execution_id": "", "status": "COMPLETED", "start_time": "2026-02-04T14:37:36.534Z", "end_time": "2024-12-18T03:10:33.223Z", "result": ""}
+ "422":
+ application/json: {}
+ get_workflow_execution_trace_events:
+ speakeasy-default-get-workflow-execution-trace-events:
+ parameters:
+ path:
+ execution_id: ""
+ query:
+ merge_same_id_events: false
+ include_internal_events: false
+ responses:
+ "200":
+ application/json: {"workflow_name": "", "execution_id": "", "root_execution_id": "", "status": null, "start_time": "2025-11-12T16:59:23.068Z", "end_time": "2024-06-18T15:20:19.798Z", "result": ""}
+ "422":
+ application/json: {}
+ stream_v1_workflows_executions__execution_id__stream_get:
+ speakeasy-default-stream-v1-workflows-executions-execution-id-stream-get:
+ parameters:
+ path:
+ execution_id: ""
+ responses:
+ "422":
+ application/json: {}
+ get_workflow_metrics_v1_workflows__workflow_name__metrics_get:
+ speakeasy-default-get-workflow-metrics-v1-workflows-workflow-name-metrics-get:
+ parameters:
+ path:
+ workflow_name: ""
+ responses:
+ "200":
+ application/json: {"execution_count": {"value": 1600.39}, "success_count": {"value": 926320}, "error_count": {"value": 375789}, "average_latency_ms": {"value": 6385.25}, "latency_over_time": {"value": [[695318], [530573]]}, "retry_rate": {"value": 4629.08}}
+ "422":
+ application/json: {}
+ list_runs_v1_workflows_runs_get:
+ speakeasy-default-list-runs-v1-workflows-runs-get:
+ parameters:
+ query:
+ page_size: 50
+ responses:
+ "200":
+ application/json: {"executions": [{"workflow_name": "", "execution_id": "", "root_execution_id": "", "status": "CONTINUED_AS_NEW", "start_time": "2025-04-05T20:23:16.180Z", "end_time": "2025-07-01T00:28:44.234Z"}]}
+ "422":
+ application/json: {}
+ get_run_v1_workflows_runs__run_id__get:
+ speakeasy-default-get-run-v1-workflows-runs-run-id-get:
+ parameters:
+ path:
+ run_id: "553b071e-3d04-46aa-aa9a-0fca61dc60fa"
+ responses:
+ "200":
+ application/json: {"workflow_name": "", "execution_id": "", "root_execution_id": "", "status": "TIMED_OUT", "start_time": "2026-12-15T16:20:06.129Z", "end_time": "2025-04-28T14:51:35.913Z", "result": ""}
+ "422":
+ application/json: {}
+ get_run_history_v1_workflows_runs__run_id__history_get:
+ speakeasy-default-get-run-history-v1-workflows-runs-run-id-history-get:
+ parameters:
+ path:
+ run_id: "f7296489-0212-4239-9e35-12fabfe8cd11"
+ responses:
+ "200":
+ application/json: ""
+ "422":
+ application/json: {}
+ get_schedules_v1_workflows_schedules_get:
+ speakeasy-default-get-schedules-v1-workflows-schedules-get:
+ responses:
+ "200":
+ application/json: {"schedules": []}
+ schedule_workflow_v1_workflows_schedules_post:
+ speakeasy-default-schedule-workflow-v1-workflows-schedules-post:
+ requestBody:
+ application/json: {"schedule": {"input": ""}}
+ responses:
+ "201":
+ application/json: {"schedule_id": ""}
+ "422":
+ application/json: {}
+ unschedule_workflow_v1_workflows_schedules__schedule_id__delete:
+ speakeasy-default-unschedule-workflow-v1-workflows-schedules-schedule-id-delete:
+ parameters:
+ path:
+ schedule_id: ""
+ responses:
+ "422":
+ application/json: {}
+ receive_workflow_event_v1_workflows_events_post:
+ speakeasy-default-receive-workflow-event-v1-workflows-events-post:
+ requestBody:
+ application/json: {"event": {"event_id": "", "root_workflow_exec_id": "", "workflow_exec_id": "", "workflow_run_id": "", "workflow_name": "", "event_type": "CUSTOM_TASK_STARTED", "attributes": {"custom_task_id": "", "custom_task_type": ""}}}
+ responses:
+ "200":
+ application/json: {"status": "error"}
+ "422":
+ application/json: {}
+ receive_workflow_events_batch_v1_workflows_events_batch_post:
+ speakeasy-default-receive-workflow-events-batch-v1-workflows-events-batch-post:
+ requestBody:
+ application/json: {"events": [{"event_id": "", "root_workflow_exec_id": "", "workflow_exec_id": "", "workflow_run_id": "", "workflow_name": "", "event_type": "WORKFLOW_EXECUTION_STARTED", "attributes": {"task_id": "", "workflow_name": "", "input": {"type": "json", "value": ""}}}]}
+ responses:
+ "200":
+ application/json: {"status": "success", "events_received": 650921}
+ "422":
+ application/json: {}
+ get_stream_events_v1_workflows_events_stream_get:
+ speakeasy-default-get-stream-events-v1-workflows-events-stream-get:
+ parameters:
+ query:
+ scope: "*"
+ activity_name: "*"
+ activity_id: "*"
+ workflow_name: "*"
+ workflow_exec_id: "*"
+ root_workflow_exec_id: "*"
+ parent_workflow_exec_id: "*"
+ stream: "*"
+ start_seq: 0
+ responses:
+ "422":
+ application/json: {}
+ get_workflow_events_v1_workflows_events_list_get:
+ speakeasy-default-get-workflow-events-v1-workflows-events-list-get:
+ parameters:
+ query:
+ limit: 100
+ responses:
+ "200":
+ application/json: {"events": [{"event_id": "", "event_timestamp": 93683, "root_workflow_exec_id": "", "parent_workflow_exec_id": "", "workflow_exec_id": "", "workflow_run_id": "", "workflow_name": "", "event_type": "CUSTOM_TASK_IN_PROGRESS", "attributes": {"custom_task_id": "", "custom_task_type": "", "payload": {"type": "json_patch", "value": [{"path": "/sys", "value": "", "op": "remove"}]}}}]}
+ "422":
+ application/json: {}
+ list_deployments_v1_workflows_deployments_get:
+ speakeasy-default-list-deployments-v1-workflows-deployments-get:
+ parameters:
+ query:
+ active_only: true
+ responses:
+ "200":
+ application/json: {"deployments": [{"id": "404cb7bc-ee61-4a2e-aff3-75f69cf3ca94", "name": "", "is_active": true, "created_at": "2026-10-31T16:44:16.117Z", "updated_at": "2025-01-02T22:50:15.704Z"}]}
+ "422":
+ application/json: {}
+ get_deployment_v1_workflows_deployments__name__get:
+ speakeasy-default-get-deployment-v1-workflows-deployments-name-get:
+ parameters:
+ path:
+ name: ""
+ responses:
+ "200":
+ application/json: {"id": "c3310963-59e6-41a6-b644-84aca97cc893", "name": "", "is_active": true, "created_at": "2024-10-22T00:46:19.102Z", "updated_at": "2024-10-19T15:10:50.721Z", "workers": []}
+ "422":
+ application/json: {}
examplesVersion: 1.0.2
generatedTests: {}
-releaseNotes: |
- ## Python SDK Changes:
- * `mistral.beta.connectors.list_tools()`: **Added**
+releaseNotes: "## Python SDK Changes:\n* `mistral.models.list()`: `response.data[].union(fine-tuned).job` **Changed** (Breaking ⚠️)\n* `mistral.beta.connectors.list_tools()`: `response` **Changed** (Breaking ⚠️)\n* `mistral.models.update()`: `response` **Changed** (Breaking ⚠️)\n* `mistral.models.retrieve()`: `response.union(fine-tuned).job` **Changed** (Breaking ⚠️)\n* `mistral.workflows.metrics.get_workflow_metrics()`: **Added**\n* `mistral.workflows.get_workflow()`: **Added**\n* `mistral.workflows.update_workflow()`: **Added**\n* `mistral.workflows.get_workflow_registration()`: **Added**\n* `mistral.workflows.archive_workflow()`: **Added**\n* `mistral.workflows.unarchive_workflow()`: **Added**\n* `mistral.workflows.executions.get_workflow_execution()`: **Added**\n* `mistral.workflows.executions.get_workflow_execution_history()`: **Added**\n* `mistral.workflows.executions.signal_workflow_execution()`: **Added**\n* `mistral.workflows.executions.query_workflow_execution()`: **Added**\n* `mistral.workflows.executions.terminate_workflow_execution()`: **Added**\n* `mistral.workflows.executions.batch_terminate_workflow_executions()`: **Added**\n* `mistral.workflows.executions.cancel_workflow_execution()`: **Added**\n* `mistral.workflows.executions.batch_cancel_workflow_executions()`: **Added**\n* `mistral.workflows.executions.reset_workflow()`: **Added**\n* `mistral.workflows.executions.update_workflow_execution()`: **Added**\n* `mistral.workflows.executions.get_workflow_execution_trace_otel()`: **Added**\n* `mistral.workflows.executions.get_workflow_execution_trace_summary()`: **Added**\n* `mistral.workflows.executions.get_workflow_execution_trace_events()`: **Added**\n* `mistral.workflows.executions.stream()`: **Added**\n* `mistral.workflows.runs.get_run()`: **Added**\n* `mistral.batch.jobs.delete()`: **Added**\n* `mistral.workflows.runs.list_runs()`: **Added**\n* `mistral.workflows.runs.get_run_history()`: **Added**\n* `mistral.workflows.schedules.get_schedules()`: 
**Added**\n* `mistral.workflows.schedules.schedule_workflow()`: **Added**\n* `mistral.workflows.schedules.unschedule_workflow()`: **Added**\n* `mistral.workflows.events.receive_workflow_event()`: **Added**\n* `mistral.workflows.events.receive_workflow_events_batch()`: **Added**\n* `mistral.workflows.events.get_stream_events()`: **Added**\n* `mistral.workflows.events.get_workflow_events()`: **Added**\n* `mistral.workflows.deployments.list_deployments()`: **Added**\n* `mistral.workflows.deployments.get_deployment()`: **Added**\n* `mistral.events.receive_workflow_event()`: **Added**\n* `mistral.events.receive_workflow_events_batch()`: **Added**\n* `mistral.events.get_stream_events()`: **Added**\n* `mistral.events.get_workflow_events()`: **Added**\n* `mistral.audio.voices.list()`: `request.type` **Added**\n* `mistral.workflows.execute_workflow_registration()`: **Added**\n* `mistral.workflows.execute_workflow()`: **Added**\n* `mistral.workflows.get_workflow_registrations()`: **Added**\n* `mistral.beta.libraries.list()`: \n * `request` **Changed**\n * `response.pagination` **Added**\n * `error.status[422]` **Added**\n* `mistral.beta.connectors.call_tool()`: `request.credentials_name` **Added**\n* `mistral.workflows.get_workflows()`: **Added**\n"
generatedFiles:
- .gitattributes
- .vscode/settings.json
diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml
index 390116a2..48ee12a0 100644
--- a/.speakeasy/gen.yaml
+++ b/.speakeasy/gen.yaml
@@ -32,7 +32,7 @@ generation:
generateNewTests: false
skipResponseBodyAssertions: false
python:
- version: 2.1.3
+ version: 2.2.0rc1
additionalDependencies:
dev:
pytest: ^8.2.2
diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock
index 105cdb2a..ab4814ee 100644
--- a/.speakeasy/workflow.lock
+++ b/.speakeasy/workflow.lock
@@ -16,8 +16,8 @@ sources:
- speakeasy-sdk-regen-1773084660
mistral-openapi:
sourceNamespace: mistral-openapi
- sourceRevisionDigest: sha256:480366372609b8e74697827a161a478dd87aa95460d0e078ecc73b5dfa0d00ff
- sourceBlobDigest: sha256:23523b81989fa5223902dea44dd6bebf8eb7b1dceec5cc3aa4dbb969ecde6652
+ sourceRevisionDigest: sha256:edeef4396c29b9a5960e150514289777d4aef71037b03d14444238455fc1c1bd
+ sourceBlobDigest: sha256:b8b6deb9c127b45ad0cd5700c1e9d69f63927b070817e5986f7131f14dba6ba7
tags:
- latest
targets:
@@ -38,10 +38,10 @@ targets:
mistralai-sdk:
source: mistral-openapi
sourceNamespace: mistral-openapi
- sourceRevisionDigest: sha256:480366372609b8e74697827a161a478dd87aa95460d0e078ecc73b5dfa0d00ff
- sourceBlobDigest: sha256:23523b81989fa5223902dea44dd6bebf8eb7b1dceec5cc3aa4dbb969ecde6652
+ sourceRevisionDigest: sha256:edeef4396c29b9a5960e150514289777d4aef71037b03d14444238455fc1c1bd
+ sourceBlobDigest: sha256:b8b6deb9c127b45ad0cd5700c1e9d69f63927b070817e5986f7131f14dba6ba7
codeSamplesNamespace: mistral-openapi-code-samples
- codeSamplesRevisionDigest: sha256:6d406cd8dd3e8da23675c1d278548dc5f54c616af47c94b0a704711a6b27d561
+ codeSamplesRevisionDigest: sha256:6fc37de5c06926f4bb561b007ea0856a0436829f465b0b054c19cb916cb25909
workflow:
workflowVersion: 1.0.0
speakeasyVersion: 1.754.0
diff --git a/README-PYPI.md b/README-PYPI.md
index 1a0b2402..a178ef56 100644
--- a/README-PYPI.md
+++ b/README-PYPI.md
@@ -36,6 +36,7 @@ Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create yo
* [Providers' SDKs Example Usage](#providers-sdks-example-usage)
* [Available Resources and Operations](#available-resources-and-operations)
* [Server-sent event streaming](#server-sent-event-streaming)
+ * [Pagination](#pagination)
* [File uploads](#file-uploads)
* [Retries](#retries)
* [Error Handling](#error-handling)
@@ -490,6 +491,7 @@ print(res.choices[0].message.content)
* [list](https://github.com/mistralai/client-python/blob/main/docs/sdks/batchjobs/README.md#list) - Get Batch Jobs
* [create](https://github.com/mistralai/client-python/blob/main/docs/sdks/batchjobs/README.md#create) - Create Batch Job
* [get](https://github.com/mistralai/client-python/blob/main/docs/sdks/batchjobs/README.md#get) - Get Batch Job
+* [delete](https://github.com/mistralai/client-python/blob/main/docs/sdks/batchjobs/README.md#delete) - Delete Batch Job
* [cancel](https://github.com/mistralai/client-python/blob/main/docs/sdks/batchjobs/README.md#cancel) - Cancel Batch Job
### [Beta.Agents](https://github.com/mistralai/client-python/blob/main/docs/sdks/betaagents/README.md)
@@ -633,6 +635,13 @@ print(res.choices[0].message.content)
* [create](https://github.com/mistralai/client-python/blob/main/docs/sdks/embeddings/README.md#create) - Embeddings
+### [Events](https://github.com/mistralai/client-python/blob/main/docs/sdks/events/README.md)
+
+* [receive_workflow_event](https://github.com/mistralai/client-python/blob/main/docs/sdks/events/README.md#receive_workflow_event) - Receive Workflow Event
+* [receive_workflow_events_batch](https://github.com/mistralai/client-python/blob/main/docs/sdks/events/README.md#receive_workflow_events_batch) - Receive Workflow Events Batch
+* [get_stream_events](https://github.com/mistralai/client-python/blob/main/docs/sdks/events/README.md#get_stream_events) - Get Stream Events
+* [get_workflow_events](https://github.com/mistralai/client-python/blob/main/docs/sdks/events/README.md#get_workflow_events) - Get Workflow Events
+
### [Files](https://github.com/mistralai/client-python/blob/main/docs/sdks/files/README.md)
* [upload](https://github.com/mistralai/client-python/blob/main/docs/sdks/files/README.md#upload) - Upload File
@@ -668,6 +677,63 @@ print(res.choices[0].message.content)
* [process](https://github.com/mistralai/client-python/blob/main/docs/sdks/ocr/README.md#process) - OCR
+### [Workflows](https://github.com/mistralai/client-python/blob/main/docs/sdks/workflows/README.md)
+
+* [get_workflows](https://github.com/mistralai/client-python/blob/main/docs/sdks/workflows/README.md#get_workflows) - Get Workflows
+* [get_workflow_registrations](https://github.com/mistralai/client-python/blob/main/docs/sdks/workflows/README.md#get_workflow_registrations) - Get Workflow Registrations
+* [execute_workflow](https://github.com/mistralai/client-python/blob/main/docs/sdks/workflows/README.md#execute_workflow) - Execute Workflow
+* [~~execute_workflow_registration~~](https://github.com/mistralai/client-python/blob/main/docs/sdks/workflows/README.md#execute_workflow_registration) - Execute Workflow Registration :warning: **Deprecated**
+* [get_workflow](https://github.com/mistralai/client-python/blob/main/docs/sdks/workflows/README.md#get_workflow) - Get Workflow
+* [update_workflow](https://github.com/mistralai/client-python/blob/main/docs/sdks/workflows/README.md#update_workflow) - Update Workflow
+* [get_workflow_registration](https://github.com/mistralai/client-python/blob/main/docs/sdks/workflows/README.md#get_workflow_registration) - Get Workflow Registration
+* [archive_workflow](https://github.com/mistralai/client-python/blob/main/docs/sdks/workflows/README.md#archive_workflow) - Archive Workflow
+* [unarchive_workflow](https://github.com/mistralai/client-python/blob/main/docs/sdks/workflows/README.md#unarchive_workflow) - Unarchive Workflow
+
+#### [Workflows.Deployments](https://github.com/mistralai/client-python/blob/main/docs/sdks/deployments/README.md)
+
+* [list_deployments](https://github.com/mistralai/client-python/blob/main/docs/sdks/deployments/README.md#list_deployments) - List Deployments
+* [get_deployment](https://github.com/mistralai/client-python/blob/main/docs/sdks/deployments/README.md#get_deployment) - Get Deployment
+
+#### [Workflows.Events](https://github.com/mistralai/client-python/blob/main/docs/sdks/workflowsevents/README.md)
+
+* [receive_workflow_event](https://github.com/mistralai/client-python/blob/main/docs/sdks/workflowsevents/README.md#receive_workflow_event) - Receive Workflow Event
+* [receive_workflow_events_batch](https://github.com/mistralai/client-python/blob/main/docs/sdks/workflowsevents/README.md#receive_workflow_events_batch) - Receive Workflow Events Batch
+* [get_stream_events](https://github.com/mistralai/client-python/blob/main/docs/sdks/workflowsevents/README.md#get_stream_events) - Get Stream Events
+* [get_workflow_events](https://github.com/mistralai/client-python/blob/main/docs/sdks/workflowsevents/README.md#get_workflow_events) - Get Workflow Events
+
+#### [Workflows.Executions](https://github.com/mistralai/client-python/blob/main/docs/sdks/executions/README.md)
+
+* [get_workflow_execution](https://github.com/mistralai/client-python/blob/main/docs/sdks/executions/README.md#get_workflow_execution) - Get Workflow Execution
+* [get_workflow_execution_history](https://github.com/mistralai/client-python/blob/main/docs/sdks/executions/README.md#get_workflow_execution_history) - Get Workflow Execution History
+* [signal_workflow_execution](https://github.com/mistralai/client-python/blob/main/docs/sdks/executions/README.md#signal_workflow_execution) - Signal Workflow Execution
+* [query_workflow_execution](https://github.com/mistralai/client-python/blob/main/docs/sdks/executions/README.md#query_workflow_execution) - Query Workflow Execution
+* [terminate_workflow_execution](https://github.com/mistralai/client-python/blob/main/docs/sdks/executions/README.md#terminate_workflow_execution) - Terminate Workflow Execution
+* [batch_terminate_workflow_executions](https://github.com/mistralai/client-python/blob/main/docs/sdks/executions/README.md#batch_terminate_workflow_executions) - Batch Terminate Workflow Executions
+* [cancel_workflow_execution](https://github.com/mistralai/client-python/blob/main/docs/sdks/executions/README.md#cancel_workflow_execution) - Cancel Workflow Execution
+* [batch_cancel_workflow_executions](https://github.com/mistralai/client-python/blob/main/docs/sdks/executions/README.md#batch_cancel_workflow_executions) - Batch Cancel Workflow Executions
+* [reset_workflow](https://github.com/mistralai/client-python/blob/main/docs/sdks/executions/README.md#reset_workflow) - Reset Workflow
+* [update_workflow_execution](https://github.com/mistralai/client-python/blob/main/docs/sdks/executions/README.md#update_workflow_execution) - Update Workflow Execution
+* [get_workflow_execution_trace_otel](https://github.com/mistralai/client-python/blob/main/docs/sdks/executions/README.md#get_workflow_execution_trace_otel) - Get Workflow Execution Trace Otel
+* [get_workflow_execution_trace_summary](https://github.com/mistralai/client-python/blob/main/docs/sdks/executions/README.md#get_workflow_execution_trace_summary) - Get Workflow Execution Trace Summary
+* [get_workflow_execution_trace_events](https://github.com/mistralai/client-python/blob/main/docs/sdks/executions/README.md#get_workflow_execution_trace_events) - Get Workflow Execution Trace Events
+* [stream](https://github.com/mistralai/client-python/blob/main/docs/sdks/executions/README.md#stream) - Stream
+
+#### [Workflows.Metrics](https://github.com/mistralai/client-python/blob/main/docs/sdks/metrics/README.md)
+
+* [get_workflow_metrics](https://github.com/mistralai/client-python/blob/main/docs/sdks/metrics/README.md#get_workflow_metrics) - Get Workflow Metrics
+
+#### [Workflows.Runs](https://github.com/mistralai/client-python/blob/main/docs/sdks/runs/README.md)
+
+* [list_runs](https://github.com/mistralai/client-python/blob/main/docs/sdks/runs/README.md#list_runs) - List Runs
+* [get_run](https://github.com/mistralai/client-python/blob/main/docs/sdks/runs/README.md#get_run) - Get Run
+* [get_run_history](https://github.com/mistralai/client-python/blob/main/docs/sdks/runs/README.md#get_run_history) - Get Run History
+
+#### [Workflows.Schedules](https://github.com/mistralai/client-python/blob/main/docs/sdks/schedules/README.md)
+
+* [get_schedules](https://github.com/mistralai/client-python/blob/main/docs/sdks/schedules/README.md#get_schedules) - Get Schedules
+* [schedule_workflow](https://github.com/mistralai/client-python/blob/main/docs/sdks/schedules/README.md#schedule_workflow) - Schedule Workflow
+* [unschedule_workflow](https://github.com/mistralai/client-python/blob/main/docs/sdks/schedules/README.md#unschedule_workflow) - Unschedule Workflow
+
@@ -708,6 +774,33 @@ with Mistral(
[context-manager]: https://book.pythontips.com/en/latest/context_managers.html
+
+## Pagination
+
+Some of the endpoints in this SDK support pagination. To use pagination, you make your SDK calls as usual, but the
+ returned response object will have a `next` method that can be called to pull down the next group of results. If the
+ return value of `next` is `None`, then there are no more pages to be fetched.
+
+Here's an example of one such pagination call:
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ res = mistral.workflows.get_workflows(active_only=False, include_shared=True, limit=50)
+
+ while res is not None:
+ # Handle items
+
+ res = res.next()
+
+```
+
+
## File uploads
@@ -851,8 +944,8 @@ with Mistral(
**Inherit from [`MistralError`](https://github.com/mistralai/client-python/blob/main/src/mistralai/client/errors/mistralerror.py)**:
-* [`HTTPValidationError`](https://github.com/mistralai/client-python/blob/main/src/mistralai/client/errors/httpvalidationerror.py): Validation Error. Status code `422`. Applicable to 69 of 131 methods.*
-* [`ObservabilityError`](https://github.com/mistralai/client-python/blob/main/src/mistralai/client/errors/observabilityerror.py): Bad Request - Invalid request parameters or data. Applicable to 41 of 131 methods.*
+* [`HTTPValidationError`](https://github.com/mistralai/client-python/blob/main/src/mistralai/client/errors/httpvalidationerror.py): Validation Error. Status code `422`. Applicable to 105 of 172 methods.*
+* [`ObservabilityError`](https://github.com/mistralai/client-python/blob/main/src/mistralai/client/errors/observabilityerror.py): Bad Request - Invalid request parameters or data. Applicable to 41 of 172 methods.*
* [`ResponseValidationError`](https://github.com/mistralai/client-python/blob/main/src/mistralai/client/errors/responsevalidationerror.py): Type mismatch between the response data and the expected Pydantic model. Provides access to the Pydantic validation error via the `cause` attribute.
diff --git a/README.md b/README.md
index 46ad3c76..4c64fb09 100644
--- a/README.md
+++ b/README.md
@@ -36,6 +36,7 @@ Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create yo
* [Providers' SDKs Example Usage](#providers-sdks-example-usage)
* [Available Resources and Operations](#available-resources-and-operations)
* [Server-sent event streaming](#server-sent-event-streaming)
+ * [Pagination](#pagination)
* [File uploads](#file-uploads)
* [Retries](#retries)
* [Error Handling](#error-handling)
@@ -490,6 +491,7 @@ print(res.choices[0].message.content)
* [list](docs/sdks/batchjobs/README.md#list) - Get Batch Jobs
* [create](docs/sdks/batchjobs/README.md#create) - Create Batch Job
* [get](docs/sdks/batchjobs/README.md#get) - Get Batch Job
+* [delete](docs/sdks/batchjobs/README.md#delete) - Delete Batch Job
* [cancel](docs/sdks/batchjobs/README.md#cancel) - Cancel Batch Job
### [Beta.Agents](docs/sdks/betaagents/README.md)
@@ -633,6 +635,13 @@ print(res.choices[0].message.content)
* [create](docs/sdks/embeddings/README.md#create) - Embeddings
+### [Events](docs/sdks/events/README.md)
+
+* [receive_workflow_event](docs/sdks/events/README.md#receive_workflow_event) - Receive Workflow Event
+* [receive_workflow_events_batch](docs/sdks/events/README.md#receive_workflow_events_batch) - Receive Workflow Events Batch
+* [get_stream_events](docs/sdks/events/README.md#get_stream_events) - Get Stream Events
+* [get_workflow_events](docs/sdks/events/README.md#get_workflow_events) - Get Workflow Events
+
### [Files](docs/sdks/files/README.md)
* [upload](docs/sdks/files/README.md#upload) - Upload File
@@ -668,6 +677,63 @@ print(res.choices[0].message.content)
* [process](docs/sdks/ocr/README.md#process) - OCR
+### [Workflows](docs/sdks/workflows/README.md)
+
+* [get_workflows](docs/sdks/workflows/README.md#get_workflows) - Get Workflows
+* [get_workflow_registrations](docs/sdks/workflows/README.md#get_workflow_registrations) - Get Workflow Registrations
+* [execute_workflow](docs/sdks/workflows/README.md#execute_workflow) - Execute Workflow
+* [~~execute_workflow_registration~~](docs/sdks/workflows/README.md#execute_workflow_registration) - Execute Workflow Registration :warning: **Deprecated**
+* [get_workflow](docs/sdks/workflows/README.md#get_workflow) - Get Workflow
+* [update_workflow](docs/sdks/workflows/README.md#update_workflow) - Update Workflow
+* [get_workflow_registration](docs/sdks/workflows/README.md#get_workflow_registration) - Get Workflow Registration
+* [archive_workflow](docs/sdks/workflows/README.md#archive_workflow) - Archive Workflow
+* [unarchive_workflow](docs/sdks/workflows/README.md#unarchive_workflow) - Unarchive Workflow
+
+#### [Workflows.Deployments](docs/sdks/deployments/README.md)
+
+* [list_deployments](docs/sdks/deployments/README.md#list_deployments) - List Deployments
+* [get_deployment](docs/sdks/deployments/README.md#get_deployment) - Get Deployment
+
+#### [Workflows.Events](docs/sdks/workflowsevents/README.md)
+
+* [receive_workflow_event](docs/sdks/workflowsevents/README.md#receive_workflow_event) - Receive Workflow Event
+* [receive_workflow_events_batch](docs/sdks/workflowsevents/README.md#receive_workflow_events_batch) - Receive Workflow Events Batch
+* [get_stream_events](docs/sdks/workflowsevents/README.md#get_stream_events) - Get Stream Events
+* [get_workflow_events](docs/sdks/workflowsevents/README.md#get_workflow_events) - Get Workflow Events
+
+#### [Workflows.Executions](docs/sdks/executions/README.md)
+
+* [get_workflow_execution](docs/sdks/executions/README.md#get_workflow_execution) - Get Workflow Execution
+* [get_workflow_execution_history](docs/sdks/executions/README.md#get_workflow_execution_history) - Get Workflow Execution History
+* [signal_workflow_execution](docs/sdks/executions/README.md#signal_workflow_execution) - Signal Workflow Execution
+* [query_workflow_execution](docs/sdks/executions/README.md#query_workflow_execution) - Query Workflow Execution
+* [terminate_workflow_execution](docs/sdks/executions/README.md#terminate_workflow_execution) - Terminate Workflow Execution
+* [batch_terminate_workflow_executions](docs/sdks/executions/README.md#batch_terminate_workflow_executions) - Batch Terminate Workflow Executions
+* [cancel_workflow_execution](docs/sdks/executions/README.md#cancel_workflow_execution) - Cancel Workflow Execution
+* [batch_cancel_workflow_executions](docs/sdks/executions/README.md#batch_cancel_workflow_executions) - Batch Cancel Workflow Executions
+* [reset_workflow](docs/sdks/executions/README.md#reset_workflow) - Reset Workflow
+* [update_workflow_execution](docs/sdks/executions/README.md#update_workflow_execution) - Update Workflow Execution
+* [get_workflow_execution_trace_otel](docs/sdks/executions/README.md#get_workflow_execution_trace_otel) - Get Workflow Execution Trace Otel
+* [get_workflow_execution_trace_summary](docs/sdks/executions/README.md#get_workflow_execution_trace_summary) - Get Workflow Execution Trace Summary
+* [get_workflow_execution_trace_events](docs/sdks/executions/README.md#get_workflow_execution_trace_events) - Get Workflow Execution Trace Events
+* [stream](docs/sdks/executions/README.md#stream) - Stream
+
+#### [Workflows.Metrics](docs/sdks/metrics/README.md)
+
+* [get_workflow_metrics](docs/sdks/metrics/README.md#get_workflow_metrics) - Get Workflow Metrics
+
+#### [Workflows.Runs](docs/sdks/runs/README.md)
+
+* [list_runs](docs/sdks/runs/README.md#list_runs) - List Runs
+* [get_run](docs/sdks/runs/README.md#get_run) - Get Run
+* [get_run_history](docs/sdks/runs/README.md#get_run_history) - Get Run History
+
+#### [Workflows.Schedules](docs/sdks/schedules/README.md)
+
+* [get_schedules](docs/sdks/schedules/README.md#get_schedules) - Get Schedules
+* [schedule_workflow](docs/sdks/schedules/README.md#schedule_workflow) - Schedule Workflow
+* [unschedule_workflow](docs/sdks/schedules/README.md#unschedule_workflow) - Unschedule Workflow
+
@@ -708,6 +774,33 @@ with Mistral(
[context-manager]: https://book.pythontips.com/en/latest/context_managers.html
+
+## Pagination
+
+Some of the endpoints in this SDK support pagination. To use pagination, you make your SDK calls as usual, but the
+ returned response object will have a `next` method that can be called to pull down the next group of results. If the
+ return value of `next` is `None`, then there are no more pages to be fetched.
+
+Here's an example of one such pagination call:
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ res = mistral.workflows.get_workflows(active_only=False, include_shared=True, limit=50)
+
+ while res is not None:
+ # Handle items
+
+ res = res.next()
+
+```
+
+
## File uploads
@@ -851,8 +944,8 @@ with Mistral(
**Inherit from [`MistralError`](./src/mistralai/client/errors/mistralerror.py)**:
-* [`HTTPValidationError`](./src/mistralai/client/errors/httpvalidationerror.py): Validation Error. Status code `422`. Applicable to 69 of 131 methods.*
-* [`ObservabilityError`](./src/mistralai/client/errors/observabilityerror.py): Bad Request - Invalid request parameters or data. Applicable to 41 of 131 methods.*
+* [`HTTPValidationError`](./src/mistralai/client/errors/httpvalidationerror.py): Validation Error. Status code `422`. Applicable to 105 of 172 methods.*
+* [`ObservabilityError`](./src/mistralai/client/errors/observabilityerror.py): Bad Request - Invalid request parameters or data. Applicable to 41 of 172 methods.*
* [`ResponseValidationError`](./src/mistralai/client/errors/responsevalidationerror.py): Type mismatch between the response data and the expected Pydantic model. Provides access to the Pydantic validation error via the `cause` attribute.
diff --git a/RELEASES.md b/RELEASES.md
index 954b2675..ed266e46 100644
--- a/RELEASES.md
+++ b/RELEASES.md
@@ -488,4 +488,14 @@ Based on:
### Generated
- [python v2.1.3] .
### Releases
-- [PyPI v2.1.3] https://pypi.org/project/mistralai/2.1.3 - .
\ No newline at end of file
+- [PyPI v2.1.3] https://pypi.org/project/mistralai/2.1.3 - .
+
+## 2026-03-30 14:56:34
+### Changes
+Based on:
+- OpenAPI Doc
+- Speakeasy CLI 1.754.0 (2.862.0) https://github.com/speakeasy-api/speakeasy
+### Generated
+- [python v2.2.0rc1] .
+### Releases
+- [PyPI v2.2.0rc1] https://pypi.org/project/mistralai/2.2.0rc1 - .
\ No newline at end of file
diff --git a/docs/models/activitytaskcompletedattributesrequest.md b/docs/models/activitytaskcompletedattributesrequest.md
new file mode 100644
index 00000000..5050b558
--- /dev/null
+++ b/docs/models/activitytaskcompletedattributesrequest.md
@@ -0,0 +1,12 @@
+# ActivityTaskCompletedAttributesRequest
+
+Attributes for activity task completed events.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- |
+| `task_id` | *str* | :heavy_check_mark: | Unique identifier for the activity task within the workflow. |
+| `activity_name` | *str* | :heavy_check_mark: | The registered name of the activity being executed. |
+| `result` | [models.JSONPayloadRequest](../models/jsonpayloadrequest.md) | :heavy_check_mark: | A payload containing arbitrary JSON data.
Used for complete state snapshots or final results. |
\ No newline at end of file
diff --git a/docs/models/activitytaskcompletedattributesresponse.md b/docs/models/activitytaskcompletedattributesresponse.md
new file mode 100644
index 00000000..5baaffa6
--- /dev/null
+++ b/docs/models/activitytaskcompletedattributesresponse.md
@@ -0,0 +1,12 @@
+# ActivityTaskCompletedAttributesResponse
+
+Attributes for activity task completed events.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- |
+| `task_id` | *str* | :heavy_check_mark: | Unique identifier for the activity task within the workflow. |
+| `activity_name` | *str* | :heavy_check_mark: | The registered name of the activity being executed. |
+| `result` | [models.JSONPayloadResponse](../models/jsonpayloadresponse.md) | :heavy_check_mark: | A payload containing arbitrary JSON data.
Used for complete state snapshots or final results. |
\ No newline at end of file
diff --git a/docs/models/activitytaskcompletedrequest.md b/docs/models/activitytaskcompletedrequest.md
new file mode 100644
index 00000000..9008bbf1
--- /dev/null
+++ b/docs/models/activitytaskcompletedrequest.md
@@ -0,0 +1,20 @@
+# ActivityTaskCompletedRequest
+
+Emitted when an activity task completes successfully.
+
+Contains timing information about the successful execution.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- |
+| `event_id` | *str* | :heavy_check_mark: | Unique identifier for this event instance. |
+| `event_timestamp` | *Optional[int]* | :heavy_minus_sign: | Unix timestamp in nanoseconds when the event was created. |
+| `root_workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the root workflow that initiated this execution chain. |
+| `parent_workflow_exec_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set. |
+| `workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the workflow that emitted this event. |
+| `workflow_run_id` | *str* | :heavy_check_mark: | Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same. |
+| `workflow_name` | *str* | :heavy_check_mark: | The registered name of the workflow that emitted this event. |
+| `event_type` | *Optional[Literal["ACTIVITY_TASK_COMPLETED"]]* | :heavy_minus_sign: | Event type discriminator. |
+| `attributes` | [models.ActivityTaskCompletedAttributesRequest](../models/activitytaskcompletedattributesrequest.md) | :heavy_check_mark: | Attributes for activity task completed events. |
\ No newline at end of file
diff --git a/docs/models/activitytaskcompletedresponse.md b/docs/models/activitytaskcompletedresponse.md
new file mode 100644
index 00000000..56a64631
--- /dev/null
+++ b/docs/models/activitytaskcompletedresponse.md
@@ -0,0 +1,20 @@
+# ActivityTaskCompletedResponse
+
+Emitted when an activity task completes successfully.
+
+Contains timing information about the successful execution.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- |
+| `event_id` | *str* | :heavy_check_mark: | Unique identifier for this event instance. |
+| `event_timestamp` | *int* | :heavy_check_mark: | Unix timestamp in nanoseconds when the event was created. |
+| `root_workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the root workflow that initiated this execution chain. |
+| `parent_workflow_exec_id` | *Nullable[str]* | :heavy_check_mark: | Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set. |
+| `workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the workflow that emitted this event. |
+| `workflow_run_id` | *str* | :heavy_check_mark: | Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same. |
+| `workflow_name` | *str* | :heavy_check_mark: | The registered name of the workflow that emitted this event. |
+| `event_type` | *Optional[Literal["ACTIVITY_TASK_COMPLETED"]]* | :heavy_minus_sign: | Event type discriminator. |
+| `attributes` | [models.ActivityTaskCompletedAttributesResponse](../models/activitytaskcompletedattributesresponse.md) | :heavy_check_mark: | Attributes for activity task completed events. |
\ No newline at end of file
diff --git a/docs/models/activitytaskfailedattributes.md b/docs/models/activitytaskfailedattributes.md
new file mode 100644
index 00000000..5cb9cafe
--- /dev/null
+++ b/docs/models/activitytaskfailedattributes.md
@@ -0,0 +1,13 @@
+# ActivityTaskFailedAttributes
+
+Attributes for activity task failed events (final failure after all retries).
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- |
+| `task_id` | *str* | :heavy_check_mark: | Unique identifier for the activity task within the workflow. |
+| `activity_name` | *str* | :heavy_check_mark: | The registered name of the activity being executed. |
+| `attempt` | *int* | :heavy_check_mark: | The final attempt number that failed (1-indexed). |
+| `failure` | [models.Failure](../models/failure.md) | :heavy_check_mark: | Represents an error or exception that occurred during execution. |
\ No newline at end of file
diff --git a/docs/models/activitytaskfailedrequest.md b/docs/models/activitytaskfailedrequest.md
new file mode 100644
index 00000000..1db1d90a
--- /dev/null
+++ b/docs/models/activitytaskfailedrequest.md
@@ -0,0 +1,20 @@
+# ActivityTaskFailedRequest
+
+Emitted when an activity task fails after exhausting all retry attempts.
+
+This is a terminal event indicating the activity could not complete successfully.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- |
+| `event_id` | *str* | :heavy_check_mark: | Unique identifier for this event instance. |
+| `event_timestamp` | *Optional[int]* | :heavy_minus_sign: | Unix timestamp in nanoseconds when the event was created. |
+| `root_workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the root workflow that initiated this execution chain. |
+| `parent_workflow_exec_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set. |
+| `workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the workflow that emitted this event. |
+| `workflow_run_id` | *str* | :heavy_check_mark: | Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same. |
+| `workflow_name` | *str* | :heavy_check_mark: | The registered name of the workflow that emitted this event. |
+| `event_type` | *Optional[Literal["ACTIVITY_TASK_FAILED"]]* | :heavy_minus_sign: | Event type discriminator. |
+| `attributes` | [models.ActivityTaskFailedAttributes](../models/activitytaskfailedattributes.md) | :heavy_check_mark: | Attributes for activity task failed events (final failure after all retries). |
\ No newline at end of file
diff --git a/docs/models/activitytaskfailedresponse.md b/docs/models/activitytaskfailedresponse.md
new file mode 100644
index 00000000..0853f68d
--- /dev/null
+++ b/docs/models/activitytaskfailedresponse.md
@@ -0,0 +1,20 @@
+# ActivityTaskFailedResponse
+
+Emitted when an activity task fails after exhausting all retry attempts.
+
+This is a terminal event indicating the activity could not complete successfully.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- |
+| `event_id` | *str* | :heavy_check_mark: | Unique identifier for this event instance. |
+| `event_timestamp` | *int* | :heavy_check_mark: | Unix timestamp in nanoseconds when the event was created. |
+| `root_workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the root workflow that initiated this execution chain. |
+| `parent_workflow_exec_id` | *Nullable[str]* | :heavy_check_mark: | Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set. |
+| `workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the workflow that emitted this event. |
+| `workflow_run_id` | *str* | :heavy_check_mark: | Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same. |
+| `workflow_name` | *str* | :heavy_check_mark: | The registered name of the workflow that emitted this event. |
+| `event_type` | *Optional[Literal["ACTIVITY_TASK_FAILED"]]* | :heavy_minus_sign: | Event type discriminator. |
+| `attributes` | [models.ActivityTaskFailedAttributes](../models/activitytaskfailedattributes.md) | :heavy_check_mark: | Attributes for activity task failed events (final failure after all retries). |
\ No newline at end of file
diff --git a/docs/models/activitytaskretryingattributes.md b/docs/models/activitytaskretryingattributes.md
new file mode 100644
index 00000000..92166076
--- /dev/null
+++ b/docs/models/activitytaskretryingattributes.md
@@ -0,0 +1,13 @@
+# ActivityTaskRetryingAttributes
+
+Attributes for activity task retrying events.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- |
+| `task_id` | *str* | :heavy_check_mark: | Unique identifier for the activity task within the workflow. |
+| `activity_name` | *str* | :heavy_check_mark: | The registered name of the activity being executed. |
+| `attempt` | *int* | :heavy_check_mark: | The attempt number that failed (1-indexed). |
+| `failure` | [models.Failure](../models/failure.md) | :heavy_check_mark: | Represents an error or exception that occurred during execution. |
\ No newline at end of file
diff --git a/docs/models/activitytaskretryingrequest.md b/docs/models/activitytaskretryingrequest.md
new file mode 100644
index 00000000..fb6e27b5
--- /dev/null
+++ b/docs/models/activitytaskretryingrequest.md
@@ -0,0 +1,20 @@
+# ActivityTaskRetryingRequest
+
+Emitted when an activity task fails and will be retried.
+
+Contains information about the failed attempt and the error that occurred.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- |
+| `event_id` | *str* | :heavy_check_mark: | Unique identifier for this event instance. |
+| `event_timestamp` | *Optional[int]* | :heavy_minus_sign: | Unix timestamp in nanoseconds when the event was created. |
+| `root_workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the root workflow that initiated this execution chain. |
+| `parent_workflow_exec_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set. |
+| `workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the workflow that emitted this event. |
+| `workflow_run_id` | *str* | :heavy_check_mark: | Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same. |
+| `workflow_name` | *str* | :heavy_check_mark: | The registered name of the workflow that emitted this event. |
+| `event_type` | *Optional[Literal["ACTIVITY_TASK_RETRYING"]]* | :heavy_minus_sign: | Event type discriminator. |
+| `attributes` | [models.ActivityTaskRetryingAttributes](../models/activitytaskretryingattributes.md) | :heavy_check_mark: | Attributes for activity task retrying events. |
\ No newline at end of file
diff --git a/docs/models/activitytaskretryingresponse.md b/docs/models/activitytaskretryingresponse.md
new file mode 100644
index 00000000..8b7bdd42
--- /dev/null
+++ b/docs/models/activitytaskretryingresponse.md
@@ -0,0 +1,20 @@
+# ActivityTaskRetryingResponse
+
+Emitted when an activity task fails and will be retried.
+
+Contains information about the failed attempt and the error that occurred.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- |
+| `event_id` | *str* | :heavy_check_mark: | Unique identifier for this event instance. |
+| `event_timestamp` | *int* | :heavy_check_mark: | Unix timestamp in nanoseconds when the event was created. |
+| `root_workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the root workflow that initiated this execution chain. |
+| `parent_workflow_exec_id` | *Nullable[str]* | :heavy_check_mark: | Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set. |
+| `workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the workflow that emitted this event. |
+| `workflow_run_id` | *str* | :heavy_check_mark: | Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same. |
+| `workflow_name` | *str* | :heavy_check_mark: | The registered name of the workflow that emitted this event. |
+| `event_type` | *Optional[Literal["ACTIVITY_TASK_RETRYING"]]* | :heavy_minus_sign: | Event type discriminator. |
+| `attributes` | [models.ActivityTaskRetryingAttributes](../models/activitytaskretryingattributes.md) | :heavy_check_mark: | Attributes for activity task retrying events. |
\ No newline at end of file
diff --git a/docs/models/activitytaskstartedattributesrequest.md b/docs/models/activitytaskstartedattributesrequest.md
new file mode 100644
index 00000000..e264b20b
--- /dev/null
+++ b/docs/models/activitytaskstartedattributesrequest.md
@@ -0,0 +1,12 @@
+# ActivityTaskStartedAttributesRequest
+
+Attributes for activity task started events.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- |
+| `task_id` | *str* | :heavy_check_mark: | Unique identifier for the activity task within the workflow. |
+| `activity_name` | *str* | :heavy_check_mark: | The registered name of the activity being executed. |
+| `input` | [models.JSONPayloadRequest](../models/jsonpayloadrequest.md) | :heavy_check_mark: | A payload containing arbitrary JSON data. Used for complete state snapshots or final results. |
\ No newline at end of file
diff --git a/docs/models/activitytaskstartedattributesresponse.md b/docs/models/activitytaskstartedattributesresponse.md
new file mode 100644
index 00000000..5d1f59a8
--- /dev/null
+++ b/docs/models/activitytaskstartedattributesresponse.md
@@ -0,0 +1,12 @@
+# ActivityTaskStartedAttributesResponse
+
+Attributes for activity task started events.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- |
+| `task_id` | *str* | :heavy_check_mark: | Unique identifier for the activity task within the workflow. |
+| `activity_name` | *str* | :heavy_check_mark: | The registered name of the activity being executed. |
+| `input` | [models.JSONPayloadResponse](../models/jsonpayloadresponse.md) | :heavy_check_mark: | A payload containing arbitrary JSON data. Used for complete state snapshots or final results. |
\ No newline at end of file
diff --git a/docs/models/activitytaskstartedrequest.md b/docs/models/activitytaskstartedrequest.md
new file mode 100644
index 00000000..02421624
--- /dev/null
+++ b/docs/models/activitytaskstartedrequest.md
@@ -0,0 +1,21 @@
+# ActivityTaskStartedRequest
+
+Emitted when an activity task begins execution.
+
+This is the first event for an activity, emitted on the first attempt only.
+Subsequent retry attempts emit ACTIVITY_TASK_RETRYING instead.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- |
+| `event_id` | *str* | :heavy_check_mark: | Unique identifier for this event instance. |
+| `event_timestamp` | *Optional[int]* | :heavy_minus_sign: | Unix timestamp in nanoseconds when the event was created. |
+| `root_workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the root workflow that initiated this execution chain. |
+| `parent_workflow_exec_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set. |
+| `workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the workflow that emitted this event. |
+| `workflow_run_id` | *str* | :heavy_check_mark: | Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same. |
+| `workflow_name` | *str* | :heavy_check_mark: | The registered name of the workflow that emitted this event. |
+| `event_type` | *Optional[Literal["ACTIVITY_TASK_STARTED"]]* | :heavy_minus_sign: | Event type discriminator. |
+| `attributes` | [models.ActivityTaskStartedAttributesRequest](../models/activitytaskstartedattributesrequest.md) | :heavy_check_mark: | Attributes for activity task started events. |
\ No newline at end of file
diff --git a/docs/models/activitytaskstartedresponse.md b/docs/models/activitytaskstartedresponse.md
new file mode 100644
index 00000000..1aee1533
--- /dev/null
+++ b/docs/models/activitytaskstartedresponse.md
@@ -0,0 +1,21 @@
+# ActivityTaskStartedResponse
+
+Emitted when an activity task begins execution.
+
+This is the first event for an activity, emitted on the first attempt only.
+Subsequent retry attempts emit ACTIVITY_TASK_RETRYING instead.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- |
+| `event_id` | *str* | :heavy_check_mark: | Unique identifier for this event instance. |
+| `event_timestamp` | *int* | :heavy_check_mark: | Unix timestamp in nanoseconds when the event was created. |
+| `root_workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the root workflow that initiated this execution chain. |
+| `parent_workflow_exec_id` | *Nullable[str]* | :heavy_check_mark: | Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set. |
+| `workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the workflow that emitted this event. |
+| `workflow_run_id` | *str* | :heavy_check_mark: | Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same. |
+| `workflow_name` | *str* | :heavy_check_mark: | The registered name of the workflow that emitted this event. |
+| `event_type` | *Optional[Literal["ACTIVITY_TASK_STARTED"]]* | :heavy_minus_sign: | Event type discriminator. |
+| `attributes` | [models.ActivityTaskStartedAttributesResponse](../models/activitytaskstartedattributesresponse.md) | :heavy_check_mark: | Attributes for activity task started events. |
\ No newline at end of file
diff --git a/docs/models/archiveworkflowv1workflowsworkflowidentifierarchiveputrequest.md b/docs/models/archiveworkflowv1workflowsworkflowidentifierarchiveputrequest.md
new file mode 100644
index 00000000..6ebe540a
--- /dev/null
+++ b/docs/models/archiveworkflowv1workflowsworkflowidentifierarchiveputrequest.md
@@ -0,0 +1,8 @@
+# ArchiveWorkflowV1WorkflowsWorkflowIdentifierArchivePutRequest
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------- | --------------------- | --------------------- | --------------------- |
+| `workflow_identifier` | *str* | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/basefielddefinition.md b/docs/models/basefielddefinition.md
index 3f7abea9..3d721d91 100644
--- a/docs/models/basefielddefinition.md
+++ b/docs/models/basefielddefinition.md
@@ -3,10 +3,10 @@
## Fields
-| Field | Type | Required | Description |
-| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- |
-| `name` | *str* | :heavy_check_mark: | N/A |
-| `label` | *str* | :heavy_check_mark: | N/A |
-| `type` | [models.TypeEnum](../models/typeenum.md) | :heavy_check_mark: | N/A |
-| `group` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
-| `supported_operators` | List[[models.SupportedOperator](../models/supportedoperator.md)] | :heavy_check_mark: | N/A |
\ No newline at end of file
+| Field | Type | Required | Description |
+| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- |
+| `name` | *str* | :heavy_check_mark: | N/A |
+| `label` | *str* | :heavy_check_mark: | N/A |
+| `type` | [models.BaseFieldDefinitionType](../models/basefielddefinitiontype.md) | :heavy_check_mark: | N/A |
+| `group` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
+| `supported_operators` | List[[models.SupportedOperator](../models/supportedoperator.md)] | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/typeenum.md b/docs/models/basefielddefinitiontype.md
similarity index 67%
rename from docs/models/typeenum.md
rename to docs/models/basefielddefinitiontype.md
index ec47cf96..efdff2a4 100644
--- a/docs/models/typeenum.md
+++ b/docs/models/basefielddefinitiontype.md
@@ -1,12 +1,12 @@
-# TypeEnum
+# BaseFieldDefinitionType
## Example Usage
```python
-from mistralai.client.models import TypeEnum
+from mistralai.client.models import BaseFieldDefinitionType
# Open enum: unrecognized values are captured as UnrecognizedStr
-value: TypeEnum = "ENUM"
+value: BaseFieldDefinitionType = "ENUM"
```
diff --git a/docs/models/batchexecutionbody.md b/docs/models/batchexecutionbody.md
new file mode 100644
index 00000000..9c31e334
--- /dev/null
+++ b/docs/models/batchexecutionbody.md
@@ -0,0 +1,8 @@
+# BatchExecutionBody
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| -------------------------------- | -------------------------------- | -------------------------------- | -------------------------------- |
+| `execution_ids` | List[*str*] | :heavy_check_mark: | List of execution IDs to process |
\ No newline at end of file
diff --git a/docs/models/batchexecutionresponse.md b/docs/models/batchexecutionresponse.md
new file mode 100644
index 00000000..394b6d86
--- /dev/null
+++ b/docs/models/batchexecutionresponse.md
@@ -0,0 +1,8 @@
+# BatchExecutionResponse
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------------------------------------------- | --------------------------------------------------------------------------- | --------------------------------------------------------------------------- | --------------------------------------------------------------------------- |
+| `results` | Dict[str, [models.BatchExecutionResult](../models/batchexecutionresult.md)] | :heavy_minus_sign: | Mapping of execution_id to result with status and optional error message |
\ No newline at end of file
diff --git a/docs/models/batchexecutionresult.md b/docs/models/batchexecutionresult.md
new file mode 100644
index 00000000..57107bba
--- /dev/null
+++ b/docs/models/batchexecutionresult.md
@@ -0,0 +1,9 @@
+# BatchExecutionResult
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ----------------------------------------- | ----------------------------------------- | ----------------------------------------- | ----------------------------------------- |
+| `status` | *str* | :heavy_check_mark: | Status of the operation (success/failure) |
+| `error` | *OptionalNullable[str]* | :heavy_minus_sign: | Error message if operation failed |
\ No newline at end of file
diff --git a/docs/models/cancelworkflowexecutionv1workflowsexecutionsexecutionidcancelpostrequest.md b/docs/models/cancelworkflowexecutionv1workflowsexecutionsexecutionidcancelpostrequest.md
new file mode 100644
index 00000000..4968d664
--- /dev/null
+++ b/docs/models/cancelworkflowexecutionv1workflowsexecutionsexecutionidcancelpostrequest.md
@@ -0,0 +1,8 @@
+# CancelWorkflowExecutionV1WorkflowsExecutionsExecutionIDCancelPostRequest
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------ | ------------------ | ------------------ | ------------------ |
+| `execution_id` | *str* | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/classifierfinetunedmodel.md b/docs/models/classifierfinetunedmodel.md
index ad05f931..9fa69cbb 100644
--- a/docs/models/classifierfinetunedmodel.md
+++ b/docs/models/classifierfinetunedmodel.md
@@ -18,6 +18,6 @@
| `capabilities` | [models.FineTunedModelCapabilities](../models/finetunedmodelcapabilities.md) | :heavy_check_mark: | N/A |
| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A |
| `aliases` | List[*str*] | :heavy_minus_sign: | N/A |
-| `job` | *str* | :heavy_check_mark: | N/A |
+| `job` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
| `classifier_targets` | List[[models.ClassifierTargetResult](../models/classifiertargetresult.md)] | :heavy_check_mark: | N/A |
| `model_type` | *Literal["classifier"]* | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/completionfinetunedmodel.md b/docs/models/completionfinetunedmodel.md
index 0055db02..26bcdb17 100644
--- a/docs/models/completionfinetunedmodel.md
+++ b/docs/models/completionfinetunedmodel.md
@@ -18,5 +18,5 @@
| `capabilities` | [models.FineTunedModelCapabilities](../models/finetunedmodelcapabilities.md) | :heavy_check_mark: | N/A |
| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A |
| `aliases` | List[*str*] | :heavy_minus_sign: | N/A |
-| `job` | *str* | :heavy_check_mark: | N/A |
+| `job` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
| `model_type` | *Literal["completion"]* | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/connectorcalltoolv1request.md b/docs/models/connectorcalltoolv1request.md
index cdda08cf..dea7083d 100644
--- a/docs/models/connectorcalltoolv1request.md
+++ b/docs/models/connectorcalltoolv1request.md
@@ -6,5 +6,6 @@
| Field | Type | Required | Description |
| ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ |
| `tool_name` | *str* | :heavy_check_mark: | N/A |
+| `credentials_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
| `connector_id_or_name` | *str* | :heavy_check_mark: | N/A |
| `connector_call_tool_request` | [models.ConnectorCallToolRequest](../models/connectorcalltoolrequest.md) | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/customtaskcanceledattributes.md b/docs/models/customtaskcanceledattributes.md
new file mode 100644
index 00000000..3a5facdd
--- /dev/null
+++ b/docs/models/customtaskcanceledattributes.md
@@ -0,0 +1,12 @@
+# CustomTaskCanceledAttributes
+
+Attributes for custom task canceled events.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- |
+| `custom_task_id` | *str* | :heavy_check_mark: | Unique identifier for the custom task within the workflow. |
+| `custom_task_type` | *str* | :heavy_check_mark: | The type/category of the custom task (e.g., 'llm_call', 'api_request'). |
+| `reason` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional reason provided for the cancellation. |
\ No newline at end of file
diff --git a/docs/models/customtaskcanceledrequest.md b/docs/models/customtaskcanceledrequest.md
new file mode 100644
index 00000000..4a313ab1
--- /dev/null
+++ b/docs/models/customtaskcanceledrequest.md
@@ -0,0 +1,20 @@
+# CustomTaskCanceledRequest
+
+Emitted when a custom task is canceled.
+
+Indicates the task was explicitly stopped before completion.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- |
+| `event_id` | *str* | :heavy_check_mark: | Unique identifier for this event instance. |
+| `event_timestamp` | *Optional[int]* | :heavy_minus_sign: | Unix timestamp in nanoseconds when the event was created. |
+| `root_workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the root workflow that initiated this execution chain. |
+| `parent_workflow_exec_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set. |
+| `workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the workflow that emitted this event. |
+| `workflow_run_id` | *str* | :heavy_check_mark: | Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same. |
+| `workflow_name` | *str* | :heavy_check_mark: | The registered name of the workflow that emitted this event. |
+| `event_type` | *Optional[Literal["CUSTOM_TASK_CANCELED"]]* | :heavy_minus_sign: | Event type discriminator. |
+| `attributes` | [models.CustomTaskCanceledAttributes](../models/customtaskcanceledattributes.md) | :heavy_check_mark: | Attributes for custom task canceled events. |
\ No newline at end of file
diff --git a/docs/models/customtaskcanceledresponse.md b/docs/models/customtaskcanceledresponse.md
new file mode 100644
index 00000000..0bcee227
--- /dev/null
+++ b/docs/models/customtaskcanceledresponse.md
@@ -0,0 +1,20 @@
+# CustomTaskCanceledResponse
+
+Emitted when a custom task is canceled.
+
+Indicates the task was explicitly stopped before completion.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- |
+| `event_id` | *str* | :heavy_check_mark: | Unique identifier for this event instance. |
+| `event_timestamp` | *int* | :heavy_check_mark: | Unix timestamp in nanoseconds when the event was created. |
+| `root_workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the root workflow that initiated this execution chain. |
+| `parent_workflow_exec_id` | *Nullable[str]* | :heavy_check_mark: | Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set. |
+| `workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the workflow that emitted this event. |
+| `workflow_run_id` | *str* | :heavy_check_mark: | Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same. |
+| `workflow_name` | *str* | :heavy_check_mark: | The registered name of the workflow that emitted this event. |
+| `event_type` | *Optional[Literal["CUSTOM_TASK_CANCELED"]]* | :heavy_minus_sign: | Event type discriminator. |
+| `attributes` | [models.CustomTaskCanceledAttributes](../models/customtaskcanceledattributes.md) | :heavy_check_mark: | Attributes for custom task canceled events. |
\ No newline at end of file
diff --git a/docs/models/customtaskcompletedattributesrequest.md b/docs/models/customtaskcompletedattributesrequest.md
new file mode 100644
index 00000000..43671ef9
--- /dev/null
+++ b/docs/models/customtaskcompletedattributesrequest.md
@@ -0,0 +1,12 @@
+# CustomTaskCompletedAttributesRequest
+
+Attributes for custom task completed events.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- |
+| `custom_task_id` | *str* | :heavy_check_mark: | Unique identifier for the custom task within the workflow. |
+| `custom_task_type` | *str* | :heavy_check_mark: | The type/category of the custom task (e.g., 'llm_call', 'api_request'). |
+| `payload`                                                                                      | [models.JSONPayloadRequest](../models/jsonpayloadrequest.md)                                   | :heavy_check_mark:                                                                             | A payload containing arbitrary JSON data. Used for complete state snapshots or final results.  |
\ No newline at end of file
diff --git a/docs/models/customtaskcompletedattributesresponse.md b/docs/models/customtaskcompletedattributesresponse.md
new file mode 100644
index 00000000..a6a136e7
--- /dev/null
+++ b/docs/models/customtaskcompletedattributesresponse.md
@@ -0,0 +1,12 @@
+# CustomTaskCompletedAttributesResponse
+
+Attributes for custom task completed events.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- |
+| `custom_task_id` | *str* | :heavy_check_mark: | Unique identifier for the custom task within the workflow. |
+| `custom_task_type` | *str* | :heavy_check_mark: | The type/category of the custom task (e.g., 'llm_call', 'api_request'). |
+| `payload`                                                                                      | [models.JSONPayloadResponse](../models/jsonpayloadresponse.md)                                 | :heavy_check_mark:                                                                             | A payload containing arbitrary JSON data. Used for complete state snapshots or final results.  |
\ No newline at end of file
diff --git a/docs/models/customtaskcompletedrequest.md b/docs/models/customtaskcompletedrequest.md
new file mode 100644
index 00000000..90ec92a7
--- /dev/null
+++ b/docs/models/customtaskcompletedrequest.md
@@ -0,0 +1,20 @@
+# CustomTaskCompletedRequest
+
+Emitted when a custom task completes successfully.
+
+Contains the final result of the task execution.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- |
+| `event_id` | *str* | :heavy_check_mark: | Unique identifier for this event instance. |
+| `event_timestamp` | *Optional[int]* | :heavy_minus_sign: | Unix timestamp in nanoseconds when the event was created. |
+| `root_workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the root workflow that initiated this execution chain. |
+| `parent_workflow_exec_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set. |
+| `workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the workflow that emitted this event. |
+| `workflow_run_id` | *str* | :heavy_check_mark: | Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same. |
+| `workflow_name` | *str* | :heavy_check_mark: | The registered name of the workflow that emitted this event. |
+| `event_type` | *Optional[Literal["CUSTOM_TASK_COMPLETED"]]* | :heavy_minus_sign: | Event type discriminator. |
+| `attributes` | [models.CustomTaskCompletedAttributesRequest](../models/customtaskcompletedattributesrequest.md) | :heavy_check_mark: | Attributes for custom task completed events. |
\ No newline at end of file
diff --git a/docs/models/customtaskcompletedresponse.md b/docs/models/customtaskcompletedresponse.md
new file mode 100644
index 00000000..6871184f
--- /dev/null
+++ b/docs/models/customtaskcompletedresponse.md
@@ -0,0 +1,20 @@
+# CustomTaskCompletedResponse
+
+Emitted when a custom task completes successfully.
+
+Contains the final result of the task execution.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- |
+| `event_id` | *str* | :heavy_check_mark: | Unique identifier for this event instance. |
+| `event_timestamp` | *int* | :heavy_check_mark: | Unix timestamp in nanoseconds when the event was created. |
+| `root_workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the root workflow that initiated this execution chain. |
+| `parent_workflow_exec_id` | *Nullable[str]* | :heavy_check_mark: | Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set. |
+| `workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the workflow that emitted this event. |
+| `workflow_run_id` | *str* | :heavy_check_mark: | Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same. |
+| `workflow_name` | *str* | :heavy_check_mark: | The registered name of the workflow that emitted this event. |
+| `event_type` | *Optional[Literal["CUSTOM_TASK_COMPLETED"]]* | :heavy_minus_sign: | Event type discriminator. |
+| `attributes` | [models.CustomTaskCompletedAttributesResponse](../models/customtaskcompletedattributesresponse.md) | :heavy_check_mark: | Attributes for custom task completed events. |
\ No newline at end of file
diff --git a/docs/models/customtaskfailedattributes.md b/docs/models/customtaskfailedattributes.md
new file mode 100644
index 00000000..f4e3ab2d
--- /dev/null
+++ b/docs/models/customtaskfailedattributes.md
@@ -0,0 +1,12 @@
+# CustomTaskFailedAttributes
+
+Attributes for custom task failed events.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- |
+| `custom_task_id` | *str* | :heavy_check_mark: | Unique identifier for the custom task within the workflow. |
+| `custom_task_type` | *str* | :heavy_check_mark: | The type/category of the custom task (e.g., 'llm_call', 'api_request'). |
+| `failure` | [models.Failure](../models/failure.md) | :heavy_check_mark: | Represents an error or exception that occurred during execution. |
\ No newline at end of file
diff --git a/docs/models/customtaskfailedrequest.md b/docs/models/customtaskfailedrequest.md
new file mode 100644
index 00000000..ddfd79a6
--- /dev/null
+++ b/docs/models/customtaskfailedrequest.md
@@ -0,0 +1,20 @@
+# CustomTaskFailedRequest
+
+Emitted when a custom task fails.
+
+Contains details about the failure for debugging and error handling.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- |
+| `event_id` | *str* | :heavy_check_mark: | Unique identifier for this event instance. |
+| `event_timestamp` | *Optional[int]* | :heavy_minus_sign: | Unix timestamp in nanoseconds when the event was created. |
+| `root_workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the root workflow that initiated this execution chain. |
+| `parent_workflow_exec_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set. |
+| `workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the workflow that emitted this event. |
+| `workflow_run_id` | *str* | :heavy_check_mark: | Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same. |
+| `workflow_name` | *str* | :heavy_check_mark: | The registered name of the workflow that emitted this event. |
+| `event_type` | *Optional[Literal["CUSTOM_TASK_FAILED"]]* | :heavy_minus_sign: | Event type discriminator. |
+| `attributes` | [models.CustomTaskFailedAttributes](../models/customtaskfailedattributes.md) | :heavy_check_mark: | Attributes for custom task failed events. |
\ No newline at end of file
diff --git a/docs/models/customtaskfailedresponse.md b/docs/models/customtaskfailedresponse.md
new file mode 100644
index 00000000..e14e4afa
--- /dev/null
+++ b/docs/models/customtaskfailedresponse.md
@@ -0,0 +1,20 @@
+# CustomTaskFailedResponse
+
+Emitted when a custom task fails.
+
+Contains details about the failure for debugging and error handling.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- |
+| `event_id` | *str* | :heavy_check_mark: | Unique identifier for this event instance. |
+| `event_timestamp` | *int* | :heavy_check_mark: | Unix timestamp in nanoseconds when the event was created. |
+| `root_workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the root workflow that initiated this execution chain. |
+| `parent_workflow_exec_id` | *Nullable[str]* | :heavy_check_mark: | Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set. |
+| `workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the workflow that emitted this event. |
+| `workflow_run_id` | *str* | :heavy_check_mark: | Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same. |
+| `workflow_name` | *str* | :heavy_check_mark: | The registered name of the workflow that emitted this event. |
+| `event_type` | *Optional[Literal["CUSTOM_TASK_FAILED"]]* | :heavy_minus_sign: | Event type discriminator. |
+| `attributes` | [models.CustomTaskFailedAttributes](../models/customtaskfailedattributes.md) | :heavy_check_mark: | Attributes for custom task failed events. |
\ No newline at end of file
diff --git a/docs/models/customtaskinprogressattributesrequest.md b/docs/models/customtaskinprogressattributesrequest.md
new file mode 100644
index 00000000..6241f13a
--- /dev/null
+++ b/docs/models/customtaskinprogressattributesrequest.md
@@ -0,0 +1,12 @@
+# CustomTaskInProgressAttributesRequest
+
+Attributes for custom task in-progress events with streaming updates.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- |
+| `custom_task_id` | *str* | :heavy_check_mark: | Unique identifier for the custom task within the workflow. |
+| `custom_task_type` | *str* | :heavy_check_mark: | The type/category of the custom task (e.g., 'llm_call', 'api_request'). |
+| `payload` | [models.CustomTaskInProgressAttributesRequestPayload](../models/customtaskinprogressattributesrequestpayload.md) | :heavy_check_mark: | The current state or incremental update for the task. |
\ No newline at end of file
diff --git a/docs/models/customtaskinprogressattributesrequestpayload.md b/docs/models/customtaskinprogressattributesrequestpayload.md
new file mode 100644
index 00000000..776898ba
--- /dev/null
+++ b/docs/models/customtaskinprogressattributesrequestpayload.md
@@ -0,0 +1,19 @@
+# CustomTaskInProgressAttributesRequestPayload
+
+The current state or incremental update for the task.
+
+
+## Supported Types
+
+### `models.JSONPayloadRequest`
+
+```python
+value: models.JSONPayloadRequest = /* values here */
+```
+
+### `models.JSONPatchPayloadRequest`
+
+```python
+value: models.JSONPatchPayloadRequest = /* values here */
+```
+
diff --git a/docs/models/customtaskinprogressattributesresponse.md b/docs/models/customtaskinprogressattributesresponse.md
new file mode 100644
index 00000000..311e4fb5
--- /dev/null
+++ b/docs/models/customtaskinprogressattributesresponse.md
@@ -0,0 +1,12 @@
+# CustomTaskInProgressAttributesResponse
+
+Attributes for custom task in-progress events with streaming updates.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------ |
+| `custom_task_id` | *str* | :heavy_check_mark: | Unique identifier for the custom task within the workflow. |
+| `custom_task_type` | *str* | :heavy_check_mark: | The type/category of the custom task (e.g., 'llm_call', 'api_request'). |
+| `payload` | [models.CustomTaskInProgressAttributesResponsePayload](../models/customtaskinprogressattributesresponsepayload.md) | :heavy_check_mark: | The current state or incremental update for the task. |
\ No newline at end of file
diff --git a/docs/models/customtaskinprogressattributesresponsepayload.md b/docs/models/customtaskinprogressattributesresponsepayload.md
new file mode 100644
index 00000000..96e46022
--- /dev/null
+++ b/docs/models/customtaskinprogressattributesresponsepayload.md
@@ -0,0 +1,19 @@
+# CustomTaskInProgressAttributesResponsePayload
+
+The current state or incremental update for the task.
+
+
+## Supported Types
+
+### `models.JSONPayloadResponse`
+
+```python
+value: models.JSONPayloadResponse = /* values here */
+```
+
+### `models.JSONPatchPayloadResponse`
+
+```python
+value: models.JSONPatchPayloadResponse = /* values here */
+```
+
diff --git a/docs/models/customtaskinprogressrequest.md b/docs/models/customtaskinprogressrequest.md
new file mode 100644
index 00000000..4e324363
--- /dev/null
+++ b/docs/models/customtaskinprogressrequest.md
@@ -0,0 +1,21 @@
+# CustomTaskInProgressRequest
+
+Emitted during custom task execution to report progress.
+
+This event supports streaming updates via JSON or JSON Patch payloads,
+enabling real-time progress tracking for long-running tasks.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- |
+| `event_id` | *str* | :heavy_check_mark: | Unique identifier for this event instance. |
+| `event_timestamp` | *Optional[int]* | :heavy_minus_sign: | Unix timestamp in nanoseconds when the event was created. |
+| `root_workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the root workflow that initiated this execution chain. |
+| `parent_workflow_exec_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set. |
+| `workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the workflow that emitted this event. |
+| `workflow_run_id` | *str* | :heavy_check_mark: | Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same. |
+| `workflow_name` | *str* | :heavy_check_mark: | The registered name of the workflow that emitted this event. |
+| `event_type` | *Optional[Literal["CUSTOM_TASK_IN_PROGRESS"]]* | :heavy_minus_sign: | Event type discriminator. |
+| `attributes` | [models.CustomTaskInProgressAttributesRequest](../models/customtaskinprogressattributesrequest.md) | :heavy_check_mark: | Attributes for custom task in-progress events with streaming updates. |
\ No newline at end of file
diff --git a/docs/models/customtaskinprogressresponse.md b/docs/models/customtaskinprogressresponse.md
new file mode 100644
index 00000000..72afa91f
--- /dev/null
+++ b/docs/models/customtaskinprogressresponse.md
@@ -0,0 +1,21 @@
+# CustomTaskInProgressResponse
+
+Emitted during custom task execution to report progress.
+
+This event supports streaming updates via JSON or JSON Patch payloads,
+enabling real-time progress tracking for long-running tasks.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- |
+| `event_id` | *str* | :heavy_check_mark: | Unique identifier for this event instance. |
+| `event_timestamp` | *int* | :heavy_check_mark: | Unix timestamp in nanoseconds when the event was created. |
+| `root_workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the root workflow that initiated this execution chain. |
+| `parent_workflow_exec_id` | *Nullable[str]* | :heavy_check_mark: | Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set. |
+| `workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the workflow that emitted this event. |
+| `workflow_run_id` | *str* | :heavy_check_mark: | Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same. |
+| `workflow_name` | *str* | :heavy_check_mark: | The registered name of the workflow that emitted this event. |
+| `event_type` | *Optional[Literal["CUSTOM_TASK_IN_PROGRESS"]]* | :heavy_minus_sign: | Event type discriminator. |
+| `attributes` | [models.CustomTaskInProgressAttributesResponse](../models/customtaskinprogressattributesresponse.md) | :heavy_check_mark: | Attributes for custom task in-progress events with streaming updates. |
\ No newline at end of file
diff --git a/docs/models/customtaskstartedattributesrequest.md b/docs/models/customtaskstartedattributesrequest.md
new file mode 100644
index 00000000..ff8d50ec
--- /dev/null
+++ b/docs/models/customtaskstartedattributesrequest.md
@@ -0,0 +1,12 @@
+# CustomTaskStartedAttributesRequest
+
+Attributes for custom task started events.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- |
+| `custom_task_id` | *str* | :heavy_check_mark: | Unique identifier for the custom task within the workflow. |
+| `custom_task_type` | *str* | :heavy_check_mark: | The type/category of the custom task (e.g., 'llm_call', 'api_request'). |
+| `payload`                                                                                      | [Optional[models.JSONPayloadRequest]](../models/jsonpayloadrequest.md)                         | :heavy_minus_sign:                                                                             | A payload containing arbitrary JSON data. Used for complete state snapshots or final results.  |
\ No newline at end of file
diff --git a/docs/models/customtaskstartedattributesresponse.md b/docs/models/customtaskstartedattributesresponse.md
new file mode 100644
index 00000000..8bf43423
--- /dev/null
+++ b/docs/models/customtaskstartedattributesresponse.md
@@ -0,0 +1,12 @@
+# CustomTaskStartedAttributesResponse
+
+Attributes for custom task started events.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- |
+| `custom_task_id` | *str* | :heavy_check_mark: | Unique identifier for the custom task within the workflow. |
+| `custom_task_type` | *str* | :heavy_check_mark: | The type/category of the custom task (e.g., 'llm_call', 'api_request'). |
+| `payload`                                                                                      | [Optional[models.JSONPayloadResponse]](../models/jsonpayloadresponse.md)                       | :heavy_minus_sign:                                                                             | A payload containing arbitrary JSON data. Used for complete state snapshots or final results.  |
\ No newline at end of file
diff --git a/docs/models/customtaskstartedrequest.md b/docs/models/customtaskstartedrequest.md
new file mode 100644
index 00000000..aa4dc293
--- /dev/null
+++ b/docs/models/customtaskstartedrequest.md
@@ -0,0 +1,21 @@
+# CustomTaskStartedRequest
+
+Emitted when a custom task begins execution.
+
+Custom tasks represent user-defined units of work within a workflow,
+such as LLM calls, API requests, or data processing steps.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- |
+| `event_id` | *str* | :heavy_check_mark: | Unique identifier for this event instance. |
+| `event_timestamp` | *Optional[int]* | :heavy_minus_sign: | Unix timestamp in nanoseconds when the event was created. |
+| `root_workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the root workflow that initiated this execution chain. |
+| `parent_workflow_exec_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set. |
+| `workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the workflow that emitted this event. |
+| `workflow_run_id` | *str* | :heavy_check_mark: | Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same. |
+| `workflow_name` | *str* | :heavy_check_mark: | The registered name of the workflow that emitted this event. |
+| `event_type` | *Optional[Literal["CUSTOM_TASK_STARTED"]]* | :heavy_minus_sign: | Event type discriminator. |
+| `attributes` | [models.CustomTaskStartedAttributesRequest](../models/customtaskstartedattributesrequest.md) | :heavy_check_mark: | Attributes for custom task started events. |
\ No newline at end of file
diff --git a/docs/models/customtaskstartedresponse.md b/docs/models/customtaskstartedresponse.md
new file mode 100644
index 00000000..80b1b08b
--- /dev/null
+++ b/docs/models/customtaskstartedresponse.md
@@ -0,0 +1,21 @@
+# CustomTaskStartedResponse
+
+Emitted when a custom task begins execution.
+
+Custom tasks represent user-defined units of work within a workflow,
+such as LLM calls, API requests, or data processing steps.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- |
+| `event_id` | *str* | :heavy_check_mark: | Unique identifier for this event instance. |
+| `event_timestamp` | *int* | :heavy_check_mark: | Unix timestamp in nanoseconds when the event was created. |
+| `root_workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the root workflow that initiated this execution chain. |
+| `parent_workflow_exec_id` | *Nullable[str]* | :heavy_check_mark: | Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set. |
+| `workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the workflow that emitted this event. |
+| `workflow_run_id` | *str* | :heavy_check_mark: | Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same. |
+| `workflow_name` | *str* | :heavy_check_mark: | The registered name of the workflow that emitted this event. |
+| `event_type` | *Optional[Literal["CUSTOM_TASK_STARTED"]]* | :heavy_minus_sign: | Event type discriminator. |
+| `attributes` | [models.CustomTaskStartedAttributesResponse](../models/customtaskstartedattributesresponse.md) | :heavy_check_mark: | Attributes for custom task started events. |
\ No newline at end of file
diff --git a/docs/models/customtasktimedoutattributes.md b/docs/models/customtasktimedoutattributes.md
new file mode 100644
index 00000000..b302b076
--- /dev/null
+++ b/docs/models/customtasktimedoutattributes.md
@@ -0,0 +1,12 @@
+# CustomTaskTimedOutAttributes
+
+Attributes for custom task timed out events.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- |
+| `custom_task_id` | *str* | :heavy_check_mark: | Unique identifier for the custom task within the workflow. |
+| `custom_task_type` | *str* | :heavy_check_mark: | The type/category of the custom task (e.g., 'llm_call', 'api_request'). |
+| `timeout_type` | *OptionalNullable[str]* | :heavy_minus_sign: | The type of timeout that occurred. |
\ No newline at end of file
diff --git a/docs/models/customtasktimedoutrequest.md b/docs/models/customtasktimedoutrequest.md
new file mode 100644
index 00000000..5be6c0af
--- /dev/null
+++ b/docs/models/customtasktimedoutrequest.md
@@ -0,0 +1,20 @@
+# CustomTaskTimedOutRequest
+
+Emitted when a custom task exceeds its timeout.
+
+Indicates the task did not complete within its configured time limit.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- |
+| `event_id` | *str* | :heavy_check_mark: | Unique identifier for this event instance. |
+| `event_timestamp` | *Optional[int]* | :heavy_minus_sign: | Unix timestamp in nanoseconds when the event was created. |
+| `root_workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the root workflow that initiated this execution chain. |
+| `parent_workflow_exec_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set. |
+| `workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the workflow that emitted this event. |
+| `workflow_run_id` | *str* | :heavy_check_mark: | Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same. |
+| `workflow_name` | *str* | :heavy_check_mark: | The registered name of the workflow that emitted this event. |
+| `event_type` | *Optional[Literal["CUSTOM_TASK_TIMED_OUT"]]* | :heavy_minus_sign: | Event type discriminator. |
+| `attributes` | [models.CustomTaskTimedOutAttributes](../models/customtasktimedoutattributes.md) | :heavy_check_mark: | Attributes for custom task timed out events. |
\ No newline at end of file
diff --git a/docs/models/customtasktimedoutresponse.md b/docs/models/customtasktimedoutresponse.md
new file mode 100644
index 00000000..75d49743
--- /dev/null
+++ b/docs/models/customtasktimedoutresponse.md
@@ -0,0 +1,20 @@
+# CustomTaskTimedOutResponse
+
+Emitted when a custom task exceeds its timeout.
+
+Indicates the task did not complete within its configured time limit.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- |
+| `event_id` | *str* | :heavy_check_mark: | Unique identifier for this event instance. |
+| `event_timestamp` | *int* | :heavy_check_mark: | Unix timestamp in nanoseconds when the event was created. |
+| `root_workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the root workflow that initiated this execution chain. |
+| `parent_workflow_exec_id` | *Nullable[str]* | :heavy_check_mark: | Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set. |
+| `workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the workflow that emitted this event. |
+| `workflow_run_id` | *str* | :heavy_check_mark: | Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same. |
+| `workflow_name` | *str* | :heavy_check_mark: | The registered name of the workflow that emitted this event. |
+| `event_type` | *Optional[Literal["CUSTOM_TASK_TIMED_OUT"]]* | :heavy_minus_sign: | Event type discriminator. |
+| `attributes` | [models.CustomTaskTimedOutAttributes](../models/customtasktimedoutattributes.md) | :heavy_check_mark: | Attributes for custom task timed out events. |
\ No newline at end of file
diff --git a/docs/models/deletebatchjobresponse.md b/docs/models/deletebatchjobresponse.md
new file mode 100644
index 00000000..d0df2d19
--- /dev/null
+++ b/docs/models/deletebatchjobresponse.md
@@ -0,0 +1,10 @@
+# DeleteBatchJobResponse
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------- | ---------------------------- | ---------------------------- | ---------------------------- |
+| `id` | *str* | :heavy_check_mark: | N/A |
+| `object` | *Optional[Literal["batch"]]* | :heavy_minus_sign: | N/A |
+| `deleted` | *Optional[bool]* | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/docs/models/deploymentdetailresponse.md b/docs/models/deploymentdetailresponse.md
new file mode 100644
index 00000000..e1d84e11
--- /dev/null
+++ b/docs/models/deploymentdetailresponse.md
@@ -0,0 +1,13 @@
+# DeploymentDetailResponse
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ |
+| `id` | *str* | :heavy_check_mark: | Unique identifier of the deployment |
+| `name` | *str* | :heavy_check_mark: | Deployment name |
+| `is_active` | *bool* | :heavy_check_mark: | Whether at least one worker is currently live |
+| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | When the deployment was first registered |
+| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | When the deployment was last updated |
+| `workers` | List[[models.DeploymentWorkerResponse](../models/deploymentworkerresponse.md)] | :heavy_check_mark: | Workers registered for the deployment |
\ No newline at end of file
diff --git a/docs/models/deploymentlistresponse.md b/docs/models/deploymentlistresponse.md
new file mode 100644
index 00000000..22868512
--- /dev/null
+++ b/docs/models/deploymentlistresponse.md
@@ -0,0 +1,8 @@
+# DeploymentListResponse
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ |
+| `deployments` | List[[models.DeploymentResponse](../models/deploymentresponse.md)] | :heavy_check_mark: | List of deployments |
\ No newline at end of file
diff --git a/docs/models/deploymentresponse.md b/docs/models/deploymentresponse.md
new file mode 100644
index 00000000..f6f12836
--- /dev/null
+++ b/docs/models/deploymentresponse.md
@@ -0,0 +1,12 @@
+# DeploymentResponse
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- |
+| `id` | *str* | :heavy_check_mark: | Unique identifier of the deployment |
+| `name` | *str* | :heavy_check_mark: | Deployment name |
+| `is_active` | *bool* | :heavy_check_mark: | Whether at least one worker is currently live |
+| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | When the deployment was first registered |
+| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | When the deployment was last updated |
\ No newline at end of file
diff --git a/docs/models/deploymentworkerresponse.md b/docs/models/deploymentworkerresponse.md
new file mode 100644
index 00000000..9b76278e
--- /dev/null
+++ b/docs/models/deploymentworkerresponse.md
@@ -0,0 +1,10 @@
+# DeploymentWorkerResponse
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- |
+| `name` | *str* | :heavy_check_mark: | Worker name |
+| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | When the worker first registered |
+| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | When the worker last registered |
\ No newline at end of file
diff --git a/docs/models/encodedpayloadoptions.md b/docs/models/encodedpayloadoptions.md
new file mode 100644
index 00000000..15367ce6
--- /dev/null
+++ b/docs/models/encodedpayloadoptions.md
@@ -0,0 +1,15 @@
+# EncodedPayloadOptions
+
+## Example Usage
+
+```python
+from mistralai.client.models import EncodedPayloadOptions
+value: EncodedPayloadOptions = "offloaded"
+```
+
+
+## Values
+
+- `"offloaded"`
+- `"encrypted"`
+- `"encrypted-partial"`
diff --git a/docs/models/eventprogressstatus.md b/docs/models/eventprogressstatus.md
new file mode 100644
index 00000000..02033178
--- /dev/null
+++ b/docs/models/eventprogressstatus.md
@@ -0,0 +1,19 @@
+# EventProgressStatus
+
+## Example Usage
+
+```python
+from mistralai.client.models import EventProgressStatus
+
+# Open enum: unrecognized values are captured as UnrecognizedStr
+value: EventProgressStatus = "RUNNING"
+```
+
+
+## Values
+
+This is an open enum. Unrecognized values will not fail type checks.
+
+- `"RUNNING"`
+- `"COMPLETED"`
+- `"FAILED"`
diff --git a/docs/models/eventsource.md b/docs/models/eventsource.md
new file mode 100644
index 00000000..ac53ccf7
--- /dev/null
+++ b/docs/models/eventsource.md
@@ -0,0 +1,14 @@
+# EventSource
+
+## Example Usage
+
+```python
+from mistralai.client.models import EventSource
+value: EventSource = "DATABASE"
+```
+
+
+## Values
+
+- `"DATABASE"`
+- `"LIVE"`
diff --git a/docs/models/eventtype.md b/docs/models/eventtype.md
new file mode 100644
index 00000000..6add793e
--- /dev/null
+++ b/docs/models/eventtype.md
@@ -0,0 +1,18 @@
+# EventType
+
+## Example Usage
+
+```python
+from mistralai.client.models import EventType
+
+# Open enum: unrecognized values are captured as UnrecognizedStr
+value: EventType = "EVENT"
+```
+
+
+## Values
+
+This is an open enum. Unrecognized values will not fail type checks.
+
+- `"EVENT"`
+- `"EVENT_PROGRESS"`
diff --git a/docs/models/executeworkflowregistrationv1workflowsregistrationsworkflowregistrationidexecutepostrequest.md b/docs/models/executeworkflowregistrationv1workflowsregistrationsworkflowregistrationidexecutepostrequest.md
new file mode 100644
index 00000000..54de12a2
--- /dev/null
+++ b/docs/models/executeworkflowregistrationv1workflowsregistrationsworkflowregistrationidexecutepostrequest.md
@@ -0,0 +1,9 @@
+# ExecuteWorkflowRegistrationV1WorkflowsRegistrationsWorkflowRegistrationIDExecutePostRequest
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ |
+| `workflow_registration_id` | *str* | :heavy_check_mark: | N/A |
+| `workflow_execution_request` | [models.WorkflowExecutionRequest](../models/workflowexecutionrequest.md) | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/executeworkflowv1workflowsworkflowidentifierexecutepostrequest.md b/docs/models/executeworkflowv1workflowsworkflowidentifierexecutepostrequest.md
new file mode 100644
index 00000000..29c55a77
--- /dev/null
+++ b/docs/models/executeworkflowv1workflowsworkflowidentifierexecutepostrequest.md
@@ -0,0 +1,9 @@
+# ExecuteWorkflowV1WorkflowsWorkflowIdentifierExecutePostRequest
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ |
+| `workflow_identifier` | *str* | :heavy_check_mark: | N/A |
+| `workflow_execution_request` | [models.WorkflowExecutionRequest](../models/workflowexecutionrequest.md) | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/failure.md b/docs/models/failure.md
new file mode 100644
index 00000000..259d7bcf
--- /dev/null
+++ b/docs/models/failure.md
@@ -0,0 +1,10 @@
+# Failure
+
+Represents an error or exception that occurred during execution.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| -------------------------------------------- | -------------------------------------------- | -------------------------------------------- | -------------------------------------------- |
+| `message` | *str* | :heavy_check_mark: | A human-readable description of the failure. |
\ No newline at end of file
diff --git a/docs/models/ftmodelcard.md b/docs/models/ftmodelcard.md
index f65ff6e8..0381dd81 100644
--- a/docs/models/ftmodelcard.md
+++ b/docs/models/ftmodelcard.md
@@ -20,6 +20,6 @@ Extra fields for fine-tuned models.
| `deprecation_replacement_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
| `default_model_temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A |
| `type` | *Literal["fine-tuned"]* | :heavy_check_mark: | N/A |
-| `job` | *str* | :heavy_check_mark: | N/A |
+| `job` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
| `root` | *str* | :heavy_check_mark: | N/A |
| `archived` | *Optional[bool]* | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/docs/models/getdeploymentv1workflowsdeploymentsnamegetrequest.md b/docs/models/getdeploymentv1workflowsdeploymentsnamegetrequest.md
new file mode 100644
index 00000000..6af056b1
--- /dev/null
+++ b/docs/models/getdeploymentv1workflowsdeploymentsnamegetrequest.md
@@ -0,0 +1,8 @@
+# GetDeploymentV1WorkflowsDeploymentsNameGetRequest
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------ | ------------------ | ------------------ | ------------------ |
+| `name` | *str* | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/getrunhistoryv1workflowsrunsrunidhistorygetrequest.md b/docs/models/getrunhistoryv1workflowsrunsrunidhistorygetrequest.md
new file mode 100644
index 00000000..8d4af732
--- /dev/null
+++ b/docs/models/getrunhistoryv1workflowsrunsrunidhistorygetrequest.md
@@ -0,0 +1,8 @@
+# GetRunHistoryV1WorkflowsRunsRunIDHistoryGetRequest
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------ | ------------------ | ------------------ | ------------------ |
+| `run_id` | *str* | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/getrunv1workflowsrunsrunidgetrequest.md b/docs/models/getrunv1workflowsrunsrunidgetrequest.md
new file mode 100644
index 00000000..6b033690
--- /dev/null
+++ b/docs/models/getrunv1workflowsrunsrunidgetrequest.md
@@ -0,0 +1,8 @@
+# GetRunV1WorkflowsRunsRunIDGetRequest
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------ | ------------------ | ------------------ | ------------------ |
+| `run_id` | *str* | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/getstreameventsv1workflowseventsstreamgetrequest.md b/docs/models/getstreameventsv1workflowseventsstreamgetrequest.md
new file mode 100644
index 00000000..9b5ae174
--- /dev/null
+++ b/docs/models/getstreameventsv1workflowseventsstreamgetrequest.md
@@ -0,0 +1,19 @@
+# GetStreamEventsV1WorkflowsEventsStreamGetRequest
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- |
+| `scope` | [Optional[models.Scope]](../models/scope.md) | :heavy_minus_sign: | N/A |
+| `activity_name` | *Optional[str]* | :heavy_minus_sign: | N/A |
+| `activity_id` | *Optional[str]* | :heavy_minus_sign: | N/A |
+| `workflow_name` | *Optional[str]* | :heavy_minus_sign: | N/A |
+| `workflow_exec_id` | *Optional[str]* | :heavy_minus_sign: | N/A |
+| `root_workflow_exec_id` | *Optional[str]* | :heavy_minus_sign: | N/A |
+| `parent_workflow_exec_id` | *Optional[str]* | :heavy_minus_sign: | N/A |
+| `stream` | *Optional[str]* | :heavy_minus_sign: | N/A |
+| `start_seq` | *Optional[int]* | :heavy_minus_sign: | N/A |
+| `metadata_filters` | Dict[str, *Any*] | :heavy_minus_sign: | N/A |
+| `workflow_event_types` | List[[models.WorkflowEventType](../models/workfloweventtype.md)] | :heavy_minus_sign: | N/A |
+| `last_event_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/docs/models/getstreameventsv1workflowseventsstreamgetresponsebody.md b/docs/models/getstreameventsv1workflowseventsstreamgetresponsebody.md
new file mode 100644
index 00000000..18f9b639
--- /dev/null
+++ b/docs/models/getstreameventsv1workflowseventsstreamgetresponsebody.md
@@ -0,0 +1,13 @@
+# GetStreamEventsV1WorkflowsEventsStreamGetResponseBody
+
+Stream of Server-Sent Events (SSE)
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- |
+| `event` | *Optional[str]* | :heavy_minus_sign: | N/A |
+| `data` | [Optional[models.StreamEventSsePayload]](../models/streameventssepayload.md) | :heavy_minus_sign: | N/A |
+| `id` | *Optional[str]* | :heavy_minus_sign: | N/A |
+| `retry` | *Optional[int]* | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/docs/models/getworkfloweventsv1workflowseventslistgetrequest.md b/docs/models/getworkfloweventsv1workflowseventslistgetrequest.md
new file mode 100644
index 00000000..5594402c
--- /dev/null
+++ b/docs/models/getworkfloweventsv1workflowseventslistgetrequest.md
@@ -0,0 +1,12 @@
+# GetWorkflowEventsV1WorkflowsEventsListGetRequest
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- |
+| `root_workflow_exec_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Execution ID of the root workflow that initiated this execution chain. |
+| `workflow_exec_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Execution ID of the workflow that emitted this event. |
+| `workflow_run_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Run ID of the workflow that emitted this event. |
+| `limit` | *Optional[int]* | :heavy_minus_sign: | Maximum number of events to return. |
+| `cursor` | *OptionalNullable[str]* | :heavy_minus_sign: | Cursor for pagination. |
\ No newline at end of file
diff --git a/docs/models/getworkflowexecutionhistoryv1workflowsexecutionsexecutionidhistorygetrequest.md b/docs/models/getworkflowexecutionhistoryv1workflowsexecutionsexecutionidhistorygetrequest.md
new file mode 100644
index 00000000..dc63ca0a
--- /dev/null
+++ b/docs/models/getworkflowexecutionhistoryv1workflowsexecutionsexecutionidhistorygetrequest.md
@@ -0,0 +1,8 @@
+# GetWorkflowExecutionHistoryV1WorkflowsExecutionsExecutionIDHistoryGetRequest
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------ | ------------------ | ------------------ | ------------------ |
+| `execution_id` | *str* | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/getworkflowexecutiontraceeventsrequest.md b/docs/models/getworkflowexecutiontraceeventsrequest.md
new file mode 100644
index 00000000..4a1d8021
--- /dev/null
+++ b/docs/models/getworkflowexecutiontraceeventsrequest.md
@@ -0,0 +1,10 @@
+# GetWorkflowExecutionTraceEventsRequest
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------------- | ------------------------- | ------------------------- | ------------------------- |
+| `execution_id` | *str* | :heavy_check_mark: | N/A |
+| `merge_same_id_events` | *Optional[bool]* | :heavy_minus_sign: | N/A |
+| `include_internal_events` | *Optional[bool]* | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/docs/models/getworkflowexecutiontraceotelrequest.md b/docs/models/getworkflowexecutiontraceotelrequest.md
new file mode 100644
index 00000000..8c4f9be7
--- /dev/null
+++ b/docs/models/getworkflowexecutiontraceotelrequest.md
@@ -0,0 +1,8 @@
+# GetWorkflowExecutionTraceOtelRequest
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------ | ------------------ | ------------------ | ------------------ |
+| `execution_id` | *str* | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/getworkflowexecutiontracesummaryrequest.md b/docs/models/getworkflowexecutiontracesummaryrequest.md
new file mode 100644
index 00000000..a4524bbc
--- /dev/null
+++ b/docs/models/getworkflowexecutiontracesummaryrequest.md
@@ -0,0 +1,8 @@
+# GetWorkflowExecutionTraceSummaryRequest
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------ | ------------------ | ------------------ | ------------------ |
+| `execution_id` | *str* | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/getworkflowexecutionv1workflowsexecutionsexecutionidgetrequest.md b/docs/models/getworkflowexecutionv1workflowsexecutionsexecutionidgetrequest.md
new file mode 100644
index 00000000..f9566f1c
--- /dev/null
+++ b/docs/models/getworkflowexecutionv1workflowsexecutionsexecutionidgetrequest.md
@@ -0,0 +1,8 @@
+# GetWorkflowExecutionV1WorkflowsExecutionsExecutionIDGetRequest
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------ | ------------------ | ------------------ | ------------------ |
+| `execution_id` | *str* | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/getworkflowmetricsv1workflowsworkflownamemetricsgetrequest.md b/docs/models/getworkflowmetricsv1workflowsworkflownamemetricsgetrequest.md
new file mode 100644
index 00000000..a6e623dd
--- /dev/null
+++ b/docs/models/getworkflowmetricsv1workflowsworkflownamemetricsgetrequest.md
@@ -0,0 +1,10 @@
+# GetWorkflowMetricsV1WorkflowsWorkflowNameMetricsGetRequest
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- |
+| `workflow_name` | *str* | :heavy_check_mark: | N/A |
+| `start_time` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | Filter workflows started after this time (ISO 8601) |
+| `end_time` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | Filter workflows started before this time (ISO 8601) |
\ No newline at end of file
diff --git a/docs/models/getworkflowregistrationsv1workflowsregistrationsgetrequest.md b/docs/models/getworkflowregistrationsv1workflowsregistrationsgetrequest.md
new file mode 100644
index 00000000..ee724445
--- /dev/null
+++ b/docs/models/getworkflowregistrationsv1workflowsregistrationsgetrequest.md
@@ -0,0 +1,17 @@
+# GetWorkflowRegistrationsV1WorkflowsRegistrationsGetRequest
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- |
+| `workflow_id` | *OptionalNullable[str]* | :heavy_minus_sign: | The workflow ID to filter by |
+| `task_queue` | *OptionalNullable[str]* | :heavy_minus_sign: | The task queue to filter by |
+| `active_only`                                                                          | *Optional[bool]*                                                                       | :heavy_minus_sign:                                                                     | Whether to only return active workflow versions                                        |
+| `include_shared` | *Optional[bool]* | :heavy_minus_sign: | Whether to include shared workflow versions |
+| `workflow_search` | *OptionalNullable[str]* | :heavy_minus_sign: | The workflow name to filter by |
+| `archived` | *OptionalNullable[bool]* | :heavy_minus_sign: | Filter by archived state. False=exclude archived, True=only archived, None=include all |
+| `with_workflow` | *Optional[bool]* | :heavy_minus_sign: | Whether to include the workflow definition |
+| `available_in_chat_assistant` | *OptionalNullable[bool]* | :heavy_minus_sign: | Whether to only return workflows compatible with chat assistant |
+| `limit`                                                                                | *Optional[int]*                                                                        | :heavy_minus_sign:                                                                     | The maximum number of workflow versions to return                                      |
+| `cursor` | *OptionalNullable[str]* | :heavy_minus_sign: | The cursor for pagination |
\ No newline at end of file
diff --git a/docs/models/getworkflowregistrationv1workflowsregistrationsworkflowregistrationidgetrequest.md b/docs/models/getworkflowregistrationv1workflowsregistrationsworkflowregistrationidgetrequest.md
new file mode 100644
index 00000000..ec43ad56
--- /dev/null
+++ b/docs/models/getworkflowregistrationv1workflowsregistrationsworkflowregistrationidgetrequest.md
@@ -0,0 +1,10 @@
+# GetWorkflowRegistrationV1WorkflowsRegistrationsWorkflowRegistrationIDGetRequest
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------------------------------- | ------------------------------------------- | ------------------------------------------- | ------------------------------------------- |
+| `workflow_registration_id` | *str* | :heavy_check_mark: | N/A |
+| `with_workflow` | *Optional[bool]* | :heavy_minus_sign: | Whether to include the workflow definition |
+| `include_shared` | *Optional[bool]* | :heavy_minus_sign: | Whether to include shared workflow versions |
\ No newline at end of file
diff --git a/docs/models/getworkflowsv1workflowsgetrequest.md b/docs/models/getworkflowsv1workflowsgetrequest.md
new file mode 100644
index 00000000..c83ed6d1
--- /dev/null
+++ b/docs/models/getworkflowsv1workflowsgetrequest.md
@@ -0,0 +1,13 @@
+# GetWorkflowsV1WorkflowsGetRequest
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- |
+| `active_only` | *Optional[bool]* | :heavy_minus_sign: | Whether to only return active workflows |
+| `include_shared` | *Optional[bool]* | :heavy_minus_sign: | Whether to include shared workflows |
+| `available_in_chat_assistant` | *OptionalNullable[bool]* | :heavy_minus_sign: | Whether to only return workflows compatible with chat assistant |
+| `archived` | *OptionalNullable[bool]* | :heavy_minus_sign: | Filter by archived state. False=exclude archived, True=only archived, None=include all |
+| `cursor` | *OptionalNullable[str]* | :heavy_minus_sign: | The cursor for pagination |
+| `limit` | *Optional[int]* | :heavy_minus_sign: | The maximum number of workflows to return |
\ No newline at end of file
diff --git a/docs/models/getworkflowsv1workflowsgetresponse.md b/docs/models/getworkflowsv1workflowsgetresponse.md
new file mode 100644
index 00000000..6ec4f4e7
--- /dev/null
+++ b/docs/models/getworkflowsv1workflowsgetresponse.md
@@ -0,0 +1,8 @@
+# GetWorkflowsV1WorkflowsGetResponse
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- |
+| `result` | [models.WorkflowListResponse](../models/workflowlistresponse.md) | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/getworkflowv1workflowsworkflowidentifiergetrequest.md b/docs/models/getworkflowv1workflowsworkflowidentifiergetrequest.md
new file mode 100644
index 00000000..2f5c2b4d
--- /dev/null
+++ b/docs/models/getworkflowv1workflowsworkflowidentifiergetrequest.md
@@ -0,0 +1,8 @@
+# GetWorkflowV1WorkflowsWorkflowIdentifierGetRequest
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------- | --------------------- | --------------------- | --------------------- |
+| `workflow_identifier` | *str* | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/jobsapiroutesbatchdeletebatchjobrequest.md b/docs/models/jobsapiroutesbatchdeletebatchjobrequest.md
new file mode 100644
index 00000000..02f1ec24
--- /dev/null
+++ b/docs/models/jobsapiroutesbatchdeletebatchjobrequest.md
@@ -0,0 +1,8 @@
+# JobsAPIRoutesBatchDeleteBatchJobRequest
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------ | ------------------ | ------------------ | ------------------ |
+| `job_id` | *str* | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/jsonpatchadd.md b/docs/models/jsonpatchadd.md
new file mode 100644
index 00000000..2a091c8a
--- /dev/null
+++ b/docs/models/jsonpatchadd.md
@@ -0,0 +1,10 @@
+# JSONPatchAdd
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `path` | *str* | :heavy_check_mark: | A JSON Pointer (RFC 6901) identifying the target location within the document. Can be a string path (e.g., '/foo/bar'), '/', '', or an empty list [] for root-level operations. |
+| `value` | *Any* | :heavy_check_mark: | The value to use for the operation |
+| `op` | *Literal["add"]* | :heavy_check_mark: | Add operation |
\ No newline at end of file
diff --git a/docs/models/jsonpatchappend.md b/docs/models/jsonpatchappend.md
new file mode 100644
index 00000000..684cc896
--- /dev/null
+++ b/docs/models/jsonpatchappend.md
@@ -0,0 +1,10 @@
+# JSONPatchAppend
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `path` | *str* | :heavy_check_mark: | A JSON Pointer (RFC 6901) identifying the target location within the document. Can be a string path (e.g., '/foo/bar'), '/', '', or an empty list [] for root-level operations. |
+| `value` | *str* | :heavy_check_mark: | The value to use for the operation. A string to append to the existing value |
+| `op` | *Literal["append"]* | :heavy_check_mark: | 'append' is an extension for efficient string concatenation in streaming scenarios. |
\ No newline at end of file
diff --git a/docs/models/jsonpatchpayloadrequest.md b/docs/models/jsonpatchpayloadrequest.md
new file mode 100644
index 00000000..fb4da675
--- /dev/null
+++ b/docs/models/jsonpatchpayloadrequest.md
@@ -0,0 +1,13 @@
+# JSONPatchPayloadRequest
+
+A payload containing a list of JSON Patch operations.
+
+Used for streaming incremental updates to workflow state.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- |
+| `type` | *Literal["json_patch"]* | :heavy_check_mark: | Discriminator indicating this is a JSON Patch payload. |
+| `value` | List[[models.JSONPatchPayloadRequestValue](../models/jsonpatchpayloadrequestvalue.md)] | :heavy_check_mark: | The list of JSON Patch operations to apply in order. |
\ No newline at end of file
diff --git a/docs/models/jsonpatchpayloadrequestvalue.md b/docs/models/jsonpatchpayloadrequestvalue.md
new file mode 100644
index 00000000..d58a1323
--- /dev/null
+++ b/docs/models/jsonpatchpayloadrequestvalue.md
@@ -0,0 +1,29 @@
+# JSONPatchPayloadRequestValue
+
+
+## Supported Types
+
+### `models.JSONPatchAdd`
+
+```python
+value: models.JSONPatchAdd = /* values here */
+```
+
+### `models.JSONPatchAppend`
+
+```python
+value: models.JSONPatchAppend = /* values here */
+```
+
+### `models.JSONPatchRemove`
+
+```python
+value: models.JSONPatchRemove = /* values here */
+```
+
+### `models.JSONPatchReplace`
+
+```python
+value: models.JSONPatchReplace = /* values here */
+```
+
diff --git a/docs/models/jsonpatchpayloadresponse.md b/docs/models/jsonpatchpayloadresponse.md
new file mode 100644
index 00000000..ce9dbd90
--- /dev/null
+++ b/docs/models/jsonpatchpayloadresponse.md
@@ -0,0 +1,13 @@
+# JSONPatchPayloadResponse
+
+A payload containing a list of JSON Patch operations.
+
+Used for streaming incremental updates to workflow state.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- |
+| `type` | *Literal["json_patch"]* | :heavy_check_mark: | Discriminator indicating this is a JSON Patch payload. |
+| `value` | List[[models.JSONPatchPayloadResponseValue](../models/jsonpatchpayloadresponsevalue.md)] | :heavy_check_mark: | The list of JSON Patch operations to apply in order. |
\ No newline at end of file
diff --git a/docs/models/jsonpatchpayloadresponsevalue.md b/docs/models/jsonpatchpayloadresponsevalue.md
new file mode 100644
index 00000000..21c7d186
--- /dev/null
+++ b/docs/models/jsonpatchpayloadresponsevalue.md
@@ -0,0 +1,29 @@
+# JSONPatchPayloadResponseValue
+
+
+## Supported Types
+
+### `models.JSONPatchAdd`
+
+```python
+value: models.JSONPatchAdd = /* values here */
+```
+
+### `models.JSONPatchAppend`
+
+```python
+value: models.JSONPatchAppend = /* values here */
+```
+
+### `models.JSONPatchRemove`
+
+```python
+value: models.JSONPatchRemove = /* values here */
+```
+
+### `models.JSONPatchReplace`
+
+```python
+value: models.JSONPatchReplace = /* values here */
+```
+
diff --git a/docs/models/jsonpatchremove.md b/docs/models/jsonpatchremove.md
new file mode 100644
index 00000000..735305cf
--- /dev/null
+++ b/docs/models/jsonpatchremove.md
@@ -0,0 +1,10 @@
+# JSONPatchRemove
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `path` | *str* | :heavy_check_mark: | A JSON Pointer (RFC 6901) identifying the target location within the document. Can be a string path (e.g., '/foo/bar'), '/', '', or an empty list [] for root-level operations. |
+| `value` | *Any* | :heavy_check_mark: | The value to use for the operation |
+| `op` | *Literal["remove"]* | :heavy_check_mark: | Remove operation |
\ No newline at end of file
diff --git a/docs/models/jsonpatchreplace.md b/docs/models/jsonpatchreplace.md
new file mode 100644
index 00000000..382b7d93
--- /dev/null
+++ b/docs/models/jsonpatchreplace.md
@@ -0,0 +1,10 @@
+# JSONPatchReplace
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `path` | *str* | :heavy_check_mark: | A JSON Pointer (RFC 6901) identifying the target location within the document. Can be a string path (e.g., '/foo/bar'), '/', '', or an empty list [] for root-level operations. |
+| `value` | *Any* | :heavy_check_mark: | The value to use for the operation |
+| `op` | *Literal["replace"]* | :heavy_check_mark: | Replace operation |
\ No newline at end of file
diff --git a/docs/models/jsonpayloadrequest.md b/docs/models/jsonpayloadrequest.md
new file mode 100644
index 00000000..63ec2142
--- /dev/null
+++ b/docs/models/jsonpayloadrequest.md
@@ -0,0 +1,13 @@
+# JSONPayloadRequest
+
+A payload containing arbitrary JSON data.
+
+Used for complete state snapshots or final results.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- |
+| `type` | *Optional[Literal["json"]]* | :heavy_minus_sign: | Discriminator indicating this is a raw JSON payload. |
+| `value` | *Any* | :heavy_check_mark: | The JSON-serializable payload value. |
\ No newline at end of file
diff --git a/docs/models/jsonpayloadresponse.md b/docs/models/jsonpayloadresponse.md
new file mode 100644
index 00000000..01eaae61
--- /dev/null
+++ b/docs/models/jsonpayloadresponse.md
@@ -0,0 +1,13 @@
+# JSONPayloadResponse
+
+A payload containing arbitrary JSON data.
+
+Used for complete state snapshots or final results.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- |
+| `type` | *Optional[Literal["json"]]* | :heavy_minus_sign: | Discriminator indicating this is a raw JSON payload. |
+| `value` | *Any* | :heavy_check_mark: | The JSON-serializable payload value. |
\ No newline at end of file
diff --git a/docs/models/librarieslistv1request.md b/docs/models/librarieslistv1request.md
new file mode 100644
index 00000000..b9f99a17
--- /dev/null
+++ b/docs/models/librarieslistv1request.md
@@ -0,0 +1,9 @@
+# LibrariesListV1Request
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------ | ------------------ | ------------------ | ------------------ |
+| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A |
+| `page` | *Optional[int]* | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/docs/models/listdeploymentsv1workflowsdeploymentsgetrequest.md b/docs/models/listdeploymentsv1workflowsdeploymentsgetrequest.md
new file mode 100644
index 00000000..c93c0dbb
--- /dev/null
+++ b/docs/models/listdeploymentsv1workflowsdeploymentsgetrequest.md
@@ -0,0 +1,9 @@
+# ListDeploymentsV1WorkflowsDeploymentsGetRequest
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ----------------------- | ----------------------- | ----------------------- | ----------------------- |
+| `active_only` | *Optional[bool]* | :heavy_minus_sign: | N/A |
+| `workflow_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/docs/models/listlibrariesresponse.md b/docs/models/listlibrariesresponse.md
index e21b9ced..18cb63c1 100644
--- a/docs/models/listlibrariesresponse.md
+++ b/docs/models/listlibrariesresponse.md
@@ -3,6 +3,7 @@
## Fields
-| Field | Type | Required | Description |
-| -------------------------------------------- | -------------------------------------------- | -------------------------------------------- | -------------------------------------------- |
-| `data` | List[[models.Library](../models/library.md)] | :heavy_check_mark: | N/A |
\ No newline at end of file
+| Field | Type | Required | Description |
+| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- |
+| `pagination` | [models.PaginationInfo](../models/paginationinfo.md) | :heavy_check_mark: | N/A |
+| `data` | List[[models.Library](../models/library.md)] | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/listrunsv1workflowsrunsgetrequest.md b/docs/models/listrunsv1workflowsrunsgetrequest.md
new file mode 100644
index 00000000..259876c6
--- /dev/null
+++ b/docs/models/listrunsv1workflowsrunsgetrequest.md
@@ -0,0 +1,12 @@
+# ListRunsV1WorkflowsRunsGetRequest
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- |
+| `workflow_identifier` | *OptionalNullable[str]* | :heavy_minus_sign: | Filter by workflow name or id |
+| `search` | *OptionalNullable[str]* | :heavy_minus_sign: | Search by workflow name, display name or id |
+| `status` | [OptionalNullable[models.ListRunsV1WorkflowsRunsGetStatus]](../models/listrunsv1workflowsrunsgetstatus.md) | :heavy_minus_sign: | Filter by workflow status |
+| `page_size` | *Optional[int]* | :heavy_minus_sign: | Number of items per page |
+| `next_page_token` | *OptionalNullable[str]* | :heavy_minus_sign: | Token for the next page of results |
\ No newline at end of file
diff --git a/docs/models/listrunsv1workflowsrunsgetresponse.md b/docs/models/listrunsv1workflowsrunsgetresponse.md
new file mode 100644
index 00000000..405119b0
--- /dev/null
+++ b/docs/models/listrunsv1workflowsrunsgetresponse.md
@@ -0,0 +1,8 @@
+# ListRunsV1WorkflowsRunsGetResponse
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- |
+| `result` | [models.WorkflowExecutionListResponse](../models/workflowexecutionlistresponse.md) | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/listrunsv1workflowsrunsgetstatus.md b/docs/models/listrunsv1workflowsrunsgetstatus.md
new file mode 100644
index 00000000..e6f0d606
--- /dev/null
+++ b/docs/models/listrunsv1workflowsrunsgetstatus.md
@@ -0,0 +1,19 @@
+# ListRunsV1WorkflowsRunsGetStatus
+
+Filter by workflow status
+
+
+## Supported Types
+
+### `models.WorkflowExecutionStatus`
+
+```python
+value: models.WorkflowExecutionStatus = /* values here */
+```
+
+### `List[models.WorkflowExecutionStatus]`
+
+```python
+value: List[models.WorkflowExecutionStatus] = /* values here */
+```
+
diff --git a/docs/models/listvoicesv1audiovoicesgetrequest.md b/docs/models/listvoicesv1audiovoicesgetrequest.md
index f767d309..67c04d7c 100644
--- a/docs/models/listvoicesv1audiovoicesgetrequest.md
+++ b/docs/models/listvoicesv1audiovoicesgetrequest.md
@@ -3,7 +3,8 @@
## Fields
-| Field | Type | Required | Description |
-| ---------------------------------- | ---------------------------------- | ---------------------------------- | ---------------------------------- |
-| `limit` | *Optional[int]* | :heavy_minus_sign: | Maximum number of voices to return |
-| `offset` | *Optional[int]* | :heavy_minus_sign: | Offset for pagination |
\ No newline at end of file
+| Field | Type | Required | Description |
+| ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- |
+| `limit` | *Optional[int]* | :heavy_minus_sign: | Maximum number of voices to return |
+| `offset` | *Optional[int]* | :heavy_minus_sign: | Offset for pagination |
+| `type` | [Optional[models.ListVoicesV1AudioVoicesGetType]](../models/listvoicesv1audiovoicesgettype.md) | :heavy_minus_sign: | Filter the voices between customs and presets |
\ No newline at end of file
diff --git a/docs/models/listvoicesv1audiovoicesgettype.md b/docs/models/listvoicesv1audiovoicesgettype.md
new file mode 100644
index 00000000..a44f95b3
--- /dev/null
+++ b/docs/models/listvoicesv1audiovoicesgettype.md
@@ -0,0 +1,17 @@
+# ListVoicesV1AudioVoicesGetType
+
+Filter the voices between customs and presets
+
+## Example Usage
+
+```python
+from mistralai.client.models import ListVoicesV1AudioVoicesGetType
+value: ListVoicesV1AudioVoicesGetType = "all"
+```
+
+
+## Values
+
+- `"all"`
+- `"custom"`
+- `"preset"`
diff --git a/docs/models/listworkfloweventresponse.md b/docs/models/listworkfloweventresponse.md
new file mode 100644
index 00000000..72c51a16
--- /dev/null
+++ b/docs/models/listworkfloweventresponse.md
@@ -0,0 +1,9 @@
+# ListWorkflowEventResponse
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ |
+| `events` | List[[models.ListWorkflowEventResponseEvent](../models/listworkfloweventresponseevent.md)] | :heavy_check_mark: | List of workflow events. |
+| `next_cursor` | *OptionalNullable[str]* | :heavy_minus_sign: | Cursor for pagination. |
\ No newline at end of file
diff --git a/docs/models/listworkfloweventresponseevent.md b/docs/models/listworkfloweventresponseevent.md
new file mode 100644
index 00000000..2b8cb1e4
--- /dev/null
+++ b/docs/models/listworkfloweventresponseevent.md
@@ -0,0 +1,107 @@
+# ListWorkflowEventResponseEvent
+
+
+## Supported Types
+
+### `models.WorkflowExecutionStartedResponse`
+
+```python
+value: models.WorkflowExecutionStartedResponse = /* values here */
+```
+
+### `models.WorkflowExecutionCompletedResponse`
+
+```python
+value: models.WorkflowExecutionCompletedResponse = /* values here */
+```
+
+### `models.WorkflowExecutionFailedResponse`
+
+```python
+value: models.WorkflowExecutionFailedResponse = /* values here */
+```
+
+### `models.WorkflowExecutionCanceledResponse`
+
+```python
+value: models.WorkflowExecutionCanceledResponse = /* values here */
+```
+
+### `models.WorkflowExecutionContinuedAsNewResponse`
+
+```python
+value: models.WorkflowExecutionContinuedAsNewResponse = /* values here */
+```
+
+### `models.WorkflowTaskTimedOutResponse`
+
+```python
+value: models.WorkflowTaskTimedOutResponse = /* values here */
+```
+
+### `models.WorkflowTaskFailedResponse`
+
+```python
+value: models.WorkflowTaskFailedResponse = /* values here */
+```
+
+### `models.CustomTaskStartedResponse`
+
+```python
+value: models.CustomTaskStartedResponse = /* values here */
+```
+
+### `models.CustomTaskInProgressResponse`
+
+```python
+value: models.CustomTaskInProgressResponse = /* values here */
+```
+
+### `models.CustomTaskCompletedResponse`
+
+```python
+value: models.CustomTaskCompletedResponse = /* values here */
+```
+
+### `models.CustomTaskFailedResponse`
+
+```python
+value: models.CustomTaskFailedResponse = /* values here */
+```
+
+### `models.CustomTaskTimedOutResponse`
+
+```python
+value: models.CustomTaskTimedOutResponse = /* values here */
+```
+
+### `models.CustomTaskCanceledResponse`
+
+```python
+value: models.CustomTaskCanceledResponse = /* values here */
+```
+
+### `models.ActivityTaskStartedResponse`
+
+```python
+value: models.ActivityTaskStartedResponse = /* values here */
+```
+
+### `models.ActivityTaskCompletedResponse`
+
+```python
+value: models.ActivityTaskCompletedResponse = /* values here */
+```
+
+### `models.ActivityTaskRetryingResponse`
+
+```python
+value: models.ActivityTaskRetryingResponse = /* values here */
+```
+
+### `models.ActivityTaskFailedResponse`
+
+```python
+value: models.ActivityTaskFailedResponse = /* values here */
+```
+
diff --git a/docs/models/networkencodedinput.md b/docs/models/networkencodedinput.md
new file mode 100644
index 00000000..71e38615
--- /dev/null
+++ b/docs/models/networkencodedinput.md
@@ -0,0 +1,10 @@
+# NetworkEncodedInput
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ |
+| `b64payload` | *str* | :heavy_check_mark: | The encoded payload |
+| `encoding_options` | List[[models.EncodedPayloadOptions](../models/encodedpayloadoptions.md)] | :heavy_minus_sign: | The encoding of the payload |
+| `empty` | *Optional[bool]* | :heavy_minus_sign: | Whether the payload is empty |
\ No newline at end of file
diff --git a/docs/models/querydefinition.md b/docs/models/querydefinition.md
new file mode 100644
index 00000000..9d094fda
--- /dev/null
+++ b/docs/models/querydefinition.md
@@ -0,0 +1,11 @@
+# QueryDefinition
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------- | --------------------------------------- | --------------------------------------- | --------------------------------------- |
+| `name` | *str* | :heavy_check_mark: | Name of the query |
+| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | Description of the query |
+| `input_schema` | Dict[str, *Any*] | :heavy_check_mark: | Input JSON schema of the query's model |
+| `output_schema` | Dict[str, *Any*] | :heavy_minus_sign: | Output JSON schema of the query's model |
\ No newline at end of file
diff --git a/docs/models/queryinvocationbody.md b/docs/models/queryinvocationbody.md
new file mode 100644
index 00000000..3d4e5441
--- /dev/null
+++ b/docs/models/queryinvocationbody.md
@@ -0,0 +1,9 @@
+# QueryInvocationBody
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ |
+| `name` | *str* | :heavy_check_mark: | The name of the query to request |
+| `input` | [OptionalNullable[models.QueryInvocationBodyInput]](../models/queryinvocationbodyinput.md) | :heavy_minus_sign: | Input data for the query, matching its schema |
\ No newline at end of file
diff --git a/docs/models/queryinvocationbodyinput.md b/docs/models/queryinvocationbodyinput.md
new file mode 100644
index 00000000..deb0db27
--- /dev/null
+++ b/docs/models/queryinvocationbodyinput.md
@@ -0,0 +1,19 @@
+# QueryInvocationBodyInput
+
+Input data for the query, matching its schema
+
+
+## Supported Types
+
+### `models.NetworkEncodedInput`
+
+```python
+value: models.NetworkEncodedInput = /* values here */
+```
+
+### `Dict[str, Any]`
+
+```python
+value: Dict[str, Any] = /* values here */
+```
+
diff --git a/docs/models/queryworkflowexecutionv1workflowsexecutionsexecutionidqueriespostrequest.md b/docs/models/queryworkflowexecutionv1workflowsexecutionsexecutionidqueriespostrequest.md
new file mode 100644
index 00000000..0e0d378d
--- /dev/null
+++ b/docs/models/queryworkflowexecutionv1workflowsexecutionsexecutionidqueriespostrequest.md
@@ -0,0 +1,9 @@
+# QueryWorkflowExecutionV1WorkflowsExecutionsExecutionIDQueriesPostRequest
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- |
+| `execution_id` | *str* | :heavy_check_mark: | N/A |
+| `query_invocation_body` | [models.QueryInvocationBody](../models/queryinvocationbody.md) | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/queryworkflowresponse.md b/docs/models/queryworkflowresponse.md
new file mode 100644
index 00000000..cc5b0866
--- /dev/null
+++ b/docs/models/queryworkflowresponse.md
@@ -0,0 +1,9 @@
+# QueryWorkflowResponse
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------------------------- | ------------------------------------- | ------------------------------------- | ------------------------------------- |
+| `query_name` | *str* | :heavy_check_mark: | N/A |
+| `result` | *Any* | :heavy_check_mark: | The result of the Query workflow call |
\ No newline at end of file
diff --git a/docs/models/resetinvocationbody.md b/docs/models/resetinvocationbody.md
new file mode 100644
index 00000000..9f306e2d
--- /dev/null
+++ b/docs/models/resetinvocationbody.md
@@ -0,0 +1,11 @@
+# ResetInvocationBody
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- |
+| `event_id` | *int* | :heavy_check_mark: | The event ID to reset the workflow execution to |
+| `reason` | *OptionalNullable[str]* | :heavy_minus_sign: | Reason for resetting the workflow execution |
+| `exclude_signals` | *Optional[bool]* | :heavy_minus_sign: | Whether to exclude signals that happened after the reset point |
+| `exclude_updates` | *Optional[bool]* | :heavy_minus_sign: | Whether to exclude updates that happened after the reset point |
\ No newline at end of file
diff --git a/docs/models/resetworkflowv1workflowsexecutionsexecutionidresetpostrequest.md b/docs/models/resetworkflowv1workflowsexecutionsexecutionidresetpostrequest.md
new file mode 100644
index 00000000..418c6c5a
--- /dev/null
+++ b/docs/models/resetworkflowv1workflowsexecutionsexecutionidresetpostrequest.md
@@ -0,0 +1,9 @@
+# ResetWorkflowV1WorkflowsExecutionsExecutionIDResetPostRequest
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- |
+| `execution_id` | *str* | :heavy_check_mark: | N/A |
+| `reset_invocation_body` | [models.ResetInvocationBody](../models/resetinvocationbody.md) | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/responseconnectorlisttoolsv1.md b/docs/models/responseconnectorlisttoolsv1.md
new file mode 100644
index 00000000..18a31c56
--- /dev/null
+++ b/docs/models/responseconnectorlisttoolsv1.md
@@ -0,0 +1,25 @@
+# ResponseConnectorListToolsV1
+
+Successful Response
+
+
+## Supported Types
+
+### `List[models.ConnectorTool]`
+
+```python
+value: List[models.ConnectorTool] = /* values here */
+```
+
+### `List[models.MCPTool]`
+
+```python
+value: List[models.MCPTool] = /* values here */
+```
+
+### `List[Dict[str, Any]]`
+
+```python
+value: List[Dict[str, Any]] = /* values here */
+```
+
diff --git a/docs/models/responseconnectorlisttoolsv11.md b/docs/models/responseconnectorlisttoolsv11.md
deleted file mode 100644
index 9b5c98c6..00000000
--- a/docs/models/responseconnectorlisttoolsv11.md
+++ /dev/null
@@ -1,17 +0,0 @@
-# ResponseConnectorListToolsV11
-
-
-## Supported Types
-
-### `models.ConnectorTool`
-
-```python
-value: models.ConnectorTool = /* values here */
-```
-
-### `models.MCPTool`
-
-```python
-value: models.MCPTool = /* values here */
-```
-
diff --git a/docs/models/responseconnectorlisttoolsv12.md b/docs/models/responseconnectorlisttoolsv12.md
deleted file mode 100644
index 0266f66d..00000000
--- a/docs/models/responseconnectorlisttoolsv12.md
+++ /dev/null
@@ -1,19 +0,0 @@
-# ResponseConnectorListToolsV12
-
-Successful Response
-
-
-## Supported Types
-
-### `List[models.ResponseConnectorListToolsV11]`
-
-```python
-value: List[models.ResponseConnectorListToolsV11] = /* values here */
-```
-
-### `List[Dict[str, Any]]`
-
-```python
-value: List[Dict[str, Any]] = /* values here */
-```
-
diff --git a/docs/models/responseexecuteworkflowregistrationv1workflowsregistrationsworkflowregistrationidexecutepost.md b/docs/models/responseexecuteworkflowregistrationv1workflowsregistrationsworkflowregistrationidexecutepost.md
new file mode 100644
index 00000000..381987b6
--- /dev/null
+++ b/docs/models/responseexecuteworkflowregistrationv1workflowsregistrationsworkflowregistrationidexecutepost.md
@@ -0,0 +1,19 @@
+# ResponseExecuteWorkflowRegistrationV1WorkflowsRegistrationsWorkflowRegistrationIDExecutePost
+
+Successful Response
+
+
+## Supported Types
+
+### `models.WorkflowExecutionResponse`
+
+```python
+value: models.WorkflowExecutionResponse = /* values here */
+```
+
+### `models.WorkflowExecutionSyncResponse`
+
+```python
+value: models.WorkflowExecutionSyncResponse = /* values here */
+```
+
diff --git a/docs/models/responseexecuteworkflowv1workflowsworkflowidentifierexecutepost.md b/docs/models/responseexecuteworkflowv1workflowsworkflowidentifierexecutepost.md
new file mode 100644
index 00000000..368f2318
--- /dev/null
+++ b/docs/models/responseexecuteworkflowv1workflowsworkflowidentifierexecutepost.md
@@ -0,0 +1,19 @@
+# ResponseExecuteWorkflowV1WorkflowsWorkflowIdentifierExecutePost
+
+Successful Response
+
+
+## Supported Types
+
+### `models.WorkflowExecutionResponse`
+
+```python
+value: models.WorkflowExecutionResponse = /* values here */
+```
+
+### `models.WorkflowExecutionSyncResponse`
+
+```python
+value: models.WorkflowExecutionSyncResponse = /* values here */
+```
+
diff --git a/docs/models/scalarmetric.md b/docs/models/scalarmetric.md
new file mode 100644
index 00000000..96b93231
--- /dev/null
+++ b/docs/models/scalarmetric.md
@@ -0,0 +1,10 @@
+# ScalarMetric
+
+Scalar metric with a single value.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- |
+| `value` | [models.ScalarMetricValue](../models/scalarmetricvalue.md) | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/scalarmetricvalue.md b/docs/models/scalarmetricvalue.md
new file mode 100644
index 00000000..6723310e
--- /dev/null
+++ b/docs/models/scalarmetricvalue.md
@@ -0,0 +1,17 @@
+# ScalarMetricValue
+
+
+## Supported Types
+
+### `int`
+
+```python
+value: int = /* values here */
+```
+
+### `float`
+
+```python
+value: float = /* values here */
+```
+
diff --git a/docs/models/schedulecalendar.md b/docs/models/schedulecalendar.md
new file mode 100644
index 00000000..4166bef5
--- /dev/null
+++ b/docs/models/schedulecalendar.md
@@ -0,0 +1,15 @@
+# ScheduleCalendar
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- |
+| `second` | List[[models.ScheduleRange](../models/schedulerange.md)] | :heavy_minus_sign: | N/A |
+| `minute` | List[[models.ScheduleRange](../models/schedulerange.md)] | :heavy_minus_sign: | N/A |
+| `hour` | List[[models.ScheduleRange](../models/schedulerange.md)] | :heavy_minus_sign: | N/A |
+| `day_of_month` | List[[models.ScheduleRange](../models/schedulerange.md)] | :heavy_minus_sign: | N/A |
+| `month` | List[[models.ScheduleRange](../models/schedulerange.md)] | :heavy_minus_sign: | N/A |
+| `year` | List[[models.ScheduleRange](../models/schedulerange.md)] | :heavy_minus_sign: | N/A |
+| `day_of_week` | List[[models.ScheduleRange](../models/schedulerange.md)] | :heavy_minus_sign: | N/A |
+| `comment` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/docs/models/scheduledefinition.md b/docs/models/scheduledefinition.md
new file mode 100644
index 00000000..4f8f8b80
--- /dev/null
+++ b/docs/models/scheduledefinition.md
@@ -0,0 +1,25 @@
+# ScheduleDefinition
+
+Specification of the times scheduled actions may occur.
+
+The times are the union of :py:attr:`calendars`, :py:attr:`intervals`, and
+:py:attr:`cron_expressions` excluding anything in :py:attr:`skip`.
+
+Used for input where schedule_id is optional (can be provided or auto-generated).
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `input` | *Any* | :heavy_check_mark: | Input to provide to the workflow when starting it. |
+| `calendars` | List[[models.ScheduleCalendar](../models/schedulecalendar.md)] | :heavy_minus_sign: | Calendar-based specification of times. |
+| `intervals` | List[[models.ScheduleInterval](../models/scheduleinterval.md)] | :heavy_minus_sign: | Interval-based specification of times. |
+| `cron_expressions` | List[*str*] | :heavy_minus_sign: | Cron-based specification of times. |
+| `skip` | List[[models.ScheduleCalendar](../models/schedulecalendar.md)] | :heavy_minus_sign: | Set of calendar times to skip. |
+| `start_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | Time after which the first action may be run. |
+| `end_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | Time after which no more actions will be run. |
+| `jitter`                                                                                                                                                               | *OptionalNullable[str]*                                                                                                                                                 | :heavy_minus_sign:                                                                                                                                                      | Jitter to apply to each action.
An action's scheduled time will be incremented by a random value between 0
and this value if present (but not past the next schedule).
|
+| `time_zone_name` | *OptionalNullable[str]* | :heavy_minus_sign: | IANA time zone name, for example ``US/Central``. |
+| `policy` | [Optional[models.SchedulePolicy]](../models/schedulepolicy.md) | :heavy_minus_sign: | N/A |
+| `schedule_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Unique identifier for the schedule. |
\ No newline at end of file
diff --git a/docs/models/scheduledefinitionoutput.md b/docs/models/scheduledefinitionoutput.md
new file mode 100644
index 00000000..c4937c69
--- /dev/null
+++ b/docs/models/scheduledefinitionoutput.md
@@ -0,0 +1,22 @@
+# ScheduleDefinitionOutput
+
+Output representation of a schedule with required schedule_id.
+
+Used when returning schedules from the API where schedule_id is always present.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `input` | *Any* | :heavy_check_mark: | Input to provide to the workflow when starting it. |
+| `calendars` | List[[models.ScheduleCalendar](../models/schedulecalendar.md)] | :heavy_minus_sign: | Calendar-based specification of times. |
+| `intervals` | List[[models.ScheduleInterval](../models/scheduleinterval.md)] | :heavy_minus_sign: | Interval-based specification of times. |
+| `cron_expressions` | List[*str*] | :heavy_minus_sign: | Cron-based specification of times. |
+| `skip` | List[[models.ScheduleCalendar](../models/schedulecalendar.md)] | :heavy_minus_sign: | Set of calendar times to skip. |
+| `start_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | Time after which the first action may be run. |
+| `end_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | Time after which no more actions will be run. |
+| `jitter`                                                                                                                                                               | *OptionalNullable[str]*                                                                                                                                                 | :heavy_minus_sign:                                                                                                                                                      | Jitter to apply to each action.
An action's scheduled time will be incremented by a random value between 0
and this value if present (but not past the next schedule).
|
+| `time_zone_name` | *OptionalNullable[str]* | :heavy_minus_sign: | IANA time zone name, for example ``US/Central``. |
+| `policy` | [Optional[models.SchedulePolicy]](../models/schedulepolicy.md) | :heavy_minus_sign: | N/A |
+| `schedule_id` | *str* | :heavy_check_mark: | Unique identifier for the schedule. |
\ No newline at end of file
diff --git a/docs/models/scheduleinterval.md b/docs/models/scheduleinterval.md
new file mode 100644
index 00000000..e99d552f
--- /dev/null
+++ b/docs/models/scheduleinterval.md
@@ -0,0 +1,9 @@
+# ScheduleInterval
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ----------------------- | ----------------------- | ----------------------- | ----------------------- |
+| `every` | *str* | :heavy_check_mark: | N/A |
+| `offset` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/docs/models/scheduleoverlappolicy.md b/docs/models/scheduleoverlappolicy.md
new file mode 100644
index 00000000..1df8c0ea
--- /dev/null
+++ b/docs/models/scheduleoverlappolicy.md
@@ -0,0 +1,25 @@
+# ScheduleOverlapPolicy
+
+Controls what happens when a workflow would be started by a schedule but
+one is already running.
+
+## Example Usage
+
+```python
+from mistralai.client.models import ScheduleOverlapPolicy
+
+# Open enum: unrecognized values are captured as UnrecognizedInt
+value: ScheduleOverlapPolicy = 1
+```
+
+
+## Values
+
+This is an open enum. Unrecognized values will not fail type checks.
+
+- `1`
+- `2`
+- `3`
+- `4`
+- `5`
+- `6`
diff --git a/docs/models/schedulepolicy.md b/docs/models/schedulepolicy.md
new file mode 100644
index 00000000..5f079049
--- /dev/null
+++ b/docs/models/schedulepolicy.md
@@ -0,0 +1,10 @@
+# SchedulePolicy
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| -------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- |
+| `catchup_window_seconds` | *Optional[int]* | :heavy_minus_sign: | After a Temporal server is unavailable, amount of time in seconds in the past to execute missed actions. |
+| `overlap` | [Optional[models.ScheduleOverlapPolicy]](../models/scheduleoverlappolicy.md) | :heavy_minus_sign: | Controls what happens when a workflow would be started by a schedule but
one is already running. |
+| `pause_on_failure` | *Optional[bool]* | :heavy_minus_sign: | Whether to pause the schedule after a workflow failure. |
\ No newline at end of file
diff --git a/docs/models/schedulerange.md b/docs/models/schedulerange.md
new file mode 100644
index 00000000..d6cb0975
--- /dev/null
+++ b/docs/models/schedulerange.md
@@ -0,0 +1,10 @@
+# ScheduleRange
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------ | ------------------ | ------------------ | ------------------ |
+| `start` | *int* | :heavy_check_mark: | N/A |
+| `end` | *Optional[int]* | :heavy_minus_sign: | N/A |
+| `step` | *Optional[int]* | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/docs/models/scope.md b/docs/models/scope.md
new file mode 100644
index 00000000..0dc29aa5
--- /dev/null
+++ b/docs/models/scope.md
@@ -0,0 +1,15 @@
+# Scope
+
+## Example Usage
+
+```python
+from mistralai.client.models import Scope
+value: Scope = "activity"
+```
+
+
+## Values
+
+- `"activity"`
+- `"workflow"`
+- `"*"`
diff --git a/docs/models/signaldefinition.md b/docs/models/signaldefinition.md
new file mode 100644
index 00000000..c1974f70
--- /dev/null
+++ b/docs/models/signaldefinition.md
@@ -0,0 +1,10 @@
+# SignalDefinition
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------- | --------------------------------------- | --------------------------------------- | --------------------------------------- |
+| `name` | *str* | :heavy_check_mark: | Name of the signal |
+| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | Description of the signal |
+| `input_schema` | Dict[str, *Any*] | :heavy_check_mark: | Input JSON schema of the signal's model |
\ No newline at end of file
diff --git a/docs/models/signalinvocationbody.md b/docs/models/signalinvocationbody.md
new file mode 100644
index 00000000..14dc23e0
--- /dev/null
+++ b/docs/models/signalinvocationbody.md
@@ -0,0 +1,9 @@
+# SignalInvocationBody
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- |
+| `name` | *str* | :heavy_check_mark: | The name of the signal to send |
+| `input` | [OptionalNullable[models.SignalInvocationBodyInput]](../models/signalinvocationbodyinput.md) | :heavy_minus_sign: | Input data for the signal, matching its schema |
\ No newline at end of file
diff --git a/docs/models/signalinvocationbodyinput.md b/docs/models/signalinvocationbodyinput.md
new file mode 100644
index 00000000..e9308bcb
--- /dev/null
+++ b/docs/models/signalinvocationbodyinput.md
@@ -0,0 +1,19 @@
+# SignalInvocationBodyInput
+
+Input data for the signal, matching its schema
+
+
+## Supported Types
+
+### `models.SignalInvocationBodyNetworkEncodedInput`
+
+```python
+value: models.SignalInvocationBodyNetworkEncodedInput = /* values here */
+```
+
+### `Dict[str, Any]`
+
+```python
+value: Dict[str, Any] = /* values here */
+```
+
diff --git a/docs/models/signalinvocationbodynetworkencodedinput.md b/docs/models/signalinvocationbodynetworkencodedinput.md
new file mode 100644
index 00000000..09368656
--- /dev/null
+++ b/docs/models/signalinvocationbodynetworkencodedinput.md
@@ -0,0 +1,11 @@
+# SignalInvocationBodyNetworkEncodedInput
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ |
+| `b64payload` | *str* | :heavy_check_mark: | The encoded payload |
+| `encoding_options` | List[[models.EncodedPayloadOptions](../models/encodedpayloadoptions.md)] | :heavy_minus_sign: | The encoding of the payload |
+| `empty` | *Optional[bool]* | :heavy_minus_sign: | Whether the payload is empty |
+| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/docs/models/signalworkflowexecutionv1workflowsexecutionsexecutionidsignalspostrequest.md b/docs/models/signalworkflowexecutionv1workflowsexecutionsexecutionidsignalspostrequest.md
new file mode 100644
index 00000000..2ca356d8
--- /dev/null
+++ b/docs/models/signalworkflowexecutionv1workflowsexecutionsexecutionidsignalspostrequest.md
@@ -0,0 +1,9 @@
+# SignalWorkflowExecutionV1WorkflowsExecutionsExecutionIDSignalsPostRequest
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- |
+| `execution_id` | *str* | :heavy_check_mark: | N/A |
+| `signal_invocation_body` | [models.SignalInvocationBody](../models/signalinvocationbody.md) | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/signalworkflowresponse.md b/docs/models/signalworkflowresponse.md
new file mode 100644
index 00000000..f65d7924
--- /dev/null
+++ b/docs/models/signalworkflowresponse.md
@@ -0,0 +1,8 @@
+# SignalWorkflowResponse
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------ | ------------------ | ------------------ | ------------------ |
+| `message` | *Optional[str]* | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/docs/models/streameventssepayload.md b/docs/models/streameventssepayload.md
new file mode 100644
index 00000000..6ec6726a
--- /dev/null
+++ b/docs/models/streameventssepayload.md
@@ -0,0 +1,13 @@
+# StreamEventSsePayload
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- |
+| `stream` | *str* | :heavy_check_mark: | N/A |
+| `timestamp` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A |
+| `data` | [models.StreamEventSsePayloadData](../models/streameventssepayloaddata.md) | :heavy_check_mark: | N/A |
+| `workflow_context` | [models.StreamEventWorkflowContext](../models/streameventworkflowcontext.md) | :heavy_check_mark: | N/A |
+| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A |
+| `broker_sequence` | *int* | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/streameventssepayloaddata.md b/docs/models/streameventssepayloaddata.md
new file mode 100644
index 00000000..61e65562
--- /dev/null
+++ b/docs/models/streameventssepayloaddata.md
@@ -0,0 +1,107 @@
+# StreamEventSsePayloadData
+
+
+## Supported Types
+
+### `models.WorkflowExecutionStartedResponse`
+
+```python
+value: models.WorkflowExecutionStartedResponse = /* values here */
+```
+
+### `models.WorkflowExecutionCompletedResponse`
+
+```python
+value: models.WorkflowExecutionCompletedResponse = /* values here */
+```
+
+### `models.WorkflowExecutionFailedResponse`
+
+```python
+value: models.WorkflowExecutionFailedResponse = /* values here */
+```
+
+### `models.WorkflowExecutionCanceledResponse`
+
+```python
+value: models.WorkflowExecutionCanceledResponse = /* values here */
+```
+
+### `models.WorkflowExecutionContinuedAsNewResponse`
+
+```python
+value: models.WorkflowExecutionContinuedAsNewResponse = /* values here */
+```
+
+### `models.WorkflowTaskTimedOutResponse`
+
+```python
+value: models.WorkflowTaskTimedOutResponse = /* values here */
+```
+
+### `models.WorkflowTaskFailedResponse`
+
+```python
+value: models.WorkflowTaskFailedResponse = /* values here */
+```
+
+### `models.CustomTaskStartedResponse`
+
+```python
+value: models.CustomTaskStartedResponse = /* values here */
+```
+
+### `models.CustomTaskInProgressResponse`
+
+```python
+value: models.CustomTaskInProgressResponse = /* values here */
+```
+
+### `models.CustomTaskCompletedResponse`
+
+```python
+value: models.CustomTaskCompletedResponse = /* values here */
+```
+
+### `models.CustomTaskFailedResponse`
+
+```python
+value: models.CustomTaskFailedResponse = /* values here */
+```
+
+### `models.CustomTaskTimedOutResponse`
+
+```python
+value: models.CustomTaskTimedOutResponse = /* values here */
+```
+
+### `models.CustomTaskCanceledResponse`
+
+```python
+value: models.CustomTaskCanceledResponse = /* values here */
+```
+
+### `models.ActivityTaskStartedResponse`
+
+```python
+value: models.ActivityTaskStartedResponse = /* values here */
+```
+
+### `models.ActivityTaskCompletedResponse`
+
+```python
+value: models.ActivityTaskCompletedResponse = /* values here */
+```
+
+### `models.ActivityTaskRetryingResponse`
+
+```python
+value: models.ActivityTaskRetryingResponse = /* values here */
+```
+
+### `models.ActivityTaskFailedResponse`
+
+```python
+value: models.ActivityTaskFailedResponse = /* values here */
+```
+
diff --git a/docs/models/streameventworkflowcontext.md b/docs/models/streameventworkflowcontext.md
new file mode 100644
index 00000000..098f48fd
--- /dev/null
+++ b/docs/models/streameventworkflowcontext.md
@@ -0,0 +1,12 @@
+# StreamEventWorkflowContext
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------------- | ------------------------- | ------------------------- | ------------------------- |
+| `namespace` | *str* | :heavy_check_mark: | N/A |
+| `workflow_name` | *str* | :heavy_check_mark: | N/A |
+| `workflow_exec_id` | *str* | :heavy_check_mark: | N/A |
+| `parent_workflow_exec_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
+| `root_workflow_exec_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/docs/models/streamv1workflowsexecutionsexecutionidstreamgetrequest.md b/docs/models/streamv1workflowsexecutionsexecutionidstreamgetrequest.md
new file mode 100644
index 00000000..19d02a40
--- /dev/null
+++ b/docs/models/streamv1workflowsexecutionsexecutionidstreamgetrequest.md
@@ -0,0 +1,10 @@
+# StreamV1WorkflowsExecutionsExecutionIDStreamGetRequest
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- |
+| `execution_id` | *str* | :heavy_check_mark: | N/A |
+| `event_source` | [OptionalNullable[models.EventSource]](../models/eventsource.md) | :heavy_minus_sign: | N/A |
+| `last_event_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/docs/models/streamv1workflowsexecutionsexecutionidstreamgetresponsebody.md b/docs/models/streamv1workflowsexecutionsexecutionidstreamgetresponsebody.md
new file mode 100644
index 00000000..e12bb938
--- /dev/null
+++ b/docs/models/streamv1workflowsexecutionsexecutionidstreamgetresponsebody.md
@@ -0,0 +1,13 @@
+# StreamV1WorkflowsExecutionsExecutionIDStreamGetResponseBody
+
+Stream of Server-Sent Events (SSE)
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- |
+| `event` | *Optional[str]* | :heavy_minus_sign: | N/A |
+| `data` | [Optional[models.StreamEventSsePayload]](../models/streameventssepayload.md) | :heavy_minus_sign: | N/A |
+| `id` | *Optional[str]* | :heavy_minus_sign: | N/A |
+| `retry` | *Optional[int]* | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/docs/models/tempogettraceresponse.md b/docs/models/tempogettraceresponse.md
new file mode 100644
index 00000000..cc4a4e93
--- /dev/null
+++ b/docs/models/tempogettraceresponse.md
@@ -0,0 +1,14 @@
+# TempoGetTraceResponse
+
+Trace response in OpenTelemetry format.
+
+This is the unified trace format used across all trace providers (Tempo, ClickHouse, etc.).
+Regardless of the underlying backend, all trace data is normalized to this Tempo-compatible
+OpenTelemetry format to ensure consistency in the API response structure.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ |
+| `batches` | List[[models.TempoTraceBatch](../models/tempotracebatch.md)] | :heavy_minus_sign: | The batches of the trace |
\ No newline at end of file
diff --git a/docs/models/tempotraceattribute.md b/docs/models/tempotraceattribute.md
new file mode 100644
index 00000000..6061df86
--- /dev/null
+++ b/docs/models/tempotraceattribute.md
@@ -0,0 +1,9 @@
+# TempoTraceAttribute
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ |
+| `key` | *str* | :heavy_check_mark: | The key of the attribute |
+| `value` | [models.TempoTraceAttributeValue](../models/tempotraceattributevalue.md) | :heavy_check_mark: | The value of the attribute |
\ No newline at end of file
diff --git a/docs/models/tempotraceattributeboolvalue.md b/docs/models/tempotraceattributeboolvalue.md
new file mode 100644
index 00000000..07b77e43
--- /dev/null
+++ b/docs/models/tempotraceattributeboolvalue.md
@@ -0,0 +1,8 @@
+# TempoTraceAttributeBoolValue
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------- | ---------------------------------- | ---------------------------------- | ---------------------------------- |
+| `bool_value` | *bool* | :heavy_check_mark: | The boolean value of the attribute |
\ No newline at end of file
diff --git a/docs/models/tempotraceattributeintvalue.md b/docs/models/tempotraceattributeintvalue.md
new file mode 100644
index 00000000..5c47f1eb
--- /dev/null
+++ b/docs/models/tempotraceattributeintvalue.md
@@ -0,0 +1,8 @@
+# TempoTraceAttributeIntValue
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------- | ---------------------------------- | ---------------------------------- | ---------------------------------- |
+| `int_value` | *str* | :heavy_check_mark: | The integer value of the attribute |
\ No newline at end of file
diff --git a/docs/models/tempotraceattributestringvalue.md b/docs/models/tempotraceattributestringvalue.md
new file mode 100644
index 00000000..61e7eb8a
--- /dev/null
+++ b/docs/models/tempotraceattributestringvalue.md
@@ -0,0 +1,8 @@
+# TempoTraceAttributeStringValue
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------- | --------------------------------- | --------------------------------- | --------------------------------- |
+| `string_value` | *str* | :heavy_check_mark: | The string value of the attribute |
\ No newline at end of file
diff --git a/docs/models/tempotraceattributevalue.md b/docs/models/tempotraceattributevalue.md
new file mode 100644
index 00000000..eb448c3b
--- /dev/null
+++ b/docs/models/tempotraceattributevalue.md
@@ -0,0 +1,25 @@
+# TempoTraceAttributeValue
+
+The value of the attribute
+
+
+## Supported Types
+
+### `models.TempoTraceAttributeStringValue`
+
+```python
+value: models.TempoTraceAttributeStringValue = /* values here */
+```
+
+### `models.TempoTraceAttributeIntValue`
+
+```python
+value: models.TempoTraceAttributeIntValue = /* values here */
+```
+
+### `models.TempoTraceAttributeBoolValue`
+
+```python
+value: models.TempoTraceAttributeBoolValue = /* values here */
+```
+
diff --git a/docs/models/tempotracebatch.md b/docs/models/tempotracebatch.md
new file mode 100644
index 00000000..1e7779a2
--- /dev/null
+++ b/docs/models/tempotracebatch.md
@@ -0,0 +1,9 @@
+# TempoTraceBatch
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- |
+| `resource` | [models.TempoTraceResource](../models/tempotraceresource.md) | :heavy_check_mark: | N/A |
+| `scope_spans` | List[[models.TempoTraceScopeSpan](../models/tempotracescopespan.md)] | :heavy_minus_sign: | The spans of the scope |
\ No newline at end of file
diff --git a/docs/models/tempotraceevent.md b/docs/models/tempotraceevent.md
new file mode 100644
index 00000000..e456fa7c
--- /dev/null
+++ b/docs/models/tempotraceevent.md
@@ -0,0 +1,10 @@
+# TempoTraceEvent
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- |
+| `name` | *str* | :heavy_check_mark: | The name of the event |
+| `time_unix_nano` | *str* | :heavy_check_mark: | The time of the event in Unix nano |
+| `attributes` | List[[models.TempoTraceAttribute](../models/tempotraceattribute.md)] | :heavy_minus_sign: | The attributes of the event |
\ No newline at end of file
diff --git a/docs/models/tempotraceresource.md b/docs/models/tempotraceresource.md
new file mode 100644
index 00000000..0d0e2ded
--- /dev/null
+++ b/docs/models/tempotraceresource.md
@@ -0,0 +1,8 @@
+# TempoTraceResource
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- |
+| `attributes` | List[[models.TempoTraceAttribute](../models/tempotraceattribute.md)] | :heavy_minus_sign: | The attributes of the resource |
\ No newline at end of file
diff --git a/docs/models/tempotracescope.md b/docs/models/tempotracescope.md
new file mode 100644
index 00000000..f3678a9f
--- /dev/null
+++ b/docs/models/tempotracescope.md
@@ -0,0 +1,8 @@
+# TempoTraceScope
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| -------------------- | -------------------- | -------------------- | -------------------- |
+| `name` | *str* | :heavy_check_mark: | The name of the span |
\ No newline at end of file
diff --git a/docs/models/tempotracescopekind.md b/docs/models/tempotracescopekind.md
new file mode 100644
index 00000000..e08cc9be
--- /dev/null
+++ b/docs/models/tempotracescopekind.md
@@ -0,0 +1,19 @@
+# TempoTraceScopeKind
+
+## Example Usage
+
+```python
+from mistralai.client.models import TempoTraceScopeKind
+
+# Open enum: unrecognized values are captured as UnrecognizedStr
+value: TempoTraceScopeKind = "SPAN_KIND_INTERNAL"
+```
+
+
+## Values
+
+This is an open enum. Unrecognized values will not fail type checks.
+
+- `"SPAN_KIND_INTERNAL"`
+- `"SPAN_KIND_SERVER"`
+- `"SPAN_KIND_CLIENT"`
diff --git a/docs/models/tempotracescopespan.md b/docs/models/tempotracescopespan.md
new file mode 100644
index 00000000..e3b6d58a
--- /dev/null
+++ b/docs/models/tempotracescopespan.md
@@ -0,0 +1,9 @@
+# TempoTraceScopeSpan
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- |
+| `scope` | [models.TempoTraceScope](../models/tempotracescope.md) | :heavy_check_mark: | N/A |
+| `spans` | List[[models.TempoTraceSpan](../models/tempotracespan.md)] | :heavy_minus_sign: | The spans of the scope |
\ No newline at end of file
diff --git a/docs/models/tempotracespan.md b/docs/models/tempotracespan.md
new file mode 100644
index 00000000..f657acc6
--- /dev/null
+++ b/docs/models/tempotracespan.md
@@ -0,0 +1,16 @@
+# TempoTraceSpan
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- |
+| `trace_id`                                                           | *str*                                                                | :heavy_check_mark:                                                   | The trace ID of the span                                              |
+| `span_id`                                                            | *str*                                                                | :heavy_check_mark:                                                   | The span ID of the span                                               |
+| `parent_span_id`                                                     | *OptionalNullable[str]*                                              | :heavy_minus_sign:                                                   | The parent span ID of the span                                        |
+| `name`                                                               | *str*                                                                | :heavy_check_mark:                                                   | The name of the span                                                  |
+| `kind`                                                               | [models.TempoTraceScopeKind](../models/tempotracescopekind.md)       | :heavy_check_mark:                                                   | N/A                                                                   |
+| `start_time_unix_nano`                                               | *str*                                                                | :heavy_check_mark:                                                   | The start time of the span in Unix nano                               |
+| `end_time_unix_nano`                                                 | *str*                                                                | :heavy_check_mark:                                                   | The end time of the span in Unix nano                                 |
+| `attributes`                                                         | List[[models.TempoTraceAttribute](../models/tempotraceattribute.md)] | :heavy_minus_sign:                                                   | The attributes of the span                                            |
+| `events`                                                             | List[[models.TempoTraceEvent](../models/tempotraceevent.md)]         | :heavy_minus_sign:                                                   | The events of the span                                                |
\ No newline at end of file
diff --git a/docs/models/terminateworkflowexecutionv1workflowsexecutionsexecutionidterminatepostrequest.md b/docs/models/terminateworkflowexecutionv1workflowsexecutionsexecutionidterminatepostrequest.md
new file mode 100644
index 00000000..227066fb
--- /dev/null
+++ b/docs/models/terminateworkflowexecutionv1workflowsexecutionsexecutionidterminatepostrequest.md
@@ -0,0 +1,8 @@
+# TerminateWorkflowExecutionV1WorkflowsExecutionsExecutionIDTerminatePostRequest
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------ | ------------------ | ------------------ | ------------------ |
+| `execution_id` | *str* | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/timeseriesmetric.md b/docs/models/timeseriesmetric.md
new file mode 100644
index 00000000..2b5fc61d
--- /dev/null
+++ b/docs/models/timeseriesmetric.md
@@ -0,0 +1,10 @@
+# TimeSeriesMetric
+
+Time-series metric with timestamp-value pairs.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- |
+| `value` | List[List[[models.TimeSeriesMetricValue2](../models/timeseriesmetricvalue2.md)]] | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/timeseriesmetricvalue1.md b/docs/models/timeseriesmetricvalue1.md
new file mode 100644
index 00000000..f1783455
--- /dev/null
+++ b/docs/models/timeseriesmetricvalue1.md
@@ -0,0 +1,17 @@
+# TimeSeriesMetricValue1
+
+
+## Supported Types
+
+### `int`
+
+```python
+value: int = /* values here */
+```
+
+### `float`
+
+```python
+value: float = /* values here */
+```
+
diff --git a/docs/models/timeseriesmetricvalue2.md b/docs/models/timeseriesmetricvalue2.md
new file mode 100644
index 00000000..c6ba5eca
--- /dev/null
+++ b/docs/models/timeseriesmetricvalue2.md
@@ -0,0 +1,17 @@
+# TimeSeriesMetricValue2
+
+
+## Supported Types
+
+### `int`
+
+```python
+value: int = /* values here */
+```
+
+### `models.TimeSeriesMetricValue1`
+
+```python
+value: models.TimeSeriesMetricValue1 = /* values here */
+```
+
diff --git a/docs/models/unarchiveworkflowv1workflowsworkflowidentifierunarchiveputrequest.md b/docs/models/unarchiveworkflowv1workflowsworkflowidentifierunarchiveputrequest.md
new file mode 100644
index 00000000..e25cd4bc
--- /dev/null
+++ b/docs/models/unarchiveworkflowv1workflowsworkflowidentifierunarchiveputrequest.md
@@ -0,0 +1,8 @@
+# UnarchiveWorkflowV1WorkflowsWorkflowIdentifierUnarchivePutRequest
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------- | --------------------- | --------------------- | --------------------- |
+| `workflow_identifier` | *str* | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/unscheduleworkflowv1workflowsschedulesscheduleiddeleterequest.md b/docs/models/unscheduleworkflowv1workflowsschedulesscheduleiddeleterequest.md
new file mode 100644
index 00000000..c2bc3373
--- /dev/null
+++ b/docs/models/unscheduleworkflowv1workflowsschedulesscheduleiddeleterequest.md
@@ -0,0 +1,8 @@
+# UnscheduleWorkflowV1WorkflowsSchedulesScheduleIDDeleteRequest
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------ | ------------------ | ------------------ | ------------------ |
+| `schedule_id` | *str* | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/updatedefinition.md b/docs/models/updatedefinition.md
new file mode 100644
index 00000000..8cf62617
--- /dev/null
+++ b/docs/models/updatedefinition.md
@@ -0,0 +1,11 @@
+# UpdateDefinition
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | ---------------------------------------- |
+| `name` | *str* | :heavy_check_mark: | Name of the update |
+| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | Description of the update |
+| `input_schema` | Dict[str, *Any*] | :heavy_check_mark: | Input JSON schema of the update's model |
+| `output_schema` | Dict[str, *Any*] | :heavy_minus_sign: | Output JSON schema of the update's model |
\ No newline at end of file
diff --git a/docs/models/updateinvocationbody.md b/docs/models/updateinvocationbody.md
new file mode 100644
index 00000000..e810a041
--- /dev/null
+++ b/docs/models/updateinvocationbody.md
@@ -0,0 +1,9 @@
+# UpdateInvocationBody
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- |
+| `name` | *str* | :heavy_check_mark: | The name of the update to request |
+| `input` | [OptionalNullable[models.UpdateInvocationBodyInput]](../models/updateinvocationbodyinput.md) | :heavy_minus_sign: | Input data for the update, matching its schema |
\ No newline at end of file
diff --git a/docs/models/updateinvocationbodyinput.md b/docs/models/updateinvocationbodyinput.md
new file mode 100644
index 00000000..70665f9e
--- /dev/null
+++ b/docs/models/updateinvocationbodyinput.md
@@ -0,0 +1,19 @@
+# UpdateInvocationBodyInput
+
+Input data for the update, matching its schema
+
+
+## Supported Types
+
+### `models.NetworkEncodedInput`
+
+```python
+value: models.NetworkEncodedInput = /* values here */
+```
+
+### `Dict[str, Any]`
+
+```python
+value: Dict[str, Any] = /* values here */
+```
+
diff --git a/docs/models/updateworkflowexecutionv1workflowsexecutionsexecutionidupdatespostrequest.md b/docs/models/updateworkflowexecutionv1workflowsexecutionsexecutionidupdatespostrequest.md
new file mode 100644
index 00000000..5705af6b
--- /dev/null
+++ b/docs/models/updateworkflowexecutionv1workflowsexecutionsexecutionidupdatespostrequest.md
@@ -0,0 +1,9 @@
+# UpdateWorkflowExecutionV1WorkflowsExecutionsExecutionIDUpdatesPostRequest
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- |
+| `execution_id` | *str* | :heavy_check_mark: | N/A |
+| `update_invocation_body` | [models.UpdateInvocationBody](../models/updateinvocationbody.md) | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/updateworkflowresponse.md b/docs/models/updateworkflowresponse.md
new file mode 100644
index 00000000..a4e9494e
--- /dev/null
+++ b/docs/models/updateworkflowresponse.md
@@ -0,0 +1,9 @@
+# UpdateWorkflowResponse
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| -------------------------------------- | -------------------------------------- | -------------------------------------- | -------------------------------------- |
+| `update_name` | *str* | :heavy_check_mark: | N/A |
+| `result` | *Any* | :heavy_check_mark: | The result of the Update workflow call |
\ No newline at end of file
diff --git a/docs/models/updateworkflowv1workflowsworkflowidentifierputrequest.md b/docs/models/updateworkflowv1workflowsworkflowidentifierputrequest.md
new file mode 100644
index 00000000..51877da9
--- /dev/null
+++ b/docs/models/updateworkflowv1workflowsworkflowidentifierputrequest.md
@@ -0,0 +1,9 @@
+# UpdateWorkflowV1WorkflowsWorkflowIdentifierPutRequest
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ |
+| `workflow_identifier` | *str* | :heavy_check_mark: | N/A |
+| `workflow_update_request` | [models.WorkflowUpdateRequest](../models/workflowupdaterequest.md) | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/workflow.md b/docs/models/workflow.md
new file mode 100644
index 00000000..4b245e07
--- /dev/null
+++ b/docs/models/workflow.md
@@ -0,0 +1,18 @@
+# Workflow
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------------------------------------------- | --------------------------------------------------------------------------- | --------------------------------------------------------------------------- | --------------------------------------------------------------------------- |
+| `id` | *str* | :heavy_check_mark: | Unique identifier of the workflow |
+| `name` | *str* | :heavy_check_mark: | Name of the workflow |
+| `display_name` | *str* | :heavy_check_mark: | Display name of the workflow |
+| `type` | [models.WorkflowType](../models/workflowtype.md) | :heavy_check_mark: | N/A |
+| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | Description of the workflow |
+| `customer_id` | *str* | :heavy_check_mark: | Customer ID of the workflow |
+| `workspace_id` | *str* | :heavy_check_mark: | Workspace ID of the workflow |
+| `shared_namespace` | *OptionalNullable[str]* | :heavy_minus_sign: | Reserved namespace for shared workflows (e.g., 'shared:my-shared-workflow') |
+| `available_in_chat_assistant` | *Optional[bool]* | :heavy_minus_sign: | Whether the workflow is available in chat assistant |
+| `is_technical` | *Optional[bool]* | :heavy_minus_sign: | Whether the workflow is technical (e.g. SDK-managed) |
+| `archived` | *Optional[bool]* | :heavy_minus_sign: | Whether the workflow is archived |
\ No newline at end of file
diff --git a/docs/models/workflowarchiveresponse.md b/docs/models/workflowarchiveresponse.md
new file mode 100644
index 00000000..6192c2b2
--- /dev/null
+++ b/docs/models/workflowarchiveresponse.md
@@ -0,0 +1,8 @@
+# WorkflowArchiveResponse
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | ---------------------------------------- |
+| `workflow` | [models.Workflow](../models/workflow.md) | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/workflowbasicdefinition.md b/docs/models/workflowbasicdefinition.md
new file mode 100644
index 00000000..a534ad34
--- /dev/null
+++ b/docs/models/workflowbasicdefinition.md
@@ -0,0 +1,13 @@
+# WorkflowBasicDefinition
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ |
+| `id` | *str* | :heavy_check_mark: | N/A |
+| `name` | *str* | :heavy_check_mark: | The name of the workflow |
+| `display_name` | *str* | :heavy_check_mark: | The display name of the workflow |
+| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | A description of the workflow |
+| `metadata` | [Optional[models.WorkflowMetadata]](../models/workflowmetadata.md) | :heavy_minus_sign: | N/A |
+| `archived` | *bool* | :heavy_check_mark: | Whether the workflow is archived |
\ No newline at end of file
diff --git a/docs/models/workflowcodedefinition.md b/docs/models/workflowcodedefinition.md
new file mode 100644
index 00000000..2034f81f
--- /dev/null
+++ b/docs/models/workflowcodedefinition.md
@@ -0,0 +1,14 @@
+# WorkflowCodeDefinition
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ |
+| `input_schema` | Dict[str, *Any*] | :heavy_check_mark: | Input schema of the workflow's run method |
+| `output_schema` | Dict[str, *Any*] | :heavy_minus_sign: | Output schema of the workflow's run method |
+| `signals` | List[[models.SignalDefinition](../models/signaldefinition.md)] | :heavy_minus_sign: | Signal handlers defined by the workflow |
+| `queries` | List[[models.QueryDefinition](../models/querydefinition.md)] | :heavy_minus_sign: | Query handlers defined by the workflow |
+| `updates` | List[[models.UpdateDefinition](../models/updatedefinition.md)] | :heavy_minus_sign: | Update handlers defined by the workflow |
+| `enforce_determinism` | *Optional[bool]* | :heavy_minus_sign: | Whether the workflow enforces deterministic execution |
+| `execution_timeout` | *Optional[float]* | :heavy_minus_sign: | Maximum total execution time including retries and continue-as-new |
\ No newline at end of file
diff --git a/docs/models/workfloweventbatchrequest.md b/docs/models/workfloweventbatchrequest.md
new file mode 100644
index 00000000..cf0af15f
--- /dev/null
+++ b/docs/models/workfloweventbatchrequest.md
@@ -0,0 +1,10 @@
+# WorkflowEventBatchRequest
+
+Request model containing multiple workflow events.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ |
+| `events` | List[[models.WorkflowEventBatchRequestEvent](../models/workfloweventbatchrequestevent.md)] | :heavy_check_mark: | List of workflow events to send. |
\ No newline at end of file
diff --git a/docs/models/workfloweventbatchrequestevent.md b/docs/models/workfloweventbatchrequestevent.md
new file mode 100644
index 00000000..98ac66e8
--- /dev/null
+++ b/docs/models/workfloweventbatchrequestevent.md
@@ -0,0 +1,107 @@
+# WorkflowEventBatchRequestEvent
+
+
+## Supported Types
+
+### `models.WorkflowExecutionStartedRequest`
+
+```python
+value: models.WorkflowExecutionStartedRequest = /* values here */
+```
+
+### `models.WorkflowExecutionCompletedRequest`
+
+```python
+value: models.WorkflowExecutionCompletedRequest = /* values here */
+```
+
+### `models.WorkflowExecutionFailedRequest`
+
+```python
+value: models.WorkflowExecutionFailedRequest = /* values here */
+```
+
+### `models.WorkflowExecutionCanceledRequest`
+
+```python
+value: models.WorkflowExecutionCanceledRequest = /* values here */
+```
+
+### `models.WorkflowExecutionContinuedAsNewRequest`
+
+```python
+value: models.WorkflowExecutionContinuedAsNewRequest = /* values here */
+```
+
+### `models.WorkflowTaskTimedOutRequest`
+
+```python
+value: models.WorkflowTaskTimedOutRequest = /* values here */
+```
+
+### `models.WorkflowTaskFailedRequest`
+
+```python
+value: models.WorkflowTaskFailedRequest = /* values here */
+```
+
+### `models.CustomTaskStartedRequest`
+
+```python
+value: models.CustomTaskStartedRequest = /* values here */
+```
+
+### `models.CustomTaskInProgressRequest`
+
+```python
+value: models.CustomTaskInProgressRequest = /* values here */
+```
+
+### `models.CustomTaskCompletedRequest`
+
+```python
+value: models.CustomTaskCompletedRequest = /* values here */
+```
+
+### `models.CustomTaskFailedRequest`
+
+```python
+value: models.CustomTaskFailedRequest = /* values here */
+```
+
+### `models.CustomTaskTimedOutRequest`
+
+```python
+value: models.CustomTaskTimedOutRequest = /* values here */
+```
+
+### `models.CustomTaskCanceledRequest`
+
+```python
+value: models.CustomTaskCanceledRequest = /* values here */
+```
+
+### `models.ActivityTaskStartedRequest`
+
+```python
+value: models.ActivityTaskStartedRequest = /* values here */
+```
+
+### `models.ActivityTaskCompletedRequest`
+
+```python
+value: models.ActivityTaskCompletedRequest = /* values here */
+```
+
+### `models.ActivityTaskRetryingRequest`
+
+```python
+value: models.ActivityTaskRetryingRequest = /* values here */
+```
+
+### `models.ActivityTaskFailedRequest`
+
+```python
+value: models.ActivityTaskFailedRequest = /* values here */
+```
+
diff --git a/docs/models/workfloweventbatchresponse.md b/docs/models/workfloweventbatchresponse.md
new file mode 100644
index 00000000..84ab9e39
--- /dev/null
+++ b/docs/models/workfloweventbatchresponse.md
@@ -0,0 +1,12 @@
+# WorkflowEventBatchResponse
+
+Response model for batch workflow event reception.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- |
+| `status` | [models.WorkflowEventBatchResponseStatus](../models/workfloweventbatchresponsestatus.md) | :heavy_check_mark: | Status of the batch event reception |
+| `message` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional message |
+| `events_received` | *int* | :heavy_check_mark: | Number of events successfully received |
\ No newline at end of file
diff --git a/docs/models/workfloweventbatchresponsestatus.md b/docs/models/workfloweventbatchresponsestatus.md
new file mode 100644
index 00000000..47dd74bc
--- /dev/null
+++ b/docs/models/workfloweventbatchresponsestatus.md
@@ -0,0 +1,20 @@
+# WorkflowEventBatchResponseStatus
+
+Status of the batch event reception
+
+## Example Usage
+
+```python
+from mistralai.client.models import WorkflowEventBatchResponseStatus
+
+# Open enum: unrecognized values are captured as UnrecognizedStr
+value: WorkflowEventBatchResponseStatus = "success"
+```
+
+
+## Values
+
+This is an open enum. Unrecognized values will not fail type checks.
+
+- `"success"`
+- `"error"`
diff --git a/docs/models/workfloweventrequest.md b/docs/models/workfloweventrequest.md
new file mode 100644
index 00000000..bec82172
--- /dev/null
+++ b/docs/models/workfloweventrequest.md
@@ -0,0 +1,10 @@
+# WorkflowEventRequest
+
+Request model containing a workflow event.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- |
+| `event` | [models.WorkflowEventRequestEvent](../models/workfloweventrequestevent.md) | :heavy_check_mark: | The workflow event payload. |
\ No newline at end of file
diff --git a/docs/models/workfloweventrequestevent.md b/docs/models/workfloweventrequestevent.md
new file mode 100644
index 00000000..2156cd37
--- /dev/null
+++ b/docs/models/workfloweventrequestevent.md
@@ -0,0 +1,109 @@
+# WorkflowEventRequestEvent
+
+The workflow event payload.
+
+
+## Supported Types
+
+### `models.WorkflowExecutionStartedRequest`
+
+```python
+value: models.WorkflowExecutionStartedRequest = /* values here */
+```
+
+### `models.WorkflowExecutionCompletedRequest`
+
+```python
+value: models.WorkflowExecutionCompletedRequest = /* values here */
+```
+
+### `models.WorkflowExecutionFailedRequest`
+
+```python
+value: models.WorkflowExecutionFailedRequest = /* values here */
+```
+
+### `models.WorkflowExecutionCanceledRequest`
+
+```python
+value: models.WorkflowExecutionCanceledRequest = /* values here */
+```
+
+### `models.WorkflowExecutionContinuedAsNewRequest`
+
+```python
+value: models.WorkflowExecutionContinuedAsNewRequest = /* values here */
+```
+
+### `models.WorkflowTaskTimedOutRequest`
+
+```python
+value: models.WorkflowTaskTimedOutRequest = /* values here */
+```
+
+### `models.WorkflowTaskFailedRequest`
+
+```python
+value: models.WorkflowTaskFailedRequest = /* values here */
+```
+
+### `models.CustomTaskStartedRequest`
+
+```python
+value: models.CustomTaskStartedRequest = /* values here */
+```
+
+### `models.CustomTaskInProgressRequest`
+
+```python
+value: models.CustomTaskInProgressRequest = /* values here */
+```
+
+### `models.CustomTaskCompletedRequest`
+
+```python
+value: models.CustomTaskCompletedRequest = /* values here */
+```
+
+### `models.CustomTaskFailedRequest`
+
+```python
+value: models.CustomTaskFailedRequest = /* values here */
+```
+
+### `models.CustomTaskTimedOutRequest`
+
+```python
+value: models.CustomTaskTimedOutRequest = /* values here */
+```
+
+### `models.CustomTaskCanceledRequest`
+
+```python
+value: models.CustomTaskCanceledRequest = /* values here */
+```
+
+### `models.ActivityTaskStartedRequest`
+
+```python
+value: models.ActivityTaskStartedRequest = /* values here */
+```
+
+### `models.ActivityTaskCompletedRequest`
+
+```python
+value: models.ActivityTaskCompletedRequest = /* values here */
+```
+
+### `models.ActivityTaskRetryingRequest`
+
+```python
+value: models.ActivityTaskRetryingRequest = /* values here */
+```
+
+### `models.ActivityTaskFailedRequest`
+
+```python
+value: models.ActivityTaskFailedRequest = /* values here */
+```
+
diff --git a/docs/models/workfloweventresponse.md b/docs/models/workfloweventresponse.md
new file mode 100644
index 00000000..e336dfb2
--- /dev/null
+++ b/docs/models/workfloweventresponse.md
@@ -0,0 +1,11 @@
+# WorkflowEventResponse
+
+Response model for workflow event reception.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ |
+| `status` | [models.WorkflowEventResponseStatus](../models/workfloweventresponsestatus.md) | :heavy_check_mark: | Status of the event reception |
+| `message` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional message |
\ No newline at end of file
diff --git a/docs/models/workfloweventresponsestatus.md b/docs/models/workfloweventresponsestatus.md
new file mode 100644
index 00000000..684f3fc7
--- /dev/null
+++ b/docs/models/workfloweventresponsestatus.md
@@ -0,0 +1,20 @@
+# WorkflowEventResponseStatus
+
+Status of the event reception
+
+## Example Usage
+
+```python
+from mistralai.client.models import WorkflowEventResponseStatus
+
+# Open enum: unrecognized values are captured as UnrecognizedStr
+value: WorkflowEventResponseStatus = "success"
+```
+
+
+## Values
+
+This is an open enum. Unrecognized values will not fail type checks.
+
+- `"success"`
+- `"error"`
diff --git a/docs/models/workfloweventtype.md b/docs/models/workfloweventtype.md
new file mode 100644
index 00000000..ebfe2d86
--- /dev/null
+++ b/docs/models/workfloweventtype.md
@@ -0,0 +1,29 @@
+# WorkflowEventType
+
+## Example Usage
+
+```python
+from mistralai.client.models import WorkflowEventType
+value: WorkflowEventType = "WORKFLOW_EXECUTION_STARTED"
+```
+
+
+## Values
+
+- `"WORKFLOW_EXECUTION_STARTED"`
+- `"WORKFLOW_EXECUTION_COMPLETED"`
+- `"WORKFLOW_EXECUTION_FAILED"`
+- `"WORKFLOW_EXECUTION_CANCELED"`
+- `"WORKFLOW_EXECUTION_CONTINUED_AS_NEW"`
+- `"WORKFLOW_TASK_TIMED_OUT"`
+- `"WORKFLOW_TASK_FAILED"`
+- `"CUSTOM_TASK_STARTED"`
+- `"CUSTOM_TASK_IN_PROGRESS"`
+- `"CUSTOM_TASK_COMPLETED"`
+- `"CUSTOM_TASK_FAILED"`
+- `"CUSTOM_TASK_TIMED_OUT"`
+- `"CUSTOM_TASK_CANCELED"`
+- `"ACTIVITY_TASK_STARTED"`
+- `"ACTIVITY_TASK_COMPLETED"`
+- `"ACTIVITY_TASK_RETRYING"`
+- `"ACTIVITY_TASK_FAILED"`
diff --git a/docs/models/workflowexecutioncanceledattributes.md b/docs/models/workflowexecutioncanceledattributes.md
new file mode 100644
index 00000000..8c2929eb
--- /dev/null
+++ b/docs/models/workflowexecutioncanceledattributes.md
@@ -0,0 +1,11 @@
+# WorkflowExecutionCanceledAttributes
+
+Attributes for workflow execution canceled events.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------------------------------------------------- | ------------------------------------------------------------- | ------------------------------------------------------------- | ------------------------------------------------------------- |
+| `task_id` | *str* | :heavy_check_mark: | Unique identifier for the task within the workflow execution. |
+| `reason` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional reason provided for the cancellation. |
\ No newline at end of file
diff --git a/docs/models/workflowexecutioncanceledrequest.md b/docs/models/workflowexecutioncanceledrequest.md
new file mode 100644
index 00000000..e32f3c9d
--- /dev/null
+++ b/docs/models/workflowexecutioncanceledrequest.md
@@ -0,0 +1,20 @@
+# WorkflowExecutionCanceledRequest
+
+Emitted when a workflow execution is canceled.
+
+This is a terminal event indicating the workflow was explicitly canceled.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- |
+| `event_id` | *str* | :heavy_check_mark: | Unique identifier for this event instance. |
+| `event_timestamp` | *Optional[int]* | :heavy_minus_sign: | Unix timestamp in nanoseconds when the event was created. |
+| `root_workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the root workflow that initiated this execution chain. |
+| `parent_workflow_exec_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set. |
+| `workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the workflow that emitted this event. |
+| `workflow_run_id` | *str* | :heavy_check_mark: | Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same. |
+| `workflow_name` | *str* | :heavy_check_mark: | The registered name of the workflow that emitted this event. |
+| `event_type` | *Optional[Literal["WORKFLOW_EXECUTION_CANCELED"]]* | :heavy_minus_sign: | Event type discriminator. |
+| `attributes` | [models.WorkflowExecutionCanceledAttributes](../models/workflowexecutioncanceledattributes.md) | :heavy_check_mark: | Attributes for workflow execution canceled events. |
\ No newline at end of file
diff --git a/docs/models/workflowexecutioncanceledresponse.md b/docs/models/workflowexecutioncanceledresponse.md
new file mode 100644
index 00000000..f9f4c01d
--- /dev/null
+++ b/docs/models/workflowexecutioncanceledresponse.md
@@ -0,0 +1,20 @@
+# WorkflowExecutionCanceledResponse
+
+Emitted when a workflow execution is canceled.
+
+This is a terminal event indicating the workflow was explicitly canceled.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- |
+| `event_id` | *str* | :heavy_check_mark: | Unique identifier for this event instance. |
+| `event_timestamp` | *int* | :heavy_check_mark: | Unix timestamp in nanoseconds when the event was created. |
+| `root_workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the root workflow that initiated this execution chain. |
+| `parent_workflow_exec_id` | *Nullable[str]* | :heavy_check_mark: | Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set. |
+| `workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the workflow that emitted this event. |
+| `workflow_run_id` | *str* | :heavy_check_mark: | Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same. |
+| `workflow_name` | *str* | :heavy_check_mark: | The registered name of the workflow that emitted this event. |
+| `event_type` | *Optional[Literal["WORKFLOW_EXECUTION_CANCELED"]]* | :heavy_minus_sign: | Event type discriminator. |
+| `attributes` | [models.WorkflowExecutionCanceledAttributes](../models/workflowexecutioncanceledattributes.md) | :heavy_check_mark: | Attributes for workflow execution canceled events. |
\ No newline at end of file
diff --git a/docs/models/workflowexecutioncompletedattributesrequest.md b/docs/models/workflowexecutioncompletedattributesrequest.md
new file mode 100644
index 00000000..5ab8341c
--- /dev/null
+++ b/docs/models/workflowexecutioncompletedattributesrequest.md
@@ -0,0 +1,11 @@
+# WorkflowExecutionCompletedAttributesRequest
+
+Attributes for workflow execution completed events.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- |
+| `task_id` | *str* | :heavy_check_mark: | Unique identifier for the task within the workflow execution. |
+| `result`                                                                                       | [models.JSONPayloadRequest](../models/jsonpayloadrequest.md)                                    | :heavy_check_mark:                                                                              | A payload containing arbitrary JSON data. Used for complete state snapshots or final results.   |
\ No newline at end of file
diff --git a/docs/models/workflowexecutioncompletedattributesresponse.md b/docs/models/workflowexecutioncompletedattributesresponse.md
new file mode 100644
index 00000000..30a33052
--- /dev/null
+++ b/docs/models/workflowexecutioncompletedattributesresponse.md
@@ -0,0 +1,11 @@
+# WorkflowExecutionCompletedAttributesResponse
+
+Attributes for workflow execution completed events.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- |
+| `task_id` | *str* | :heavy_check_mark: | Unique identifier for the task within the workflow execution. |
+| `result`                                                                                       | [models.JSONPayloadResponse](../models/jsonpayloadresponse.md)                                  | :heavy_check_mark:                                                                              | A payload containing arbitrary JSON data. Used for complete state snapshots or final results.   |
\ No newline at end of file
diff --git a/docs/models/workflowexecutioncompletedrequest.md b/docs/models/workflowexecutioncompletedrequest.md
new file mode 100644
index 00000000..02bd2a44
--- /dev/null
+++ b/docs/models/workflowexecutioncompletedrequest.md
@@ -0,0 +1,20 @@
+# WorkflowExecutionCompletedRequest
+
+Emitted when a workflow execution completes successfully.
+
+This is a terminal event indicating the workflow finished without errors.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- |
+| `event_id` | *str* | :heavy_check_mark: | Unique identifier for this event instance. |
+| `event_timestamp` | *Optional[int]* | :heavy_minus_sign: | Unix timestamp in nanoseconds when the event was created. |
+| `root_workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the root workflow that initiated this execution chain. |
+| `parent_workflow_exec_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set. |
+| `workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the workflow that emitted this event. |
+| `workflow_run_id` | *str* | :heavy_check_mark: | Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same. |
+| `workflow_name` | *str* | :heavy_check_mark: | The registered name of the workflow that emitted this event. |
+| `event_type` | *Optional[Literal["WORKFLOW_EXECUTION_COMPLETED"]]* | :heavy_minus_sign: | Event type discriminator. |
+| `attributes` | [models.WorkflowExecutionCompletedAttributesRequest](../models/workflowexecutioncompletedattributesrequest.md) | :heavy_check_mark: | Attributes for workflow execution completed events. |
\ No newline at end of file
diff --git a/docs/models/workflowexecutioncompletedresponse.md b/docs/models/workflowexecutioncompletedresponse.md
new file mode 100644
index 00000000..a4d40dc5
--- /dev/null
+++ b/docs/models/workflowexecutioncompletedresponse.md
@@ -0,0 +1,20 @@
+# WorkflowExecutionCompletedResponse
+
+Emitted when a workflow execution completes successfully.
+
+This is a terminal event indicating the workflow finished without errors.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- |
+| `event_id` | *str* | :heavy_check_mark: | Unique identifier for this event instance. |
+| `event_timestamp` | *int* | :heavy_check_mark: | Unix timestamp in nanoseconds when the event was created. |
+| `root_workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the root workflow that initiated this execution chain. |
+| `parent_workflow_exec_id` | *Nullable[str]* | :heavy_check_mark: | Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set. |
+| `workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the workflow that emitted this event. |
+| `workflow_run_id` | *str* | :heavy_check_mark: | Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same. |
+| `workflow_name` | *str* | :heavy_check_mark: | The registered name of the workflow that emitted this event. |
+| `event_type` | *Optional[Literal["WORKFLOW_EXECUTION_COMPLETED"]]* | :heavy_minus_sign: | Event type discriminator. |
+| `attributes` | [models.WorkflowExecutionCompletedAttributesResponse](../models/workflowexecutioncompletedattributesresponse.md) | :heavy_check_mark: | Attributes for workflow execution completed events. |
\ No newline at end of file
diff --git a/docs/models/workflowexecutioncontinuedasnewattributesrequest.md b/docs/models/workflowexecutioncontinuedasnewattributesrequest.md
new file mode 100644
index 00000000..c85a7b30
--- /dev/null
+++ b/docs/models/workflowexecutioncontinuedasnewattributesrequest.md
@@ -0,0 +1,13 @@
+# WorkflowExecutionContinuedAsNewAttributesRequest
+
+Attributes for workflow execution continued-as-new events.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- |
+| `task_id` | *str* | :heavy_check_mark: | Unique identifier for the task within the workflow execution. |
+| `new_execution_run_id` | *str* | :heavy_check_mark: | The run ID of the new workflow execution that continues this workflow. |
+| `workflow_name` | *str* | :heavy_check_mark: | The registered name of the continued workflow. |
+| `input`                                                                                        | [models.JSONPayloadRequest](../models/jsonpayloadrequest.md)                                    | :heavy_check_mark:                                                                              | A payload containing arbitrary JSON data. Used for complete state snapshots or final results.   |
\ No newline at end of file
diff --git a/docs/models/workflowexecutioncontinuedasnewattributesresponse.md b/docs/models/workflowexecutioncontinuedasnewattributesresponse.md
new file mode 100644
index 00000000..b49a2596
--- /dev/null
+++ b/docs/models/workflowexecutioncontinuedasnewattributesresponse.md
@@ -0,0 +1,13 @@
+# WorkflowExecutionContinuedAsNewAttributesResponse
+
+Attributes for workflow execution continued-as-new events.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- |
+| `task_id` | *str* | :heavy_check_mark: | Unique identifier for the task within the workflow execution. |
+| `new_execution_run_id` | *str* | :heavy_check_mark: | The run ID of the new workflow execution that continues this workflow. |
+| `workflow_name` | *str* | :heavy_check_mark: | The registered name of the continued workflow. |
+| `input`                                                                                        | [models.JSONPayloadResponse](../models/jsonpayloadresponse.md)                                  | :heavy_check_mark:                                                                              | A payload containing arbitrary JSON data. Used for complete state snapshots or final results.   |
\ No newline at end of file
diff --git a/docs/models/workflowexecutioncontinuedasnewrequest.md b/docs/models/workflowexecutioncontinuedasnewrequest.md
new file mode 100644
index 00000000..f83f7b51
--- /dev/null
+++ b/docs/models/workflowexecutioncontinuedasnewrequest.md
@@ -0,0 +1,21 @@
+# WorkflowExecutionContinuedAsNewRequest
+
+Emitted when a workflow continues as a new execution.
+
+This occurs when a workflow uses continue-as-new to reset its history
+while maintaining logical continuity.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ |
+| `event_id` | *str* | :heavy_check_mark: | Unique identifier for this event instance. |
+| `event_timestamp` | *Optional[int]* | :heavy_minus_sign: | Unix timestamp in nanoseconds when the event was created. |
+| `root_workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the root workflow that initiated this execution chain. |
+| `parent_workflow_exec_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set. |
+| `workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the workflow that emitted this event. |
+| `workflow_run_id` | *str* | :heavy_check_mark: | Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same. |
+| `workflow_name` | *str* | :heavy_check_mark: | The registered name of the workflow that emitted this event. |
+| `event_type` | *Optional[Literal["WORKFLOW_EXECUTION_CONTINUED_AS_NEW"]]* | :heavy_minus_sign: | Event type discriminator. |
+| `attributes` | [models.WorkflowExecutionContinuedAsNewAttributesRequest](../models/workflowexecutioncontinuedasnewattributesrequest.md) | :heavy_check_mark: | Attributes for workflow execution continued-as-new events. |
\ No newline at end of file
diff --git a/docs/models/workflowexecutioncontinuedasnewresponse.md b/docs/models/workflowexecutioncontinuedasnewresponse.md
new file mode 100644
index 00000000..e6a7212a
--- /dev/null
+++ b/docs/models/workflowexecutioncontinuedasnewresponse.md
@@ -0,0 +1,21 @@
+# WorkflowExecutionContinuedAsNewResponse
+
+Emitted when a workflow continues as a new execution.
+
+This occurs when a workflow uses continue-as-new to reset its history
+while maintaining logical continuity.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| -------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------- |
+| `event_id` | *str* | :heavy_check_mark: | Unique identifier for this event instance. |
+| `event_timestamp` | *int* | :heavy_check_mark: | Unix timestamp in nanoseconds when the event was created. |
+| `root_workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the root workflow that initiated this execution chain. |
+| `parent_workflow_exec_id` | *Nullable[str]* | :heavy_check_mark: | Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set. |
+| `workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the workflow that emitted this event. |
+| `workflow_run_id` | *str* | :heavy_check_mark: | Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same. |
+| `workflow_name` | *str* | :heavy_check_mark: | The registered name of the workflow that emitted this event. |
+| `event_type` | *Optional[Literal["WORKFLOW_EXECUTION_CONTINUED_AS_NEW"]]* | :heavy_minus_sign: | Event type discriminator. |
+| `attributes` | [models.WorkflowExecutionContinuedAsNewAttributesResponse](../models/workflowexecutioncontinuedasnewattributesresponse.md) | :heavy_check_mark: | Attributes for workflow execution continued-as-new events. |
\ No newline at end of file
diff --git a/docs/models/workflowexecutionfailedattributes.md b/docs/models/workflowexecutionfailedattributes.md
new file mode 100644
index 00000000..9fa14fa4
--- /dev/null
+++ b/docs/models/workflowexecutionfailedattributes.md
@@ -0,0 +1,11 @@
+# WorkflowExecutionFailedAttributes
+
+Attributes for workflow execution failed events.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- |
+| `task_id` | *str* | :heavy_check_mark: | Unique identifier for the task within the workflow execution. |
+| `failure` | [models.Failure](../models/failure.md) | :heavy_check_mark: | Represents an error or exception that occurred during execution. |
\ No newline at end of file
diff --git a/docs/models/workflowexecutionfailedrequest.md b/docs/models/workflowexecutionfailedrequest.md
new file mode 100644
index 00000000..160cfd1a
--- /dev/null
+++ b/docs/models/workflowexecutionfailedrequest.md
@@ -0,0 +1,20 @@
+# WorkflowExecutionFailedRequest
+
+Emitted when a workflow execution fails due to an unhandled exception.
+
+This is a terminal event indicating the workflow ended with an error.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- |
+| `event_id` | *str* | :heavy_check_mark: | Unique identifier for this event instance. |
+| `event_timestamp` | *Optional[int]* | :heavy_minus_sign: | Unix timestamp in nanoseconds when the event was created. |
+| `root_workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the root workflow that initiated this execution chain. |
+| `parent_workflow_exec_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set. |
+| `workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the workflow that emitted this event. |
+| `workflow_run_id` | *str* | :heavy_check_mark: | Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same. |
+| `workflow_name` | *str* | :heavy_check_mark: | The registered name of the workflow that emitted this event. |
+| `event_type` | *Optional[Literal["WORKFLOW_EXECUTION_FAILED"]]* | :heavy_minus_sign: | Event type discriminator. |
+| `attributes` | [models.WorkflowExecutionFailedAttributes](../models/workflowexecutionfailedattributes.md) | :heavy_check_mark: | Attributes for workflow execution failed events. |
\ No newline at end of file
diff --git a/docs/models/workflowexecutionfailedresponse.md b/docs/models/workflowexecutionfailedresponse.md
new file mode 100644
index 00000000..222fbfd7
--- /dev/null
+++ b/docs/models/workflowexecutionfailedresponse.md
@@ -0,0 +1,20 @@
+# WorkflowExecutionFailedResponse
+
+Emitted when a workflow execution fails due to an unhandled exception.
+
+This is a terminal event indicating the workflow ended with an error.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- |
+| `event_id` | *str* | :heavy_check_mark: | Unique identifier for this event instance. |
+| `event_timestamp` | *int* | :heavy_check_mark: | Unix timestamp in nanoseconds when the event was created. |
+| `root_workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the root workflow that initiated this execution chain. |
+| `parent_workflow_exec_id` | *Nullable[str]* | :heavy_check_mark: | Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set. |
+| `workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the workflow that emitted this event. |
+| `workflow_run_id` | *str* | :heavy_check_mark: | Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same. |
+| `workflow_name` | *str* | :heavy_check_mark: | The registered name of the workflow that emitted this event. |
+| `event_type` | *Optional[Literal["WORKFLOW_EXECUTION_FAILED"]]* | :heavy_minus_sign: | Event type discriminator. |
+| `attributes` | [models.WorkflowExecutionFailedAttributes](../models/workflowexecutionfailedattributes.md) | :heavy_check_mark: | Attributes for workflow execution failed events. |
\ No newline at end of file
diff --git a/docs/models/workflowexecutionlistresponse.md b/docs/models/workflowexecutionlistresponse.md
new file mode 100644
index 00000000..3fe61f60
--- /dev/null
+++ b/docs/models/workflowexecutionlistresponse.md
@@ -0,0 +1,9 @@
+# WorkflowExecutionListResponse
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- |
+| `executions` | List[[models.WorkflowExecutionWithoutResultResponse](../models/workflowexecutionwithoutresultresponse.md)] | :heavy_check_mark: | A list of workflow executions |
+| `next_page_token` | *OptionalNullable[str]* | :heavy_minus_sign: | Token to use for fetching the next page of results. Null if this is the last page. |
\ No newline at end of file
diff --git a/docs/models/workflowexecutionprogresstraceevent.md b/docs/models/workflowexecutionprogresstraceevent.md
new file mode 100644
index 00000000..ac50a894
--- /dev/null
+++ b/docs/models/workflowexecutionprogresstraceevent.md
@@ -0,0 +1,17 @@
+# WorkflowExecutionProgressTraceEvent
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------- |
+| `type` | [Optional[models.EventType]](../models/eventtype.md) | :heavy_minus_sign: | N/A |
+| `name` | *str* | :heavy_check_mark: | Name of the event |
+| `id` | *str* | :heavy_check_mark: | The ID of the event |
+| `timestamp_unix_nano` | *int* | :heavy_check_mark: | The timestamp of the event in nanoseconds since the Unix epoch |
+| `attributes` | Dict[str, [Nullable[models.WorkflowExecutionTraceSummaryAttributesValues]](../models/workflowexecutiontracesummaryattributesvalues.md)] | :heavy_check_mark: | The attributes of the event |
+| `internal` | *Optional[bool]* | :heavy_minus_sign: | Whether the event is internal |
+| `status` | [Optional[models.EventProgressStatus]](../models/eventprogressstatus.md) | :heavy_minus_sign: | N/A |
+| `start_time_unix_ms` | *int* | :heavy_check_mark: | The start time of the event in milliseconds since the Unix epoch |
+| `end_time_unix_ms` | *OptionalNullable[int]* | :heavy_minus_sign: | The end time of the event in milliseconds since the Unix epoch |
+| `error` | *OptionalNullable[str]* | :heavy_minus_sign: | The error message, if any |
\ No newline at end of file
diff --git a/docs/models/workflowexecutionrequest.md b/docs/models/workflowexecutionrequest.md
new file mode 100644
index 00000000..553119b1
--- /dev/null
+++ b/docs/models/workflowexecutionrequest.md
@@ -0,0 +1,15 @@
+# WorkflowExecutionRequest
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `execution_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Allows you to specify a custom execution ID. If not provided, a random ID will be generated. |
+| `input` | Dict[str, *Any*] | :heavy_minus_sign: | The input to the workflow. This should be a dictionary that matches the workflow's input schema. |
+| `encoded_input` | [OptionalNullable[models.NetworkEncodedInput]](../models/networkencodedinput.md) | :heavy_minus_sign: | Encoded input to the workflow, used when payload encoding is enabled. |
+| `wait_for_result` | *Optional[bool]* | :heavy_minus_sign: | If true, wait for the workflow to complete and return the result directly. |
+| `timeout_seconds` | *OptionalNullable[float]* | :heavy_minus_sign: | Maximum time to wait for completion when wait_for_result is true. |
+| `custom_tracing_attributes` | Dict[str, *str*] | :heavy_minus_sign: | N/A |
+| ~~`task_queue`~~                                                                                                                                                    | *OptionalNullable[str]*                                                                                                                                             | :heavy_minus_sign:                                                                                                                                                  | :warning: **DEPRECATED**: This will be removed in a future release, please migrate away from it as soon as possible. Use deployment_name instead.                   |
+| `deployment_name` | *OptionalNullable[str]* | :heavy_minus_sign: | Name of the deployment to route this execution to |
\ No newline at end of file
diff --git a/docs/models/workflowexecutionresponse.md b/docs/models/workflowexecutionresponse.md
new file mode 100644
index 00000000..e414c47e
--- /dev/null
+++ b/docs/models/workflowexecutionresponse.md
@@ -0,0 +1,16 @@
+# WorkflowExecutionResponse
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- |
+| `workflow_name` | *str* | :heavy_check_mark: | The name of the workflow |
+| `execution_id` | *str* | :heavy_check_mark: | The ID of the workflow execution |
+| `parent_execution_id` | *OptionalNullable[str]* | :heavy_minus_sign: | The parent execution ID of the workflow execution |
+| `root_execution_id` | *str* | :heavy_check_mark: | The root execution ID of the workflow execution |
+| `status` | [Nullable[models.WorkflowExecutionStatus]](../models/workflowexecutionstatus.md) | :heavy_check_mark: | The status of the workflow execution |
+| `start_time` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | The start time of the workflow execution |
+| `end_time` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | The end time of the workflow execution, if available |
+| `total_duration_ms` | *OptionalNullable[int]* | :heavy_minus_sign: | The total duration of the trace in milliseconds |
+| `result` | *Nullable[Any]* | :heavy_check_mark: | The result of the workflow execution, if available |
\ No newline at end of file
diff --git a/docs/models/workflowexecutionstartedattributesrequest.md b/docs/models/workflowexecutionstartedattributesrequest.md
new file mode 100644
index 00000000..a65df97a
--- /dev/null
+++ b/docs/models/workflowexecutionstartedattributesrequest.md
@@ -0,0 +1,12 @@
+# WorkflowExecutionStartedAttributesRequest
+
+Attributes for workflow execution started events.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- |
+| `task_id` | *str* | :heavy_check_mark: | Unique identifier for the task within the workflow execution. |
+| `workflow_name` | *str* | :heavy_check_mark: | The registered name of the workflow being executed. |
+| `input`                                                                                        | [models.JSONPayloadRequest](../models/jsonpayloadrequest.md)                                    | :heavy_check_mark:                                                                              | A payload containing arbitrary JSON data. Used for complete state snapshots or final results.   |
\ No newline at end of file
diff --git a/docs/models/workflowexecutionstartedattributesresponse.md b/docs/models/workflowexecutionstartedattributesresponse.md
new file mode 100644
index 00000000..c28de680
--- /dev/null
+++ b/docs/models/workflowexecutionstartedattributesresponse.md
@@ -0,0 +1,12 @@
+# WorkflowExecutionStartedAttributesResponse
+
+Attributes for workflow execution started events.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- |
+| `task_id` | *str* | :heavy_check_mark: | Unique identifier for the task within the workflow execution. |
+| `workflow_name` | *str* | :heavy_check_mark: | The registered name of the workflow being executed. |
+| `input`                                                                                        | [models.JSONPayloadResponse](../models/jsonpayloadresponse.md)                                  | :heavy_check_mark:                                                                              | A payload containing arbitrary JSON data. Used for complete state snapshots or final results.   |
\ No newline at end of file
diff --git a/docs/models/workflowexecutionstartedrequest.md b/docs/models/workflowexecutionstartedrequest.md
new file mode 100644
index 00000000..c2bce40e
--- /dev/null
+++ b/docs/models/workflowexecutionstartedrequest.md
@@ -0,0 +1,20 @@
+# WorkflowExecutionStartedRequest
+
+Emitted when a workflow execution begins.
+
+This is the first event in any workflow execution lifecycle.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- |
+| `event_id` | *str* | :heavy_check_mark: | Unique identifier for this event instance. |
+| `event_timestamp` | *Optional[int]* | :heavy_minus_sign: | Unix timestamp in nanoseconds when the event was created. |
+| `root_workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the root workflow that initiated this execution chain. |
+| `parent_workflow_exec_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set. |
+| `workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the workflow that emitted this event. |
+| `workflow_run_id` | *str* | :heavy_check_mark: | Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same. |
+| `workflow_name` | *str* | :heavy_check_mark: | The registered name of the workflow that emitted this event. |
+| `event_type` | *Optional[Literal["WORKFLOW_EXECUTION_STARTED"]]* | :heavy_minus_sign: | Event type discriminator. |
+| `attributes` | [models.WorkflowExecutionStartedAttributesRequest](../models/workflowexecutionstartedattributesrequest.md) | :heavy_check_mark: | Attributes for workflow execution started events. |
\ No newline at end of file
diff --git a/docs/models/workflowexecutionstartedresponse.md b/docs/models/workflowexecutionstartedresponse.md
new file mode 100644
index 00000000..7c370d46
--- /dev/null
+++ b/docs/models/workflowexecutionstartedresponse.md
@@ -0,0 +1,20 @@
+# WorkflowExecutionStartedResponse
+
+Emitted when a workflow execution begins.
+
+This is the first event in any workflow execution lifecycle.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- |
+| `event_id` | *str* | :heavy_check_mark: | Unique identifier for this event instance. |
+| `event_timestamp` | *int* | :heavy_check_mark: | Unix timestamp in nanoseconds when the event was created. |
+| `root_workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the root workflow that initiated this execution chain. |
+| `parent_workflow_exec_id` | *Nullable[str]* | :heavy_check_mark: | Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set. |
+| `workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the workflow that emitted this event. |
+| `workflow_run_id` | *str* | :heavy_check_mark: | Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same. |
+| `workflow_name` | *str* | :heavy_check_mark: | The registered name of the workflow that emitted this event. |
+| `event_type` | *Optional[Literal["WORKFLOW_EXECUTION_STARTED"]]* | :heavy_minus_sign: | Event type discriminator. |
+| `attributes` | [models.WorkflowExecutionStartedAttributesResponse](../models/workflowexecutionstartedattributesresponse.md) | :heavy_check_mark: | Attributes for workflow execution started events. |
\ No newline at end of file
diff --git a/docs/models/workflowexecutionstatus.md b/docs/models/workflowexecutionstatus.md
new file mode 100644
index 00000000..1be6d741
--- /dev/null
+++ b/docs/models/workflowexecutionstatus.md
@@ -0,0 +1,24 @@
+# WorkflowExecutionStatus
+
+## Example Usage
+
+```python
+from mistralai.client.models import WorkflowExecutionStatus
+
+# Open enum: unrecognized values are captured as UnrecognizedStr
+value: WorkflowExecutionStatus = "RUNNING"
+```
+
+
+## Values
+
+This is an open enum. Unrecognized values will not fail type checks.
+
+- `"RUNNING"`
+- `"COMPLETED"`
+- `"FAILED"`
+- `"CANCELED"`
+- `"TERMINATED"`
+- `"CONTINUED_AS_NEW"`
+- `"TIMED_OUT"`
+- `"RETRYING_AFTER_ERROR"`
diff --git a/docs/models/workflowexecutionsyncresponse.md b/docs/models/workflowexecutionsyncresponse.md
new file mode 100644
index 00000000..88cbc21c
--- /dev/null
+++ b/docs/models/workflowexecutionsyncresponse.md
@@ -0,0 +1,12 @@
+# WorkflowExecutionSyncResponse
+
+Response model for synchronous workflow execution
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| -------------------------------------- | -------------------------------------- | -------------------------------------- | -------------------------------------- |
+| `workflow_name` | *str* | :heavy_check_mark: | Name of the workflow that was executed |
+| `execution_id` | *str* | :heavy_check_mark: | ID of the workflow execution |
+| `result` | *Any* | :heavy_check_mark: | The result of the workflow execution |
\ No newline at end of file
diff --git a/docs/models/workflowexecutiontraceevent.md b/docs/models/workflowexecutiontraceevent.md
new file mode 100644
index 00000000..b6bb388c
--- /dev/null
+++ b/docs/models/workflowexecutiontraceevent.md
@@ -0,0 +1,13 @@
+# WorkflowExecutionTraceEvent
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------- |
+| `type` | [Optional[models.EventType]](../models/eventtype.md) | :heavy_minus_sign: | N/A |
+| `name` | *str* | :heavy_check_mark: | Name of the event |
+| `id` | *str* | :heavy_check_mark: | The ID of the event |
+| `timestamp_unix_nano` | *int* | :heavy_check_mark: | The timestamp of the event in nanoseconds since the Unix epoch |
+| `attributes` | Dict[str, [Nullable[models.WorkflowExecutionTraceSummaryAttributesValues]](../models/workflowexecutiontracesummaryattributesvalues.md)] | :heavy_check_mark: | The attributes of the event |
+| `internal` | *Optional[bool]* | :heavy_minus_sign: | Whether the event is internal |
\ No newline at end of file
diff --git a/docs/models/workflowexecutiontraceeventsresponse.md b/docs/models/workflowexecutiontraceeventsresponse.md
new file mode 100644
index 00000000..a0e6e486
--- /dev/null
+++ b/docs/models/workflowexecutiontraceeventsresponse.md
@@ -0,0 +1,17 @@
+# WorkflowExecutionTraceEventsResponse
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- |
+| `workflow_name` | *str* | :heavy_check_mark: | The name of the workflow |
+| `execution_id` | *str* | :heavy_check_mark: | The ID of the workflow execution |
+| `parent_execution_id` | *OptionalNullable[str]* | :heavy_minus_sign: | The parent execution ID of the workflow execution |
+| `root_execution_id` | *str* | :heavy_check_mark: | The root execution ID of the workflow execution |
+| `status` | [Nullable[models.WorkflowExecutionStatus]](../models/workflowexecutionstatus.md) | :heavy_check_mark: | The status of the workflow execution |
+| `start_time` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | The start time of the workflow execution |
+| `end_time` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | The end time of the workflow execution, if available |
+| `total_duration_ms` | *OptionalNullable[int]* | :heavy_minus_sign: | The total duration of the trace in milliseconds |
+| `result` | *Nullable[Any]* | :heavy_check_mark: | The result of the workflow execution, if available |
+| `events` | List[[models.WorkflowExecutionTraceEventsResponseEvent](../models/workflowexecutiontraceeventsresponseevent.md)] | :heavy_minus_sign: | The events of the workflow execution |
\ No newline at end of file
diff --git a/docs/models/workflowexecutiontraceeventsresponseevent.md b/docs/models/workflowexecutiontraceeventsresponseevent.md
new file mode 100644
index 00000000..81c4f573
--- /dev/null
+++ b/docs/models/workflowexecutiontraceeventsresponseevent.md
@@ -0,0 +1,17 @@
+# WorkflowExecutionTraceEventsResponseEvent
+
+
+## Supported Types
+
+### `models.WorkflowExecutionTraceEvent`
+
+```python
+value: models.WorkflowExecutionTraceEvent = /* values here */
+```
+
+### `models.WorkflowExecutionProgressTraceEvent`
+
+```python
+value: models.WorkflowExecutionProgressTraceEvent = /* values here */
+```
+
diff --git a/docs/models/workflowexecutiontraceotelresponse.md b/docs/models/workflowexecutiontraceotelresponse.md
new file mode 100644
index 00000000..62188776
--- /dev/null
+++ b/docs/models/workflowexecutiontraceotelresponse.md
@@ -0,0 +1,19 @@
+# WorkflowExecutionTraceOTelResponse
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ |
+| `workflow_name` | *str* | :heavy_check_mark: | The name of the workflow |
+| `execution_id` | *str* | :heavy_check_mark: | The ID of the workflow execution |
+| `parent_execution_id` | *OptionalNullable[str]* | :heavy_minus_sign: | The parent execution ID of the workflow execution |
+| `root_execution_id` | *str* | :heavy_check_mark: | The root execution ID of the workflow execution |
+| `status` | [Nullable[models.WorkflowExecutionStatus]](../models/workflowexecutionstatus.md) | :heavy_check_mark: | The status of the workflow execution |
+| `start_time` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | The start time of the workflow execution |
+| `end_time` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | The end time of the workflow execution, if available |
+| `total_duration_ms` | *OptionalNullable[int]* | :heavy_minus_sign: | The total duration of the trace in milliseconds |
+| `result` | *Nullable[Any]* | :heavy_check_mark: | The result of the workflow execution, if available |
+| `data_source` | *str* | :heavy_check_mark: | The data source of the trace |
+| `otel_trace_id` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the trace |
+| `otel_trace_data` | [OptionalNullable[models.TempoGetTraceResponse]](../models/tempogettraceresponse.md) | :heavy_minus_sign: | The raw OpenTelemetry trace data |
\ No newline at end of file
diff --git a/docs/models/workflowexecutiontracesummaryattributesvalues.md b/docs/models/workflowexecutiontracesummaryattributesvalues.md
new file mode 100644
index 00000000..c01a122f
--- /dev/null
+++ b/docs/models/workflowexecutiontracesummaryattributesvalues.md
@@ -0,0 +1,29 @@
+# WorkflowExecutionTraceSummaryAttributesValues
+
+
+## Supported Types
+
+### `str`
+
+```python
+value: str = /* values here */
+```
+
+### `int`
+
+```python
+value: int = /* values here */
+```
+
+### `float`
+
+```python
+value: float = /* values here */
+```
+
+### `bool`
+
+```python
+value: bool = /* values here */
+```
+
diff --git a/docs/models/workflowexecutiontracesummaryresponse.md b/docs/models/workflowexecutiontracesummaryresponse.md
new file mode 100644
index 00000000..3c1ab0e5
--- /dev/null
+++ b/docs/models/workflowexecutiontracesummaryresponse.md
@@ -0,0 +1,17 @@
+# WorkflowExecutionTraceSummaryResponse
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------ |
+| `workflow_name` | *str* | :heavy_check_mark: | The name of the workflow |
+| `execution_id` | *str* | :heavy_check_mark: | The ID of the workflow execution |
+| `parent_execution_id` | *OptionalNullable[str]* | :heavy_minus_sign: | The parent execution ID of the workflow execution |
+| `root_execution_id` | *str* | :heavy_check_mark: | The root execution ID of the workflow execution |
+| `status` | [Nullable[models.WorkflowExecutionStatus]](../models/workflowexecutionstatus.md) | :heavy_check_mark: | The status of the workflow execution |
+| `start_time` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | The start time of the workflow execution |
+| `end_time` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | The end time of the workflow execution, if available |
+| `total_duration_ms` | *OptionalNullable[int]* | :heavy_minus_sign: | The total duration of the trace in milliseconds |
+| `result` | *Nullable[Any]* | :heavy_check_mark: | The result of the workflow execution, if available |
+| `span_tree` | [OptionalNullable[models.WorkflowExecutionTraceSummarySpan]](../models/workflowexecutiontracesummaryspan.md) | :heavy_minus_sign: | The root span of the trace |
\ No newline at end of file
diff --git a/docs/models/workflowexecutiontracesummaryspan.md b/docs/models/workflowexecutiontracesummaryspan.md
new file mode 100644
index 00000000..8ee487a2
--- /dev/null
+++ b/docs/models/workflowexecutiontracesummaryspan.md
@@ -0,0 +1,14 @@
+# WorkflowExecutionTraceSummarySpan
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------- |
+| `span_id` | *str* | :heavy_check_mark: | The ID of the span |
+| `name` | *str* | :heavy_check_mark: | The name of the span |
+| `start_time_unix_nano` | *int* | :heavy_check_mark: | The start time of the span in nanoseconds since the Unix epoch |
+| `end_time_unix_nano` | *Nullable[int]* | :heavy_check_mark: | The end time of the span in nanoseconds since the Unix epoch |
+| `attributes` | Dict[str, [Nullable[models.WorkflowExecutionTraceSummaryAttributesValues]](../models/workflowexecutiontracesummaryattributesvalues.md)] | :heavy_check_mark: | The attributes of the span |
+| `events` | List[[models.WorkflowExecutionTraceEvent](../models/workflowexecutiontraceevent.md)] | :heavy_check_mark: | The events of the span |
+| `children` | List[[models.WorkflowExecutionTraceSummarySpan](../models/workflowexecutiontracesummaryspan.md)] | :heavy_minus_sign: | The child spans of the span |
\ No newline at end of file
diff --git a/docs/models/workflowexecutionwithoutresultresponse.md b/docs/models/workflowexecutionwithoutresultresponse.md
new file mode 100644
index 00000000..f74414a6
--- /dev/null
+++ b/docs/models/workflowexecutionwithoutresultresponse.md
@@ -0,0 +1,15 @@
+# WorkflowExecutionWithoutResultResponse
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- |
+| `workflow_name` | *str* | :heavy_check_mark: | The name of the workflow |
+| `execution_id` | *str* | :heavy_check_mark: | The ID of the workflow execution |
+| `parent_execution_id` | *OptionalNullable[str]* | :heavy_minus_sign: | The parent execution ID of the workflow execution |
+| `root_execution_id` | *str* | :heavy_check_mark: | The root execution ID of the workflow execution |
+| `status` | [Nullable[models.WorkflowExecutionStatus]](../models/workflowexecutionstatus.md) | :heavy_check_mark: | The status of the workflow execution |
+| `start_time` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | The start time of the workflow execution |
+| `end_time` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | The end time of the workflow execution, if available |
+| `total_duration_ms` | *OptionalNullable[int]* | :heavy_minus_sign: | The total duration of the trace in milliseconds |
\ No newline at end of file
diff --git a/docs/models/workflowgetresponse.md b/docs/models/workflowgetresponse.md
new file mode 100644
index 00000000..ef9baa44
--- /dev/null
+++ b/docs/models/workflowgetresponse.md
@@ -0,0 +1,8 @@
+# WorkflowGetResponse
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ |
+| `workflow` | [models.WorkflowWithWorkerStatus](../models/workflowwithworkerstatus.md) | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/workflowlistresponse.md b/docs/models/workflowlistresponse.md
new file mode 100644
index 00000000..d36e48c6
--- /dev/null
+++ b/docs/models/workflowlistresponse.md
@@ -0,0 +1,9 @@
+# WorkflowListResponse
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- |
+| `workflows` | List[[models.WorkflowBasicDefinition](../models/workflowbasicdefinition.md)] | :heavy_check_mark: | A list of workflows |
+| `next_cursor` | *Nullable[str]* | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/workflowmetadata.md b/docs/models/workflowmetadata.md
new file mode 100644
index 00000000..c69cb08b
--- /dev/null
+++ b/docs/models/workflowmetadata.md
@@ -0,0 +1,8 @@
+# WorkflowMetadata
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- |
+| `shared_namespace` | *OptionalNullable[str]* | :heavy_minus_sign: | Namespace for shared workflows, None if user-owned |
\ No newline at end of file
diff --git a/docs/models/workflowmetrics.md b/docs/models/workflowmetrics.md
new file mode 100644
index 00000000..4936382b
--- /dev/null
+++ b/docs/models/workflowmetrics.md
@@ -0,0 +1,17 @@
+# WorkflowMetrics
+
+Complete metrics for a specific workflow.
+
+This type combines all metric categories.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- |
+| `execution_count` | [models.ScalarMetric](../models/scalarmetric.md) | :heavy_check_mark: | Scalar metric with a single value. |
+| `success_count` | [models.ScalarMetric](../models/scalarmetric.md) | :heavy_check_mark: | Scalar metric with a single value. |
+| `error_count` | [models.ScalarMetric](../models/scalarmetric.md) | :heavy_check_mark: | Scalar metric with a single value. |
+| `average_latency_ms` | [models.ScalarMetric](../models/scalarmetric.md) | :heavy_check_mark: | Scalar metric with a single value. |
+| `latency_over_time` | [models.TimeSeriesMetric](../models/timeseriesmetric.md) | :heavy_check_mark: | Time-series metric with timestamp-value pairs. |
+| `retry_rate` | [models.ScalarMetric](../models/scalarmetric.md) | :heavy_check_mark: | Scalar metric with a single value. |
\ No newline at end of file
diff --git a/docs/models/workflowregistration.md b/docs/models/workflowregistration.md
new file mode 100644
index 00000000..aad6831a
--- /dev/null
+++ b/docs/models/workflowregistration.md
@@ -0,0 +1,13 @@
+# WorkflowRegistration
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- |
+| `id` | *str* | :heavy_check_mark: | Unique identifier of the workflow registration |
+| `task_queue` | *str* | :heavy_check_mark: | Project name of the workflow |
+| `definition` | [models.WorkflowCodeDefinition](../models/workflowcodedefinition.md) | :heavy_check_mark: | N/A |
+| `workflow_id` | *str* | :heavy_check_mark: | Workflow ID of the workflow |
+| `workflow` | [OptionalNullable[models.Workflow]](../models/workflow.md) | :heavy_minus_sign: | Workflow of the workflow registration |
+| `compatible_with_chat_assistant` | *Optional[bool]* | :heavy_minus_sign: | Whether the workflow is compatible with chat assistant |
\ No newline at end of file
diff --git a/docs/models/workflowregistrationgetresponse.md b/docs/models/workflowregistrationgetresponse.md
new file mode 100644
index 00000000..c524fc68
--- /dev/null
+++ b/docs/models/workflowregistrationgetresponse.md
@@ -0,0 +1,9 @@
+# WorkflowRegistrationGetResponse
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ |
+| `workflow_registration` | [models.WorkflowRegistrationWithWorkerStatus](../models/workflowregistrationwithworkerstatus.md) | :heavy_check_mark: | N/A |
+| `workflow_version` | [models.WorkflowRegistrationWithWorkerStatus](../models/workflowregistrationwithworkerstatus.md) | :heavy_check_mark: | Deprecated: use workflow_registration |
\ No newline at end of file
diff --git a/docs/models/workflowregistrationlistresponse.md b/docs/models/workflowregistrationlistresponse.md
new file mode 100644
index 00000000..e37b3671
--- /dev/null
+++ b/docs/models/workflowregistrationlistresponse.md
@@ -0,0 +1,10 @@
+# WorkflowRegistrationListResponse
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- |
+| `workflow_registrations` | List[[models.WorkflowRegistration](../models/workflowregistration.md)] | :heavy_check_mark: | A list of workflow registrations |
+| `next_cursor` | *Nullable[str]* | :heavy_check_mark: | N/A |
+| `workflow_versions` | List[[models.WorkflowRegistration](../models/workflowregistration.md)] | :heavy_check_mark: | Deprecated: use workflow_registrations |
\ No newline at end of file
diff --git a/docs/models/workflowregistrationwithworkerstatus.md b/docs/models/workflowregistrationwithworkerstatus.md
new file mode 100644
index 00000000..625871c1
--- /dev/null
+++ b/docs/models/workflowregistrationwithworkerstatus.md
@@ -0,0 +1,14 @@
+# WorkflowRegistrationWithWorkerStatus
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- |
+| `id` | *str* | :heavy_check_mark: | Unique identifier of the workflow registration |
+| `task_queue` | *str* | :heavy_check_mark: | Project name of the workflow |
+| `definition` | [models.WorkflowCodeDefinition](../models/workflowcodedefinition.md) | :heavy_check_mark: | N/A |
+| `workflow_id` | *str* | :heavy_check_mark: | Workflow ID of the workflow |
+| `workflow` | [OptionalNullable[models.Workflow]](../models/workflow.md) | :heavy_minus_sign: | Workflow of the workflow registration |
+| `compatible_with_chat_assistant` | *Optional[bool]* | :heavy_minus_sign: | Whether the workflow is compatible with chat assistant |
+| `active` | *bool* | :heavy_check_mark: | Whether the workflow registration is active |
\ No newline at end of file
diff --git a/docs/models/workflowschedulelistresponse.md b/docs/models/workflowschedulelistresponse.md
new file mode 100644
index 00000000..7142185b
--- /dev/null
+++ b/docs/models/workflowschedulelistresponse.md
@@ -0,0 +1,8 @@
+# WorkflowScheduleListResponse
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ |
+| `schedules` | List[[models.ScheduleDefinitionOutput](../models/scheduledefinitionoutput.md)] | :heavy_check_mark: | A list of workflow schedules |
\ No newline at end of file
diff --git a/docs/models/workflowschedulerequest.md b/docs/models/workflowschedulerequest.md
new file mode 100644
index 00000000..a6255c75
--- /dev/null
+++ b/docs/models/workflowschedulerequest.md
@@ -0,0 +1,14 @@
+# WorkflowScheduleRequest
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `schedule` | [models.ScheduleDefinition](../models/scheduledefinition.md) | :heavy_check_mark: | Specification of the times scheduled actions may occur.
The times are the union of :py:attr:`calendars`, :py:attr:`intervals`, and
:py:attr:`cron_expressions` excluding anything in :py:attr:`skip`.
Used for input where schedule_id is optional (can be provided or auto-generated). |
+| `workflow_registration_id` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the workflow registration to schedule |
+| `workflow_version_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Deprecated: use workflow_registration_id |
+| `workflow_identifier` | *OptionalNullable[str]* | :heavy_minus_sign: | The name or ID of the workflow to schedule |
+| ~~`workflow_task_queue`~~ | *OptionalNullable[str]* | :heavy_minus_sign: | :warning: **DEPRECATED**: This will be removed in a future release, please migrate away from it as soon as possible.
Deprecated. Use deployment_name instead. |
+| `schedule_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Allows you to specify a custom schedule ID. If not provided, a random ID will be generated. |
+| `deployment_name` | *OptionalNullable[str]* | :heavy_minus_sign: | Name of the deployment to route this schedule to |
\ No newline at end of file
diff --git a/docs/models/workflowscheduleresponse.md b/docs/models/workflowscheduleresponse.md
new file mode 100644
index 00000000..96d7ced7
--- /dev/null
+++ b/docs/models/workflowscheduleresponse.md
@@ -0,0 +1,8 @@
+# WorkflowScheduleResponse
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------- | ---------------------- | ---------------------- | ---------------------- |
+| `schedule_id` | *str* | :heavy_check_mark: | The ID of the schedule |
\ No newline at end of file
diff --git a/docs/models/workflowtaskfailedattributes.md b/docs/models/workflowtaskfailedattributes.md
new file mode 100644
index 00000000..c6515f84
--- /dev/null
+++ b/docs/models/workflowtaskfailedattributes.md
@@ -0,0 +1,11 @@
+# WorkflowTaskFailedAttributes
+
+Attributes for workflow task failed events.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- |
+| `task_id` | *str* | :heavy_check_mark: | Unique identifier for the task within the workflow execution. |
+| `failure` | [models.Failure](../models/failure.md) | :heavy_check_mark: | Represents an error or exception that occurred during execution. |
\ No newline at end of file
diff --git a/docs/models/workflowtaskfailedrequest.md b/docs/models/workflowtaskfailedrequest.md
new file mode 100644
index 00000000..a39cd97c
--- /dev/null
+++ b/docs/models/workflowtaskfailedrequest.md
@@ -0,0 +1,21 @@
+# WorkflowTaskFailedRequest
+
+Emitted when a workflow task fails.
+
+This indicates an error occurred during workflow task execution,
+which may trigger a retry depending on configuration.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- |
+| `event_id` | *str* | :heavy_check_mark: | Unique identifier for this event instance. |
+| `event_timestamp` | *Optional[int]* | :heavy_minus_sign: | Unix timestamp in nanoseconds when the event was created. |
+| `root_workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the root workflow that initiated this execution chain. |
+| `parent_workflow_exec_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set. |
+| `workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the workflow that emitted this event. |
+| `workflow_run_id` | *str* | :heavy_check_mark: | Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same. |
+| `workflow_name` | *str* | :heavy_check_mark: | The registered name of the workflow that emitted this event. |
+| `event_type` | *Optional[Literal["WORKFLOW_TASK_FAILED"]]* | :heavy_minus_sign: | Event type discriminator. |
+| `attributes` | [models.WorkflowTaskFailedAttributes](../models/workflowtaskfailedattributes.md) | :heavy_check_mark: | Attributes for workflow task failed events. |
\ No newline at end of file
diff --git a/docs/models/workflowtaskfailedresponse.md b/docs/models/workflowtaskfailedresponse.md
new file mode 100644
index 00000000..731ed219
--- /dev/null
+++ b/docs/models/workflowtaskfailedresponse.md
@@ -0,0 +1,21 @@
+# WorkflowTaskFailedResponse
+
+Emitted when a workflow task fails.
+
+This indicates an error occurred during workflow task execution,
+which may trigger a retry depending on configuration.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- |
+| `event_id` | *str* | :heavy_check_mark: | Unique identifier for this event instance. |
+| `event_timestamp` | *int* | :heavy_check_mark: | Unix timestamp in nanoseconds when the event was created. |
+| `root_workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the root workflow that initiated this execution chain. |
+| `parent_workflow_exec_id` | *Nullable[str]* | :heavy_check_mark: | Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set. |
+| `workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the workflow that emitted this event. |
+| `workflow_run_id` | *str* | :heavy_check_mark: | Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same. |
+| `workflow_name` | *str* | :heavy_check_mark: | The registered name of the workflow that emitted this event. |
+| `event_type` | *Optional[Literal["WORKFLOW_TASK_FAILED"]]* | :heavy_minus_sign: | Event type discriminator. |
+| `attributes` | [models.WorkflowTaskFailedAttributes](../models/workflowtaskfailedattributes.md) | :heavy_check_mark: | Attributes for workflow task failed events. |
\ No newline at end of file
diff --git a/docs/models/workflowtasktimedoutattributes.md b/docs/models/workflowtasktimedoutattributes.md
new file mode 100644
index 00000000..7f298ea2
--- /dev/null
+++ b/docs/models/workflowtasktimedoutattributes.md
@@ -0,0 +1,11 @@
+# WorkflowTaskTimedOutAttributes
+
+Attributes for workflow task timed out events.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- |
+| `task_id` | *str* | :heavy_check_mark: | Unique identifier for the task within the workflow execution. |
+| `timeout_type` | *OptionalNullable[str]* | :heavy_minus_sign: | The type of timeout that occurred (e.g., 'START_TO_CLOSE', 'SCHEDULE_TO_START'). |
\ No newline at end of file
diff --git a/docs/models/workflowtasktimedoutrequest.md b/docs/models/workflowtasktimedoutrequest.md
new file mode 100644
index 00000000..6c87b8ce
--- /dev/null
+++ b/docs/models/workflowtasktimedoutrequest.md
@@ -0,0 +1,21 @@
+# WorkflowTaskTimedOutRequest
+
+Emitted when a workflow task times out.
+
+This indicates the workflow task (a unit of workflow execution) exceeded
+its configured timeout.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- |
+| `event_id` | *str* | :heavy_check_mark: | Unique identifier for this event instance. |
+| `event_timestamp` | *Optional[int]* | :heavy_minus_sign: | Unix timestamp in nanoseconds when the event was created. |
+| `root_workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the root workflow that initiated this execution chain. |
+| `parent_workflow_exec_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set. |
+| `workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the workflow that emitted this event. |
+| `workflow_run_id` | *str* | :heavy_check_mark: | Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same. |
+| `workflow_name` | *str* | :heavy_check_mark: | The registered name of the workflow that emitted this event. |
+| `event_type` | *Optional[Literal["WORKFLOW_TASK_TIMED_OUT"]]* | :heavy_minus_sign: | Event type discriminator. |
+| `attributes` | [models.WorkflowTaskTimedOutAttributes](../models/workflowtasktimedoutattributes.md) | :heavy_check_mark: | Attributes for workflow task timed out events. |
\ No newline at end of file
diff --git a/docs/models/workflowtasktimedoutresponse.md b/docs/models/workflowtasktimedoutresponse.md
new file mode 100644
index 00000000..ea3edade
--- /dev/null
+++ b/docs/models/workflowtasktimedoutresponse.md
@@ -0,0 +1,21 @@
+# WorkflowTaskTimedOutResponse
+
+Emitted when a workflow task times out.
+
+This indicates the workflow task (a unit of workflow execution) exceeded
+its configured timeout.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- |
+| `event_id` | *str* | :heavy_check_mark: | Unique identifier for this event instance. |
+| `event_timestamp` | *int* | :heavy_check_mark: | Unix timestamp in nanoseconds when the event was created. |
+| `root_workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the root workflow that initiated this execution chain. |
+| `parent_workflow_exec_id` | *Nullable[str]* | :heavy_check_mark: | Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set. |
+| `workflow_exec_id` | *str* | :heavy_check_mark: | Execution ID of the workflow that emitted this event. |
+| `workflow_run_id` | *str* | :heavy_check_mark: | Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same. |
+| `workflow_name` | *str* | :heavy_check_mark: | The registered name of the workflow that emitted this event. |
+| `event_type` | *Optional[Literal["WORKFLOW_TASK_TIMED_OUT"]]* | :heavy_minus_sign: | Event type discriminator. |
+| `attributes` | [models.WorkflowTaskTimedOutAttributes](../models/workflowtasktimedoutattributes.md) | :heavy_check_mark: | Attributes for workflow task timed out events. |
\ No newline at end of file
diff --git a/docs/models/workflowtype.md b/docs/models/workflowtype.md
new file mode 100644
index 00000000..93aa31f1
--- /dev/null
+++ b/docs/models/workflowtype.md
@@ -0,0 +1,13 @@
+# WorkflowType
+
+## Example Usage
+
+```python
+from mistralai.client.models import WorkflowType
+value: WorkflowType = "code"
+```
+
+
+## Values
+
+- `"code"`
diff --git a/docs/models/workflowunarchiveresponse.md b/docs/models/workflowunarchiveresponse.md
new file mode 100644
index 00000000..667c9387
--- /dev/null
+++ b/docs/models/workflowunarchiveresponse.md
@@ -0,0 +1,8 @@
+# WorkflowUnarchiveResponse
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | ---------------------------------------- |
+| `workflow` | [models.Workflow](../models/workflow.md) | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/workflowupdaterequest.md b/docs/models/workflowupdaterequest.md
new file mode 100644
index 00000000..f8ce41ed
--- /dev/null
+++ b/docs/models/workflowupdaterequest.md
@@ -0,0 +1,10 @@
+# WorkflowUpdateRequest
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ |
+| `display_name` | *OptionalNullable[str]* | :heavy_minus_sign: | New display name value |
+| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | New description value |
+| `available_in_chat_assistant` | *OptionalNullable[bool]* | :heavy_minus_sign: | Whether to make the workflow available in the chat assistant |
\ No newline at end of file
diff --git a/docs/models/workflowupdateresponse.md b/docs/models/workflowupdateresponse.md
new file mode 100644
index 00000000..68faf7c0
--- /dev/null
+++ b/docs/models/workflowupdateresponse.md
@@ -0,0 +1,8 @@
+# WorkflowUpdateResponse
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | ---------------------------------------- |
+| `workflow` | [models.Workflow](../models/workflow.md) | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/workflowwithworkerstatus.md b/docs/models/workflowwithworkerstatus.md
new file mode 100644
index 00000000..ac6efb9c
--- /dev/null
+++ b/docs/models/workflowwithworkerstatus.md
@@ -0,0 +1,19 @@
+# WorkflowWithWorkerStatus
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --------------------------------------------------------------------------- | --------------------------------------------------------------------------- | --------------------------------------------------------------------------- | --------------------------------------------------------------------------- |
+| `id` | *str* | :heavy_check_mark: | Unique identifier of the workflow |
+| `name` | *str* | :heavy_check_mark: | Name of the workflow |
+| `display_name` | *str* | :heavy_check_mark: | Display name of the workflow |
+| `type` | [models.WorkflowType](../models/workflowtype.md) | :heavy_check_mark: | N/A |
+| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | Description of the workflow |
+| `customer_id` | *str* | :heavy_check_mark: | Customer ID of the workflow |
+| `workspace_id` | *str* | :heavy_check_mark: | Workspace ID of the workflow |
+| `shared_namespace` | *OptionalNullable[str]* | :heavy_minus_sign: | Reserved namespace for shared workflows (e.g., 'shared:my-shared-workflow') |
+| `available_in_chat_assistant` | *Optional[bool]* | :heavy_minus_sign: | Whether the workflow is available in chat assistant |
+| `is_technical` | *Optional[bool]* | :heavy_minus_sign: | Whether the workflow is technical (e.g. SDK-managed) |
+| `archived` | *Optional[bool]* | :heavy_minus_sign: | Whether the workflow is archived |
+| `active` | *bool* | :heavy_check_mark: | Whether the workflow is active |
\ No newline at end of file
diff --git a/docs/sdks/batchjobs/README.md b/docs/sdks/batchjobs/README.md
index 3633fe4e..3e082340 100644
--- a/docs/sdks/batchjobs/README.md
+++ b/docs/sdks/batchjobs/README.md
@@ -7,6 +7,7 @@
* [list](#list) - Get Batch Jobs
* [create](#create) - Create Batch Job
* [get](#get) - Get Batch Job
+* [delete](#delete) - Delete Batch Job
* [cancel](#cancel) - Cancel Batch Job
## list
@@ -147,6 +148,46 @@ with Mistral(
| --------------- | --------------- | --------------- |
| errors.SDKError | 4XX, 5XX | \*/\* |
+## delete
+
+Request the deletion of a batch job.
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ res = mistral.batch.jobs.delete(job_id="d9e71426-5791-49ad-b8d1-cf0d90d1b7d0")
+
+ # Handle response
+ print(res)
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- |
+| `job_id` | *str* | :heavy_check_mark: | N/A |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Response
+
+**[models.DeleteBatchJobResponse](../../models/deletebatchjobresponse.md)**
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| --------------- | --------------- | --------------- |
+| errors.SDKError | 4XX, 5XX | \*/\* |
+
## cancel
Request the cancellation of a batch job.
diff --git a/docs/sdks/connectors/README.md b/docs/sdks/connectors/README.md
index 99892188..f4a0a097 100644
--- a/docs/sdks/connectors/README.md
+++ b/docs/sdks/connectors/README.md
@@ -177,6 +177,7 @@ with Mistral(
| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- |
| `tool_name` | *str* | :heavy_check_mark: | N/A |
| `connector_id_or_name` | *str* | :heavy_check_mark: | N/A |
+| `credentials_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
| `arguments` | Dict[str, *Any*] | :heavy_minus_sign: | N/A |
| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
@@ -227,7 +228,7 @@ with Mistral(
### Response
-**[models.ResponseConnectorListToolsV12](../../models/responseconnectorlisttoolsv12.md)**
+**[models.ResponseConnectorListToolsV1](../../models/responseconnectorlisttoolsv1.md)**
### Errors
diff --git a/docs/sdks/deployments/README.md b/docs/sdks/deployments/README.md
new file mode 100644
index 00000000..941b5538
--- /dev/null
+++ b/docs/sdks/deployments/README.md
@@ -0,0 +1,91 @@
+# Workflows.Deployments
+
+## Overview
+
+### Available Operations
+
+* [list_deployments](#list_deployments) - List Deployments
+* [get_deployment](#get_deployment) - Get Deployment
+
+## list_deployments
+
+List Deployments
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ res = mistral.workflows.deployments.list_deployments(active_only=True)
+
+ # Handle response
+ print(res)
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- |
+| `active_only` | *Optional[bool]* | :heavy_minus_sign: | N/A |
+| `workflow_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Response
+
+**[models.DeploymentListResponse](../../models/deploymentlistresponse.md)**
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
+
+## get_deployment
+
+Get Deployment
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ res = mistral.workflows.deployments.get_deployment(name="")
+
+ # Handle response
+ print(res)
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- |
+| `name` | *str* | :heavy_check_mark: | N/A |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Response
+
+**[models.DeploymentDetailResponse](../../models/deploymentdetailresponse.md)**
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
\ No newline at end of file
diff --git a/docs/sdks/events/README.md b/docs/sdks/events/README.md
new file mode 100644
index 00000000..1de37e5c
--- /dev/null
+++ b/docs/sdks/events/README.md
@@ -0,0 +1,230 @@
+# Events
+
+## Overview
+
+### Available Operations
+
+* [receive_workflow_event](#receive_workflow_event) - Receive Workflow Event
+* [receive_workflow_events_batch](#receive_workflow_events_batch) - Receive Workflow Events Batch
+* [get_stream_events](#get_stream_events) - Get Stream Events
+* [get_workflow_events](#get_workflow_events) - Get Workflow Events
+
+## receive_workflow_event
+
+Receive workflow events from workers.
+
+Events are published to NATS for real-time streaming and persisted in the database.
+
+For shared workers, the actual execution owner is resolved from the execution record,
+ensuring events are streamed to the correct user's namespace.
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ res = mistral.events.receive_workflow_event(event={
+ "event_id": "",
+ "root_workflow_exec_id": "",
+ "workflow_exec_id": "",
+ "workflow_run_id": "",
+ "workflow_name": "",
+ "event_type": "CUSTOM_TASK_STARTED",
+ "attributes": {
+ "custom_task_id": "",
+ "custom_task_type": "",
+ },
+ })
+
+ # Handle response
+ print(res)
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- |
+| `event` | [models.WorkflowEventRequestEvent](../../models/workfloweventrequestevent.md) | :heavy_check_mark: | The workflow event payload. |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Response
+
+**[models.WorkflowEventResponse](../../models/workfloweventresponse.md)**
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
+
+## receive_workflow_events_batch
+
+Receive multiple workflow events from workers in a single batch.
+
+Events are published to NATS for real-time streaming and persisted in the database.
+This endpoint processes events sequentially to maintain ordering guarantees.
+
+For shared workers, the actual execution owner is resolved from the execution record,
+ensuring events are streamed to the correct user's namespace.
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ res = mistral.events.receive_workflow_events_batch(events=[
+ {
+ "event_id": "",
+ "root_workflow_exec_id": "",
+ "workflow_exec_id": "",
+ "workflow_run_id": "",
+ "workflow_name": "",
+ "event_type": "WORKFLOW_EXECUTION_STARTED",
+ "attributes": {
+ "task_id": "",
+ "workflow_name": "",
+ "input": {
+ "type": "json",
+ "value": "",
+ },
+ },
+ },
+ ])
+
+ # Handle response
+ print(res)
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| --------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------- |
+| `events` | List[[models.WorkflowEventBatchRequestEvent](../../models/workfloweventbatchrequestevent.md)] | :heavy_check_mark: | List of workflow events to send. |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Response
+
+**[models.WorkflowEventBatchResponse](../../models/workfloweventbatchresponse.md)**
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
+
+## get_stream_events
+
+Get Stream Events
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ res = mistral.events.get_stream_events(scope="*", activity_name="*", activity_id="*", workflow_name="*", workflow_exec_id="*", root_workflow_exec_id="*", parent_workflow_exec_id="*", stream="*", start_seq=0)
+
+ with res as event_stream:
+ for event in event_stream:
+ # handle event
+ print(event, flush=True)
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- |
+| `scope` | [Optional[models.Scope]](../../models/scope.md) | :heavy_minus_sign: | N/A |
+| `activity_name` | *Optional[str]* | :heavy_minus_sign: | N/A |
+| `activity_id` | *Optional[str]* | :heavy_minus_sign: | N/A |
+| `workflow_name` | *Optional[str]* | :heavy_minus_sign: | N/A |
+| `workflow_exec_id` | *Optional[str]* | :heavy_minus_sign: | N/A |
+| `root_workflow_exec_id` | *Optional[str]* | :heavy_minus_sign: | N/A |
+| `parent_workflow_exec_id` | *Optional[str]* | :heavy_minus_sign: | N/A |
+| `stream` | *Optional[str]* | :heavy_minus_sign: | N/A |
+| `start_seq` | *Optional[int]* | :heavy_minus_sign: | N/A |
+| `metadata_filters` | Dict[str, *Any*] | :heavy_minus_sign: | N/A |
+| `workflow_event_types` | List[[models.WorkflowEventType](../../models/workfloweventtype.md)] | :heavy_minus_sign: | N/A |
+| `last_event_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Response
+
+**Union[eventstreaming.EventStream[models.GetStreamEventsV1WorkflowsEventsStreamGetResponseBody], eventstreaming.EventStreamAsync[models.GetStreamEventsV1WorkflowsEventsStreamGetResponseBody]]**
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
+
+## get_workflow_events
+
+Get Workflow Events
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ res = mistral.events.get_workflow_events(limit=100)
+
+ # Handle response
+ print(res)
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- |
+| `root_workflow_exec_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Execution ID of the root workflow that initiated this execution chain. |
+| `workflow_exec_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Execution ID of the workflow that emitted this event. |
+| `workflow_run_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Run ID of the workflow that emitted this event. |
+| `limit` | *Optional[int]* | :heavy_minus_sign: | Maximum number of events to return. |
+| `cursor` | *OptionalNullable[str]* | :heavy_minus_sign: | Cursor for pagination. |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Response
+
+**[models.ListWorkflowEventResponse](../../models/listworkfloweventresponse.md)**
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
\ No newline at end of file
diff --git a/docs/sdks/executions/README.md b/docs/sdks/executions/README.md
new file mode 100644
index 00000000..a2f4aeb0
--- /dev/null
+++ b/docs/sdks/executions/README.md
@@ -0,0 +1,598 @@
+# Workflows.Executions
+
+## Overview
+
+### Available Operations
+
+* [get_workflow_execution](#get_workflow_execution) - Get Workflow Execution
+* [get_workflow_execution_history](#get_workflow_execution_history) - Get Workflow Execution History
+* [signal_workflow_execution](#signal_workflow_execution) - Signal Workflow Execution
+* [query_workflow_execution](#query_workflow_execution) - Query Workflow Execution
+* [terminate_workflow_execution](#terminate_workflow_execution) - Terminate Workflow Execution
+* [batch_terminate_workflow_executions](#batch_terminate_workflow_executions) - Batch Terminate Workflow Executions
+* [cancel_workflow_execution](#cancel_workflow_execution) - Cancel Workflow Execution
+* [batch_cancel_workflow_executions](#batch_cancel_workflow_executions) - Batch Cancel Workflow Executions
+* [reset_workflow](#reset_workflow) - Reset Workflow
+* [update_workflow_execution](#update_workflow_execution) - Update Workflow Execution
+* [get_workflow_execution_trace_otel](#get_workflow_execution_trace_otel) - Get Workflow Execution Trace Otel
+* [get_workflow_execution_trace_summary](#get_workflow_execution_trace_summary) - Get Workflow Execution Trace Summary
+* [get_workflow_execution_trace_events](#get_workflow_execution_trace_events) - Get Workflow Execution Trace Events
+* [stream](#stream) - Stream
+
+## get_workflow_execution
+
+Get Workflow Execution
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ res = mistral.workflows.executions.get_workflow_execution(execution_id="")
+
+ # Handle response
+ print(res)
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- |
+| `execution_id` | *str* | :heavy_check_mark: | N/A |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Response
+
+**[models.WorkflowExecutionResponse](../../models/workflowexecutionresponse.md)**
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
+
+## get_workflow_execution_history
+
+Get Workflow Execution History
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ res = mistral.workflows.executions.get_workflow_execution_history(execution_id="")
+
+ # Handle response
+ print(res)
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- |
+| `execution_id` | *str* | :heavy_check_mark: | N/A |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Response
+
+**Any**
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
+
+## signal_workflow_execution
+
+Signal Workflow Execution
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ res = mistral.workflows.executions.signal_workflow_execution(execution_id="", name="")
+
+ # Handle response
+ print(res)
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ----------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------- |
+| `execution_id` | *str* | :heavy_check_mark: | N/A |
+| `name` | *str* | :heavy_check_mark: | The name of the signal to send |
+| `input` | [OptionalNullable[models.SignalInvocationBodyInput]](../../models/signalinvocationbodyinput.md) | :heavy_minus_sign: | Input data for the signal, matching its schema |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Response
+
+**[models.SignalWorkflowResponse](../../models/signalworkflowresponse.md)**
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
+
+## query_workflow_execution
+
+Query Workflow Execution
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ res = mistral.workflows.executions.query_workflow_execution(execution_id="", name="")
+
+ # Handle response
+ print(res)
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| --------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------- |
+| `execution_id` | *str* | :heavy_check_mark: | N/A |
+| `name` | *str* | :heavy_check_mark: | The name of the query to request |
+| `input` | [OptionalNullable[models.QueryInvocationBodyInput]](../../models/queryinvocationbodyinput.md) | :heavy_minus_sign: | Input data for the query, matching its schema |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Response
+
+**[models.QueryWorkflowResponse](../../models/queryworkflowresponse.md)**
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
+
+## terminate_workflow_execution
+
+Terminate Workflow Execution
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ mistral.workflows.executions.terminate_workflow_execution(execution_id="")
+
+ # Use the SDK ...
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- |
+| `execution_id` | *str* | :heavy_check_mark: | N/A |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
+
+## batch_terminate_workflow_executions
+
+Batch Terminate Workflow Executions
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ res = mistral.workflows.executions.batch_terminate_workflow_executions(execution_ids=[
+ "",
+ "",
+ ])
+
+ # Handle response
+ print(res)
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- |
+| `execution_ids` | List[*str*] | :heavy_check_mark: | List of execution IDs to process |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Response
+
+**[models.BatchExecutionResponse](../../models/batchexecutionresponse.md)**
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
+
+## cancel_workflow_execution
+
+Cancel Workflow Execution
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ mistral.workflows.executions.cancel_workflow_execution(execution_id="")
+
+ # Use the SDK ...
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- |
+| `execution_id` | *str* | :heavy_check_mark: | N/A |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
+
+## batch_cancel_workflow_executions
+
+Batch Cancel Workflow Executions
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ res = mistral.workflows.executions.batch_cancel_workflow_executions(execution_ids=[])
+
+ # Handle response
+ print(res)
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- |
+| `execution_ids` | List[*str*] | :heavy_check_mark: | List of execution IDs to process |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Response
+
+**[models.BatchExecutionResponse](../../models/batchexecutionresponse.md)**
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
+
+## reset_workflow
+
+Reset Workflow
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ mistral.workflows.executions.reset_workflow(execution_id="", event_id=24149, exclude_signals=False, exclude_updates=False)
+
+ # Use the SDK ...
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- |
+| `execution_id` | *str* | :heavy_check_mark: | N/A |
+| `event_id` | *int* | :heavy_check_mark: | The event ID to reset the workflow execution to |
+| `reason` | *OptionalNullable[str]* | :heavy_minus_sign: | Reason for resetting the workflow execution |
+| `exclude_signals` | *Optional[bool]* | :heavy_minus_sign: | Whether to exclude signals that happened after the reset point |
+| `exclude_updates` | *Optional[bool]* | :heavy_minus_sign: | Whether to exclude updates that happened after the reset point |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
+
+## update_workflow_execution
+
+Update Workflow Execution
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ res = mistral.workflows.executions.update_workflow_execution(execution_id="", name="")
+
+ # Handle response
+ print(res)
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ----------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------- |
+| `execution_id` | *str* | :heavy_check_mark: | N/A |
+| `name` | *str* | :heavy_check_mark: | The name of the update to request |
+| `input` | [OptionalNullable[models.UpdateInvocationBodyInput]](../../models/updateinvocationbodyinput.md) | :heavy_minus_sign: | Input data for the update, matching its schema |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Response
+
+**[models.UpdateWorkflowResponse](../../models/updateworkflowresponse.md)**
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
+
+## get_workflow_execution_trace_otel
+
+Get Workflow Execution Trace Otel
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ res = mistral.workflows.executions.get_workflow_execution_trace_otel(execution_id="")
+
+ # Handle response
+ print(res)
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- |
+| `execution_id` | *str* | :heavy_check_mark: | N/A |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Response
+
+**[models.WorkflowExecutionTraceOTelResponse](../../models/workflowexecutiontraceotelresponse.md)**
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
+
+## get_workflow_execution_trace_summary
+
+Get Workflow Execution Trace Summary
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ res = mistral.workflows.executions.get_workflow_execution_trace_summary(execution_id="")
+
+ # Handle response
+ print(res)
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- |
+| `execution_id` | *str* | :heavy_check_mark: | N/A |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Response
+
+**[models.WorkflowExecutionTraceSummaryResponse](../../models/workflowexecutiontracesummaryresponse.md)**
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
+
+## get_workflow_execution_trace_events
+
+Get Workflow Execution Trace Events
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ res = mistral.workflows.executions.get_workflow_execution_trace_events(execution_id="", merge_same_id_events=False, include_internal_events=False)
+
+ # Handle response
+ print(res)
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- |
+| `execution_id` | *str* | :heavy_check_mark: | N/A |
+| `merge_same_id_events` | *Optional[bool]* | :heavy_minus_sign: | N/A |
+| `include_internal_events` | *Optional[bool]* | :heavy_minus_sign: | N/A |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Response
+
+**[models.WorkflowExecutionTraceEventsResponse](../../models/workflowexecutiontraceeventsresponse.md)**
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
+
+## stream
+
+Stream
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ res = mistral.workflows.executions.stream(execution_id="")
+
+ with res as event_stream:
+ for event in event_stream:
+ # handle event
+ print(event, flush=True)
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- |
+| `execution_id` | *str* | :heavy_check_mark: | N/A |
+| `event_source` | [OptionalNullable[models.EventSource]](../../models/eventsource.md) | :heavy_minus_sign: | N/A |
+| `last_event_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Response
+
+**Union[eventstreaming.EventStream[models.StreamV1WorkflowsExecutionsExecutionIDStreamGetResponseBody], eventstreaming.EventStreamAsync[models.StreamV1WorkflowsExecutionsExecutionIDStreamGetResponseBody]]**
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
\ No newline at end of file
diff --git a/docs/sdks/libraries/README.md b/docs/sdks/libraries/README.md
index 6a514e1f..25cd04b2 100644
--- a/docs/sdks/libraries/README.md
+++ b/docs/sdks/libraries/README.md
@@ -28,7 +28,7 @@ with Mistral(
api_key=os.getenv("MISTRAL_API_KEY", ""),
) as mistral:
- res = mistral.beta.libraries.list()
+ res = mistral.beta.libraries.list(page_size=100, page=0)
# Handle response
print(res)
@@ -39,6 +39,8 @@ with Mistral(
| Parameter | Type | Required | Description |
| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- |
+| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A |
+| `page` | *Optional[int]* | :heavy_minus_sign: | N/A |
| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
### Response
@@ -47,9 +49,10 @@ with Mistral(
### Errors
-| Error Type | Status Code | Content Type |
-| --------------- | --------------- | --------------- |
-| errors.SDKError | 4XX, 5XX | \*/\* |
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
## create
diff --git a/docs/sdks/metrics/README.md b/docs/sdks/metrics/README.md
new file mode 100644
index 00000000..08a5ccb9
--- /dev/null
+++ b/docs/sdks/metrics/README.md
@@ -0,0 +1,69 @@
+# Workflows.Metrics
+
+## Overview
+
+### Available Operations
+
+* [get_workflow_metrics](#get_workflow_metrics) - Get Workflow Metrics
+
+## get_workflow_metrics
+
+Get comprehensive metrics for a specific workflow.
+
+Args:
+ workflow_name: The name of the workflow type to get metrics for
+ start_time: Optional start time filter (ISO 8601 format)
+ end_time: Optional end time filter (ISO 8601 format)
+
+Returns:
+ WorkflowMetrics: Dictionary containing metrics:
+ - execution_count: Total number of executions
+ - success_count: Number of successful executions
+ - error_count: Number of failed/terminated executions
+ - average_latency_ms: Average execution duration in milliseconds
+ - retry_rate: Proportion of workflows with retries
+ - latency_over_time: Time-series data of execution durations
+
+Example:
+ GET /v1/workflows/MyWorkflow/metrics
+ GET /v1/workflows/MyWorkflow/metrics?start_time=2025-01-01T00:00:00Z
+ GET /v1/workflows/MyWorkflow/metrics?start_time=2025-01-01T00:00:00Z&end_time=2025-12-31T23:59:59Z
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ res = mistral.workflows.metrics.get_workflow_metrics(workflow_name="")
+
+ # Handle response
+ print(res)
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- |
+| `workflow_name` | *str* | :heavy_check_mark: | N/A |
+| `start_time` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | Filter workflows started after this time (ISO 8601) |
+| `end_time` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | Filter workflows started before this time (ISO 8601) |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Response
+
+**[models.WorkflowMetrics](../../models/workflowmetrics.md)**
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
\ No newline at end of file
diff --git a/docs/sdks/runs/README.md b/docs/sdks/runs/README.md
new file mode 100644
index 00000000..bd786544
--- /dev/null
+++ b/docs/sdks/runs/README.md
@@ -0,0 +1,138 @@
+# Workflows.Runs
+
+## Overview
+
+### Available Operations
+
+* [list_runs](#list_runs) - List Runs
+* [get_run](#get_run) - Get Run
+* [get_run_history](#get_run_history) - Get Run History
+
+## list_runs
+
+List Runs
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ res = mistral.workflows.runs.list_runs(page_size=50)
+
+ while res is not None:
+ # Handle items
+
+ res = res.next()
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- |
+| `workflow_identifier` | *OptionalNullable[str]* | :heavy_minus_sign: | Filter by workflow name or id |
+| `search` | *OptionalNullable[str]* | :heavy_minus_sign: | Search by workflow name, display name or id |
+| `status` | [OptionalNullable[models.ListRunsV1WorkflowsRunsGetStatus]](../../models/listrunsv1workflowsrunsgetstatus.md) | :heavy_minus_sign: | Filter by workflow status |
+| `page_size` | *Optional[int]* | :heavy_minus_sign: | Number of items per page |
+| `next_page_token` | *OptionalNullable[str]* | :heavy_minus_sign: | Token for the next page of results |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Response
+
+**[models.ListRunsV1WorkflowsRunsGetResponse](../../models/listrunsv1workflowsrunsgetresponse.md)**
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
+
+## get_run
+
+Get Run
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ res = mistral.workflows.runs.get_run(run_id="553b071e-3d04-46aa-aa9a-0fca61dc60fa")
+
+ # Handle response
+ print(res)
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- |
+| `run_id` | *str* | :heavy_check_mark: | N/A |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Response
+
+**[models.WorkflowExecutionResponse](../../models/workflowexecutionresponse.md)**
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
+
+## get_run_history
+
+Get Run History
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ res = mistral.workflows.runs.get_run_history(run_id="f7296489-0212-4239-9e35-12fabfe8cd11")
+
+ # Handle response
+ print(res)
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- |
+| `run_id` | *str* | :heavy_check_mark: | N/A |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Response
+
+**Any**
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
\ No newline at end of file
diff --git a/docs/sdks/schedules/README.md b/docs/sdks/schedules/README.md
new file mode 100644
index 00000000..dd1565e0
--- /dev/null
+++ b/docs/sdks/schedules/README.md
@@ -0,0 +1,133 @@
+# Workflows.Schedules
+
+## Overview
+
+### Available Operations
+
+* [get_schedules](#get_schedules) - Get Schedules
+* [schedule_workflow](#schedule_workflow) - Schedule Workflow
+* [unschedule_workflow](#unschedule_workflow) - Unschedule Workflow
+
+## get_schedules
+
+Get Schedules
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ res = mistral.workflows.schedules.get_schedules()
+
+ # Handle response
+ print(res)
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Response
+
+**[models.WorkflowScheduleListResponse](../../models/workflowschedulelistresponse.md)**
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| --------------- | --------------- | --------------- |
+| errors.SDKError | 4XX, 5XX | \*/\* |
+
+## schedule_workflow
+
+Schedule Workflow
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ res = mistral.workflows.schedules.schedule_workflow(schedule={
+ "input": "",
+ })
+
+ # Handle response
+ print(res)
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `schedule` | [models.ScheduleDefinition](../../models/scheduledefinition.md) | :heavy_check_mark: | Specification of the times scheduled actions may occur.<br/>The times are the union of :py:attr:`calendars`, :py:attr:`intervals`, and<br/>:py:attr:`cron_expressions` excluding anything in :py:attr:`skip`.<br/>Used for input where schedule_id is optional (can be provided or auto-generated). |
+| `workflow_registration_id` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the workflow registration to schedule |
+| `workflow_version_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Deprecated: use workflow_registration_id |
+| `workflow_identifier` | *OptionalNullable[str]* | :heavy_minus_sign: | The name or ID of the workflow to schedule |
+| `workflow_task_queue` | *OptionalNullable[str]* | :heavy_minus_sign: | :warning: **DEPRECATED**: This will be removed in a future release, please migrate away from it as soon as possible.<br/>Deprecated. Use deployment_name instead. |
+| `schedule_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Allows you to specify a custom schedule ID. If not provided, a random ID will be generated. |
+| `deployment_name` | *OptionalNullable[str]* | :heavy_minus_sign: | Name of the deployment to route this schedule to |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Response
+
+**[models.WorkflowScheduleResponse](../../models/workflowscheduleresponse.md)**
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
+
+## unschedule_workflow
+
+Unschedule Workflow
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ mistral.workflows.schedules.unschedule_workflow(schedule_id="")
+
+ # Use the SDK ...
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- |
+| `schedule_id` | *str* | :heavy_check_mark: | N/A |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
\ No newline at end of file
diff --git a/docs/sdks/voices/README.md b/docs/sdks/voices/README.md
index 3383e617..3b4d135f 100644
--- a/docs/sdks/voices/README.md
+++ b/docs/sdks/voices/README.md
@@ -27,7 +27,7 @@ with Mistral(
api_key=os.getenv("MISTRAL_API_KEY", ""),
) as mistral:
- res = mistral.audio.voices.list(limit=10, offset=0)
+ res = mistral.audio.voices.list(limit=10, offset=0, type_="all")
# Handle response
print(res)
@@ -36,11 +36,12 @@ with Mistral(
### Parameters
-| Parameter | Type | Required | Description |
-| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- |
-| `limit` | *Optional[int]* | :heavy_minus_sign: | Maximum number of voices to return |
-| `offset` | *Optional[int]* | :heavy_minus_sign: | Offset for pagination |
-| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+| Parameter | Type | Required | Description |
+| ------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------- |
+| `limit` | *Optional[int]* | :heavy_minus_sign: | Maximum number of voices to return |
+| `offset` | *Optional[int]* | :heavy_minus_sign: | Offset for pagination |
+| `type` | [Optional[models.ListVoicesV1AudioVoicesGetType]](../../models/listvoicesv1audiovoicesgettype.md) | :heavy_minus_sign: | Filter the voices between customs and presets |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
### Response
diff --git a/docs/sdks/workflows/README.md b/docs/sdks/workflows/README.md
new file mode 100644
index 00000000..e65383ca
--- /dev/null
+++ b/docs/sdks/workflows/README.md
@@ -0,0 +1,423 @@
+# Workflows
+
+## Overview
+
+### Available Operations
+
+* [get_workflows](#get_workflows) - Get Workflows
+* [get_workflow_registrations](#get_workflow_registrations) - Get Workflow Registrations
+* [execute_workflow](#execute_workflow) - Execute Workflow
+* [~~execute_workflow_registration~~](#execute_workflow_registration) - Execute Workflow Registration :warning: **Deprecated**
+* [get_workflow](#get_workflow) - Get Workflow
+* [update_workflow](#update_workflow) - Update Workflow
+* [get_workflow_registration](#get_workflow_registration) - Get Workflow Registration
+* [archive_workflow](#archive_workflow) - Archive Workflow
+* [unarchive_workflow](#unarchive_workflow) - Unarchive Workflow
+
+## get_workflows
+
+Get Workflows
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ res = mistral.workflows.get_workflows(active_only=False, include_shared=True, limit=50)
+
+ while res is not None:
+ # Handle items
+
+ res = res.next()
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- |
+| `active_only` | *Optional[bool]* | :heavy_minus_sign: | Whether to only return active workflows |
+| `include_shared` | *Optional[bool]* | :heavy_minus_sign: | Whether to include shared workflows |
+| `available_in_chat_assistant` | *OptionalNullable[bool]* | :heavy_minus_sign: | Whether to only return workflows compatible with chat assistant |
+| `archived` | *OptionalNullable[bool]* | :heavy_minus_sign: | Filter by archived state. False=exclude archived, True=only archived, None=include all |
+| `cursor` | *OptionalNullable[str]* | :heavy_minus_sign: | The cursor for pagination |
+| `limit` | *Optional[int]* | :heavy_minus_sign: | The maximum number of workflows to return |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Response
+
+**[models.GetWorkflowsV1WorkflowsGetResponse](../../models/getworkflowsv1workflowsgetresponse.md)**
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
+
+## get_workflow_registrations
+
+Get Workflow Registrations
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ res = mistral.workflows.get_workflow_registrations(active_only=False, include_shared=True, with_workflow=False, limit=50)
+
+ # Handle response
+ print(res)
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- |
+| `workflow_id` | *OptionalNullable[str]* | :heavy_minus_sign: | The workflow ID to filter by |
+| `task_queue` | *OptionalNullable[str]* | :heavy_minus_sign: | The task queue to filter by |
+| `active_only` | *Optional[bool]* | :heavy_minus_sign: | Whether to only return active workflows versions |
+| `include_shared` | *Optional[bool]* | :heavy_minus_sign: | Whether to include shared workflow versions |
+| `workflow_search` | *OptionalNullable[str]* | :heavy_minus_sign: | The workflow name to filter by |
+| `archived` | *OptionalNullable[bool]* | :heavy_minus_sign: | Filter by archived state. False=exclude archived, True=only archived, None=include all |
+| `with_workflow` | *Optional[bool]* | :heavy_minus_sign: | Whether to include the workflow definition |
+| `available_in_chat_assistant` | *OptionalNullable[bool]* | :heavy_minus_sign: | Whether to only return workflows compatible with chat assistant |
+| `limit` | *Optional[int]* | :heavy_minus_sign: | The maximum number of workflows versions to return |
+| `cursor` | *OptionalNullable[str]* | :heavy_minus_sign: | The cursor for pagination |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Response
+
+**[models.WorkflowRegistrationListResponse](../../models/workflowregistrationlistresponse.md)**
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
+
+## execute_workflow
+
+Execute Workflow
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ res = mistral.workflows.execute_workflow(workflow_identifier="", wait_for_result=False)
+
+ # Handle response
+ print(res)
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `workflow_identifier` | *str* | :heavy_check_mark: | N/A |
+| `execution_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Allows you to specify a custom execution ID. If not provided, a random ID will be generated. |
+| `input` | Dict[str, *Any*] | :heavy_minus_sign: | The input to the workflow. This should be a dictionary that matches the workflow's input schema. |
+| `encoded_input` | [OptionalNullable[models.NetworkEncodedInput]](../../models/networkencodedinput.md) | :heavy_minus_sign: | Encoded input to the workflow, used when payload encoding is enabled. |
+| `wait_for_result` | *Optional[bool]* | :heavy_minus_sign: | If true, wait for the workflow to complete and return the result directly. |
+| `timeout_seconds` | *OptionalNullable[float]* | :heavy_minus_sign: | Maximum time to wait for completion when wait_for_result is true. |
+| `custom_tracing_attributes` | Dict[str, *str*] | :heavy_minus_sign: | N/A |
+| `task_queue` | *OptionalNullable[str]* | :heavy_minus_sign: | :warning: **DEPRECATED**: This will be removed in a future release, please migrate away from it as soon as possible.<br/>Deprecated. Use deployment_name instead. |
+| `deployment_name` | *OptionalNullable[str]* | :heavy_minus_sign: | Name of the deployment to route this execution to |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Response
+
+**[models.ResponseExecuteWorkflowV1WorkflowsWorkflowIdentifierExecutePost](../../models/responseexecuteworkflowv1workflowsworkflowidentifierexecutepost.md)**
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
+
+## ~~execute_workflow_registration~~
+
+Execute Workflow Registration
+
+> :warning: **DEPRECATED**: This will be removed in a future release, please migrate away from it as soon as possible.
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ res = mistral.workflows.execute_workflow_registration(workflow_registration_id="de11d76a-e0fb-44dd-abd9-2e75fc275b94", wait_for_result=False)
+
+ # Handle response
+ print(res)
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `workflow_registration_id` | *str* | :heavy_check_mark: | N/A |
+| `execution_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Allows you to specify a custom execution ID. If not provided, a random ID will be generated. |
+| `input` | Dict[str, *Any*] | :heavy_minus_sign: | The input to the workflow. This should be a dictionary that matches the workflow's input schema. |
+| `encoded_input` | [OptionalNullable[models.NetworkEncodedInput]](../../models/networkencodedinput.md) | :heavy_minus_sign: | Encoded input to the workflow, used when payload encoding is enabled. |
+| `wait_for_result` | *Optional[bool]* | :heavy_minus_sign: | If true, wait for the workflow to complete and return the result directly. |
+| `timeout_seconds` | *OptionalNullable[float]* | :heavy_minus_sign: | Maximum time to wait for completion when wait_for_result is true. |
+| `custom_tracing_attributes` | Dict[str, *str*] | :heavy_minus_sign: | N/A |
+| `task_queue` | *OptionalNullable[str]* | :heavy_minus_sign: | :warning: **DEPRECATED**: This will be removed in a future release, please migrate away from it as soon as possible.<br/>Deprecated. Use deployment_name instead. |
+| `deployment_name` | *OptionalNullable[str]* | :heavy_minus_sign: | Name of the deployment to route this execution to |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Response
+
+**[models.ResponseExecuteWorkflowRegistrationV1WorkflowsRegistrationsWorkflowRegistrationIDExecutePost](../../models/responseexecuteworkflowregistrationv1workflowsregistrationsworkflowregistrationidexecutepost.md)**
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
+
+## get_workflow
+
+Get Workflow
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ res = mistral.workflows.get_workflow(workflow_identifier="")
+
+ # Handle response
+ print(res)
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- |
+| `workflow_identifier` | *str* | :heavy_check_mark: | N/A |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Response
+
+**[models.WorkflowGetResponse](../../models/workflowgetresponse.md)**
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
+
+## update_workflow
+
+Update Workflow
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ res = mistral.workflows.update_workflow(workflow_identifier="")
+
+ # Handle response
+ print(res)
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- |
+| `workflow_identifier` | *str* | :heavy_check_mark: | N/A |
+| `display_name` | *OptionalNullable[str]* | :heavy_minus_sign: | New display name value |
+| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | New description value |
+| `available_in_chat_assistant` | *OptionalNullable[bool]* | :heavy_minus_sign: | Whether to make the workflow available in the chat assistant |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Response
+
+**[models.WorkflowUpdateResponse](../../models/workflowupdateresponse.md)**
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
+
+## get_workflow_registration
+
+Get Workflow Registration
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ res = mistral.workflows.get_workflow_registration(workflow_registration_id="c4d86c40-960f-4e9a-9d6f-ad8342d7aa83", with_workflow=False, include_shared=True)
+
+ # Handle response
+ print(res)
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- |
+| `workflow_registration_id` | *str* | :heavy_check_mark: | N/A |
+| `with_workflow` | *Optional[bool]* | :heavy_minus_sign: | Whether to include the workflow definition |
+| `include_shared` | *Optional[bool]* | :heavy_minus_sign: | Whether to include shared workflow versions |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Response
+
+**[models.WorkflowRegistrationGetResponse](../../models/workflowregistrationgetresponse.md)**
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
+
+## archive_workflow
+
+Archive Workflow
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ res = mistral.workflows.archive_workflow(workflow_identifier="")
+
+ # Handle response
+ print(res)
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- |
+| `workflow_identifier` | *str* | :heavy_check_mark: | N/A |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Response
+
+**[models.WorkflowArchiveResponse](../../models/workflowarchiveresponse.md)**
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
+
+## unarchive_workflow
+
+Unarchive Workflow
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ res = mistral.workflows.unarchive_workflow(workflow_identifier="")
+
+ # Handle response
+ print(res)
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- |
+| `workflow_identifier` | *str* | :heavy_check_mark: | N/A |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Response
+
+**[models.WorkflowUnarchiveResponse](../../models/workflowunarchiveresponse.md)**
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
\ No newline at end of file
diff --git a/docs/sdks/workflowsevents/README.md b/docs/sdks/workflowsevents/README.md
new file mode 100644
index 00000000..bae0cb90
--- /dev/null
+++ b/docs/sdks/workflowsevents/README.md
@@ -0,0 +1,230 @@
+# Workflows.Events
+
+## Overview
+
+### Available Operations
+
+* [receive_workflow_event](#receive_workflow_event) - Receive Workflow Event
+* [receive_workflow_events_batch](#receive_workflow_events_batch) - Receive Workflow Events Batch
+* [get_stream_events](#get_stream_events) - Get Stream Events
+* [get_workflow_events](#get_workflow_events) - Get Workflow Events
+
+## receive_workflow_event
+
+Receive workflow events from workers.
+
+Events are published to NATS for real-time streaming and persisted in the database.
+
+For shared workers, the actual execution owner is resolved from the execution record,
+ensuring events are streamed to the correct user's namespace.
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ res = mistral.workflows.events.receive_workflow_event(event={
+ "event_id": "",
+ "root_workflow_exec_id": "",
+ "workflow_exec_id": "",
+ "workflow_run_id": "",
+ "workflow_name": "",
+ "event_type": "CUSTOM_TASK_STARTED",
+ "attributes": {
+ "custom_task_id": "",
+ "custom_task_type": "",
+ },
+ })
+
+ # Handle response
+ print(res)
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- |
+| `event` | [models.WorkflowEventRequestEvent](../../models/workfloweventrequestevent.md) | :heavy_check_mark: | The workflow event payload. |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Response
+
+**[models.WorkflowEventResponse](../../models/workfloweventresponse.md)**
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
+
+## receive_workflow_events_batch
+
+Receive multiple workflow events from workers in a single batch.
+
+Events are published to NATS for real-time streaming and persisted in the database.
+This endpoint processes events sequentially to maintain ordering guarantees.
+
+For shared workers, the actual execution owner is resolved from the execution record,
+ensuring events are streamed to the correct user's namespace.
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ res = mistral.workflows.events.receive_workflow_events_batch(events=[
+ {
+ "event_id": "",
+ "root_workflow_exec_id": "",
+ "workflow_exec_id": "",
+ "workflow_run_id": "",
+ "workflow_name": "",
+ "event_type": "WORKFLOW_EXECUTION_STARTED",
+ "attributes": {
+ "task_id": "",
+ "workflow_name": "",
+ "input": {
+ "type": "json",
+ "value": "",
+ },
+ },
+ },
+ ])
+
+ # Handle response
+ print(res)
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| --------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------- |
+| `events` | List[[models.WorkflowEventBatchRequestEvent](../../models/workfloweventbatchrequestevent.md)] | :heavy_check_mark: | List of workflow events to send. |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Response
+
+**[models.WorkflowEventBatchResponse](../../models/workfloweventbatchresponse.md)**
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
+
+## get_stream_events
+
+Get Stream Events
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ res = mistral.workflows.events.get_stream_events(scope="*", activity_name="*", activity_id="*", workflow_name="*", workflow_exec_id="*", root_workflow_exec_id="*", parent_workflow_exec_id="*", stream="*", start_seq=0)
+
+ with res as event_stream:
+ for event in event_stream:
+ # handle event
+ print(event, flush=True)
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- |
+| `scope` | [Optional[models.Scope]](../../models/scope.md) | :heavy_minus_sign: | N/A |
+| `activity_name` | *Optional[str]* | :heavy_minus_sign: | N/A |
+| `activity_id` | *Optional[str]* | :heavy_minus_sign: | N/A |
+| `workflow_name` | *Optional[str]* | :heavy_minus_sign: | N/A |
+| `workflow_exec_id` | *Optional[str]* | :heavy_minus_sign: | N/A |
+| `root_workflow_exec_id` | *Optional[str]* | :heavy_minus_sign: | N/A |
+| `parent_workflow_exec_id` | *Optional[str]* | :heavy_minus_sign: | N/A |
+| `stream` | *Optional[str]* | :heavy_minus_sign: | N/A |
+| `start_seq` | *Optional[int]* | :heavy_minus_sign: | N/A |
+| `metadata_filters` | Dict[str, *Any*] | :heavy_minus_sign: | N/A |
+| `workflow_event_types` | List[[models.WorkflowEventType](../../models/workfloweventtype.md)] | :heavy_minus_sign: | N/A |
+| `last_event_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Response
+
+**[Union[eventstreaming.EventStream[models.GetStreamEventsV1WorkflowsEventsStreamGetResponseBody], eventstreaming.EventStreamAsync[models.GetStreamEventsV1WorkflowsEventsStreamGetResponseBody]]](../../models/.md)**
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
+
+## get_workflow_events
+
+Get Workflow Events
+
+### Example Usage
+
+
+```python
+from mistralai.client import Mistral
+import os
+
+
+with Mistral(
+ api_key=os.getenv("MISTRAL_API_KEY", ""),
+) as mistral:
+
+ res = mistral.workflows.events.get_workflow_events(limit=100)
+
+ # Handle response
+ print(res)
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- |
+| `root_workflow_exec_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Execution ID of the root workflow that initiated this execution chain. |
+| `workflow_exec_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Execution ID of the workflow that emitted this event. |
+| `workflow_run_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Run ID of the workflow that emitted this event. |
+| `limit` | *Optional[int]* | :heavy_minus_sign: | Maximum number of events to return. |
+| `cursor` | *OptionalNullable[str]* | :heavy_minus_sign: | Cursor for pagination. |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+
+### Response
+
+**[models.ListWorkflowEventResponse](../../models/listworkfloweventresponse.md)**
+
+### Errors
+
+| Error Type | Status Code | Content Type |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPValidationError | 422 | application/json |
+| errors.SDKError | 4XX, 5XX | \*/\* |
\ No newline at end of file
diff --git a/examples/mistral/workflows/async_workflow_execute.py b/examples/mistral/workflows/async_workflow_execute.py
new file mode 100644
index 00000000..737253ab
--- /dev/null
+++ b/examples/mistral/workflows/async_workflow_execute.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+
+import asyncio
+import os
+
+from mistralai.client import Mistral
+
+WORKFLOW_NAME = "example-hello-world-workflow"
+
+
+async def main():
+ api_key = os.environ["MISTRAL_API_KEY"]
+
+ client = Mistral(api_key=api_key)
+
+ # Execute workflow and wait for result using wait_for_result parameter
+ response = await client.workflows.execute_workflow_async(
+ workflow_identifier=WORKFLOW_NAME,
+ input={"document_title": "hello world"},
+ wait_for_result=True,
+ timeout_seconds=60.0,
+ )
+
+ print(f"Workflow: {response.workflow_name}")
+ print(f"Execution ID: {response.execution_id}")
+ print(f"Result: {response.result}")
+
+
+if __name__ == "__main__":
+ asyncio.run(main())
diff --git a/examples/mistral/workflows/async_workflow_execute_and_wait.py b/examples/mistral/workflows/async_workflow_execute_and_wait.py
new file mode 100644
index 00000000..964bc75a
--- /dev/null
+++ b/examples/mistral/workflows/async_workflow_execute_and_wait.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+
+import asyncio
+import os
+
+from mistralai.client import Mistral
+
+WORKFLOW_NAME = "example-hello-world-workflow"
+
+
+async def main():
+ api_key = os.environ["MISTRAL_API_KEY"]
+
+ client = Mistral(api_key=api_key)
+
+ # Example 1: Using API sync mode (server-side waiting)
+ result = await client.workflows.execute_workflow_and_wait_async(
+ workflow_identifier=WORKFLOW_NAME,
+ input={"document_title": "hello world"},
+ use_api_sync=True,
+ timeout_seconds=60.0,
+ )
+ print(f"Result (API sync): {result}")
+
+ # Example 2: Using polling mode (client-side waiting)
+ result = await client.workflows.execute_workflow_and_wait_async(
+ workflow_identifier=WORKFLOW_NAME,
+ input={"document_title": "hello world"},
+ use_api_sync=False,
+ polling_interval=5,
+ max_attempts=12, # 12 attempts * 5 seconds = 60 seconds max
+ )
+ print(f"Result (polling): {result}")
+
+
+if __name__ == "__main__":
+ asyncio.run(main())
diff --git a/examples/mistral/workflows/workflow_execute.py b/examples/mistral/workflows/workflow_execute.py
new file mode 100644
index 00000000..650f9352
--- /dev/null
+++ b/examples/mistral/workflows/workflow_execute.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python
+
+import os
+
+from mistralai.client import Mistral
+
+WORKFLOW_NAME = "example-hello-world-workflow"
+
+
+def main():
+ api_key = os.environ["MISTRAL_API_KEY"]
+
+ client = Mistral(api_key=api_key)
+
+ # Execute workflow and wait for result using wait_for_result parameter
+ response = client.workflows.execute_workflow(
+ workflow_identifier=WORKFLOW_NAME,
+ input={"document_title": "hello world"},
+ wait_for_result=True,
+ timeout_seconds=60.0,
+ )
+
+ print(f"Workflow: {response.workflow_name}")
+ print(f"Execution ID: {response.execution_id}")
+ print(f"Result: {response.result}")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/mistral/workflows/workflow_execute_and_wait.py b/examples/mistral/workflows/workflow_execute_and_wait.py
new file mode 100644
index 00000000..6a50aa88
--- /dev/null
+++ b/examples/mistral/workflows/workflow_execute_and_wait.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+
+import os
+
+from mistralai.client import Mistral
+
+WORKFLOW_NAME = "example-hello-world-workflow"
+
+
+def main():
+ api_key = os.environ["MISTRAL_API_KEY"]
+
+ client = Mistral(api_key=api_key)
+
+ # Example 1: Using API sync mode (server-side waiting)
+ result = client.workflows.execute_workflow_and_wait(
+ workflow_identifier=WORKFLOW_NAME,
+ input={"document_title": "hello world"},
+ use_api_sync=True,
+ timeout_seconds=60.0,
+ )
+ print(f"Result (API sync): {result}")
+
+ # Example 2: Using polling mode (client-side waiting)
+ result = client.workflows.execute_workflow_and_wait(
+ workflow_identifier=WORKFLOW_NAME,
+ input={"document_title": "hello world"},
+ use_api_sync=False,
+ polling_interval=5,
+ max_attempts=12, # 12 attempts * 5 seconds = 60 seconds max
+ )
+ print(f"Result (polling): {result}")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/pyproject.toml b/pyproject.toml
index 7be2c9e6..c15a2e80 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "mistralai"
-version = "2.1.3"
+version = "2.2.0rc1"
description = "Python Client SDK for the Mistral AI API."
authors = [{ name = "Mistral" }]
requires-python = ">=3.10"
diff --git a/scripts/run_examples.sh b/scripts/run_examples.sh
index eca854b4..83b866fb 100755
--- a/scripts/run_examples.sh
+++ b/scripts/run_examples.sh
@@ -49,6 +49,10 @@ exclude_files=(
"examples/mistral/audio/async_realtime_transcription_microphone.py"
"examples/mistral/audio/async_realtime_transcription_stream.py"
"examples/mistral/audio/async_realtime_transcription_dual_delay_microphone.py"
+ "examples/mistral/workflows/workflow_execute_and_wait.py"
+ "examples/mistral/workflows/async_workflow_execute_and_wait.py"
+ "examples/mistral/workflows/workflow_execute.py"
+ "examples/mistral/workflows/async_workflow_execute.py"
)
# Files that require extra dependencies (agents, mcp, audio, etc.)
diff --git a/src/mistralai/client/_version.py b/src/mistralai/client/_version.py
index 68da4e56..5cebd130 100644
--- a/src/mistralai/client/_version.py
+++ b/src/mistralai/client/_version.py
@@ -4,10 +4,10 @@
import importlib.metadata
__title__: str = "mistralai"
-__version__: str = "2.1.3"
+__version__: str = "2.2.0rc1"
__openapi_doc_version__: str = "1.0.0"
__gen_version__: str = "2.862.0"
-__user_agent__: str = "speakeasy-sdk/python 2.1.3 2.862.0 1.0.0 mistralai"
+__user_agent__: str = "speakeasy-sdk/python 2.2.0rc1 2.862.0 1.0.0 mistralai"
try:
if __package__ is not None:
diff --git a/src/mistralai/client/batch_jobs.py b/src/mistralai/client/batch_jobs.py
index 0e135b30..dc378cde 100644
--- a/src/mistralai/client/batch_jobs.py
+++ b/src/mistralai/client/batch_jobs.py
@@ -626,6 +626,174 @@ async def get_async(
raise errors.SDKError("Unexpected response received", http_res)
+ def delete(
+ self,
+ *,
+ job_id: str,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> models.DeleteBatchJobResponse:
+ r"""Delete Batch Job
+
+ Request the deletion of a batch job.
+
+        :param job_id: The ID of the batch job to delete.
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.JobsAPIRoutesBatchDeleteBatchJobRequest(
+ job_id=job_id,
+ )
+
+ req = self._build_request(
+ method="DELETE",
+ path="/v1/batch/jobs/{job_id}",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=False,
+ request_has_path_params=True,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = self.do_request(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="jobs_api_routes_batch_delete_batch_job",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ if utils.match_response(http_res, "200", "application/json"):
+ return unmarshal_json_response(models.DeleteBatchJobResponse, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
+
+ async def delete_async(
+ self,
+ *,
+ job_id: str,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> models.DeleteBatchJobResponse:
+ r"""Delete Batch Job
+
+ Request the deletion of a batch job.
+
+        :param job_id: The ID of the batch job to delete.
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.JobsAPIRoutesBatchDeleteBatchJobRequest(
+ job_id=job_id,
+ )
+
+ req = self._build_request_async(
+ method="DELETE",
+ path="/v1/batch/jobs/{job_id}",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=False,
+ request_has_path_params=True,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = await self.do_request_async(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="jobs_api_routes_batch_delete_batch_job",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ if utils.match_response(http_res, "200", "application/json"):
+ return unmarshal_json_response(models.DeleteBatchJobResponse, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
+
def cancel(
self,
*,
diff --git a/src/mistralai/client/connectors.py b/src/mistralai/client/connectors.py
index e0c4793e..238f9277 100644
--- a/src/mistralai/client/connectors.py
+++ b/src/mistralai/client/connectors.py
@@ -640,6 +640,7 @@ def call_tool(
*,
tool_name: str,
connector_id_or_name: str,
+ credentials_name: OptionalNullable[str] = UNSET,
arguments: Optional[Dict[str, Any]] = None,
retries: OptionalNullable[utils.RetryConfig] = UNSET,
server_url: Optional[str] = None,
@@ -652,6 +653,7 @@ def call_tool(
:param tool_name:
:param connector_id_or_name:
+        :param credentials_name: Optional name of the credentials to use for this tool call.
:param arguments:
:param retries: Override the default retry configuration for this method
:param server_url: Override the default server URL for this method
@@ -670,6 +672,7 @@ def call_tool(
request = models.ConnectorCallToolV1Request(
tool_name=tool_name,
+ credentials_name=credentials_name,
connector_id_or_name=connector_id_or_name,
connector_call_tool_request=models.ConnectorCallToolRequest(
arguments=arguments,
@@ -745,6 +748,7 @@ async def call_tool_async(
*,
tool_name: str,
connector_id_or_name: str,
+ credentials_name: OptionalNullable[str] = UNSET,
arguments: Optional[Dict[str, Any]] = None,
retries: OptionalNullable[utils.RetryConfig] = UNSET,
server_url: Optional[str] = None,
@@ -757,6 +761,7 @@ async def call_tool_async(
:param tool_name:
:param connector_id_or_name:
+        :param credentials_name: Optional name of the credentials to use for this tool call.
:param arguments:
:param retries: Override the default retry configuration for this method
:param server_url: Override the default server URL for this method
@@ -775,6 +780,7 @@ async def call_tool_async(
request = models.ConnectorCallToolV1Request(
tool_name=tool_name,
+ credentials_name=credentials_name,
connector_id_or_name=connector_id_or_name,
connector_call_tool_request=models.ConnectorCallToolRequest(
arguments=arguments,
@@ -857,7 +863,7 @@ def list_tools(
server_url: Optional[str] = None,
timeout_ms: Optional[int] = None,
http_headers: Optional[Mapping[str, str]] = None,
- ) -> models.ResponseConnectorListToolsV12:
+ ) -> models.ResponseConnectorListToolsV1:
r"""List tools for a connector.
List all tools available for an MCP connector.
@@ -933,7 +939,7 @@ def list_tools(
response_data: Any = None
if utils.match_response(http_res, "200", "application/json"):
return unmarshal_json_response(
- models.ResponseConnectorListToolsV12, http_res
+ models.ResponseConnectorListToolsV1, http_res
)
if utils.match_response(http_res, "422", "application/json"):
response_data = unmarshal_json_response(
@@ -961,7 +967,7 @@ async def list_tools_async(
server_url: Optional[str] = None,
timeout_ms: Optional[int] = None,
http_headers: Optional[Mapping[str, str]] = None,
- ) -> models.ResponseConnectorListToolsV12:
+ ) -> models.ResponseConnectorListToolsV1:
r"""List tools for a connector.
List all tools available for an MCP connector.
@@ -1037,7 +1043,7 @@ async def list_tools_async(
response_data: Any = None
if utils.match_response(http_res, "200", "application/json"):
return unmarshal_json_response(
- models.ResponseConnectorListToolsV12, http_res
+ models.ResponseConnectorListToolsV1, http_res
)
if utils.match_response(http_res, "422", "application/json"):
response_data = unmarshal_json_response(
diff --git a/src/mistralai/client/deployments.py b/src/mistralai/client/deployments.py
new file mode 100644
index 00000000..a428c61c
--- /dev/null
+++ b/src/mistralai/client/deployments.py
@@ -0,0 +1,370 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: a874b267fcb9
+
+from .basesdk import BaseSDK
+from mistralai.client import errors, models, utils
+from mistralai.client._hooks import HookContext
+from mistralai.client.types import OptionalNullable, UNSET
+from mistralai.client.utils import get_security_from_env
+from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response
+from typing import Any, Mapping, Optional
+
+
+class Deployments(BaseSDK):
+ def list_deployments(
+ self,
+ *,
+ active_only: Optional[bool] = True,
+ workflow_name: OptionalNullable[str] = UNSET,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> models.DeploymentListResponse:
+ r"""List Deployments
+
+        :param active_only: Whether to restrict results to active deployments (defaults to True).
+        :param workflow_name: Optional workflow name used to filter the listed deployments.
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.ListDeploymentsV1WorkflowsDeploymentsGetRequest(
+ active_only=active_only,
+ workflow_name=workflow_name,
+ )
+
+ req = self._build_request(
+ method="GET",
+ path="/v1/workflows/deployments",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=False,
+ request_has_path_params=False,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = self.do_request(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="list_deployments_v1_workflows_deployments_get",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "200", "application/json"):
+ return unmarshal_json_response(models.DeploymentListResponse, http_res)
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
+
+ async def list_deployments_async(
+ self,
+ *,
+ active_only: Optional[bool] = True,
+ workflow_name: OptionalNullable[str] = UNSET,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> models.DeploymentListResponse:
+ r"""List Deployments
+
+        :param active_only: Whether to restrict results to active deployments (defaults to True).
+        :param workflow_name: Optional workflow name used to filter the listed deployments.
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.ListDeploymentsV1WorkflowsDeploymentsGetRequest(
+ active_only=active_only,
+ workflow_name=workflow_name,
+ )
+
+ req = self._build_request_async(
+ method="GET",
+ path="/v1/workflows/deployments",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=False,
+ request_has_path_params=False,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = await self.do_request_async(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="list_deployments_v1_workflows_deployments_get",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "200", "application/json"):
+ return unmarshal_json_response(models.DeploymentListResponse, http_res)
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
+
+ def get_deployment(
+ self,
+ *,
+ name: str,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> models.DeploymentDetailResponse:
+ r"""Get Deployment
+
+        :param name: The name of the deployment to retrieve.
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.GetDeploymentV1WorkflowsDeploymentsNameGetRequest(
+ name=name,
+ )
+
+ req = self._build_request(
+ method="GET",
+ path="/v1/workflows/deployments/{name}",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=False,
+ request_has_path_params=True,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = self.do_request(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="get_deployment_v1_workflows_deployments__name__get",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "200", "application/json"):
+ return unmarshal_json_response(models.DeploymentDetailResponse, http_res)
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
+
+ async def get_deployment_async(
+ self,
+ *,
+ name: str,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> models.DeploymentDetailResponse:
+ r"""Get Deployment
+
+        :param name: The name of the deployment to retrieve.
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.GetDeploymentV1WorkflowsDeploymentsNameGetRequest(
+ name=name,
+ )
+
+ req = self._build_request_async(
+ method="GET",
+ path="/v1/workflows/deployments/{name}",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=False,
+ request_has_path_params=True,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = await self.do_request_async(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="get_deployment_v1_workflows_deployments__name__get",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "200", "application/json"):
+ return unmarshal_json_response(models.DeploymentDetailResponse, http_res)
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
diff --git a/src/mistralai/client/events.py b/src/mistralai/client/events.py
new file mode 100644
index 00000000..d44c0692
--- /dev/null
+++ b/src/mistralai/client/events.py
@@ -0,0 +1,886 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: ac9b961cc70d
+
+from .basesdk import BaseSDK
+from mistralai.client import errors, models, utils
+from mistralai.client._hooks import HookContext
+from mistralai.client.types import OptionalNullable, UNSET
+from mistralai.client.utils import eventstreaming, get_security_from_env
+from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response
+from typing import Any, Dict, List, Mapping, Optional, Union
+
+
+class Events(BaseSDK):
+ def receive_workflow_event(
+ self,
+ *,
+ event: Union[
+ models.WorkflowEventRequestEvent, models.WorkflowEventRequestEventTypedDict
+ ],
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> models.WorkflowEventResponse:
+ r"""Receive Workflow Event
+
+ Receive workflow events from workers.
+
+ Events are published to NATS for real-time streaming and persisted in the database.
+
+ For shared workers, the actual execution owner is resolved from the execution record,
+ ensuring events are streamed to the correct user's namespace.
+
+ :param event: The workflow event payload.
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.WorkflowEventRequest(
+ event=utils.get_pydantic_model(event, models.WorkflowEventRequestEvent),
+ )
+
+ req = self._build_request(
+ method="POST",
+ path="/v1/workflows/events",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=True,
+ request_has_path_params=False,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ get_serialized_body=lambda: utils.serialize_request_body(
+ request, False, False, "json", models.WorkflowEventRequest
+ ),
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = self.do_request(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="receive_workflow_event_v1_workflows_events_post",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "200", "application/json"):
+ return unmarshal_json_response(models.WorkflowEventResponse, http_res)
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
+
+ async def receive_workflow_event_async(
+ self,
+ *,
+ event: Union[
+ models.WorkflowEventRequestEvent, models.WorkflowEventRequestEventTypedDict
+ ],
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> models.WorkflowEventResponse:
+ r"""Receive Workflow Event
+
+ Receive workflow events from workers.
+
+ Events are published to NATS for real-time streaming and persisted in the database.
+
+ For shared workers, the actual execution owner is resolved from the execution record,
+ ensuring events are streamed to the correct user's namespace.
+
+ :param event: The workflow event payload.
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.WorkflowEventRequest(
+ event=utils.get_pydantic_model(event, models.WorkflowEventRequestEvent),
+ )
+
+ req = self._build_request_async(
+ method="POST",
+ path="/v1/workflows/events",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=True,
+ request_has_path_params=False,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ get_serialized_body=lambda: utils.serialize_request_body(
+ request, False, False, "json", models.WorkflowEventRequest
+ ),
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = await self.do_request_async(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="receive_workflow_event_v1_workflows_events_post",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "200", "application/json"):
+ return unmarshal_json_response(models.WorkflowEventResponse, http_res)
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
+
+ def receive_workflow_events_batch(
+ self,
+ *,
+ events: Union[
+ List[models.WorkflowEventBatchRequestEvent],
+ List[models.WorkflowEventBatchRequestEventTypedDict],
+ ],
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> models.WorkflowEventBatchResponse:
+ r"""Receive Workflow Events Batch
+
+ Receive multiple workflow events from workers in a single batch.
+
+ Events are published to NATS for real-time streaming and persisted in the database.
+ This endpoint processes events sequentially to maintain ordering guarantees.
+
+ For shared workers, the actual execution owner is resolved from the execution record,
+ ensuring events are streamed to the correct user's namespace.
+
+ :param events: List of workflow events to send.
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.WorkflowEventBatchRequest(
+ events=utils.get_pydantic_model(
+ events, List[models.WorkflowEventBatchRequestEvent]
+ ),
+ )
+
+ req = self._build_request(
+ method="POST",
+ path="/v1/workflows/events/batch",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=True,
+ request_has_path_params=False,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ get_serialized_body=lambda: utils.serialize_request_body(
+ request, False, False, "json", models.WorkflowEventBatchRequest
+ ),
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = self.do_request(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="receive_workflow_events_batch_v1_workflows_events_batch_post",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "200", "application/json"):
+ return unmarshal_json_response(models.WorkflowEventBatchResponse, http_res)
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
+
+ async def receive_workflow_events_batch_async(
+ self,
+ *,
+ events: Union[
+ List[models.WorkflowEventBatchRequestEvent],
+ List[models.WorkflowEventBatchRequestEventTypedDict],
+ ],
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> models.WorkflowEventBatchResponse:
+ r"""Receive Workflow Events Batch
+
+ Receive multiple workflow events from workers in a single batch.
+
+ Events are published to NATS for real-time streaming and persisted in the database.
+ This endpoint processes events sequentially to maintain ordering guarantees.
+
+ For shared workers, the actual execution owner is resolved from the execution record,
+ ensuring events are streamed to the correct user's namespace.
+
+ :param events: List of workflow events to send.
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.WorkflowEventBatchRequest(
+ events=utils.get_pydantic_model(
+ events, List[models.WorkflowEventBatchRequestEvent]
+ ),
+ )
+
+ req = self._build_request_async(
+ method="POST",
+ path="/v1/workflows/events/batch",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=True,
+ request_has_path_params=False,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ get_serialized_body=lambda: utils.serialize_request_body(
+ request, False, False, "json", models.WorkflowEventBatchRequest
+ ),
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = await self.do_request_async(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="receive_workflow_events_batch_v1_workflows_events_batch_post",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "200", "application/json"):
+ return unmarshal_json_response(models.WorkflowEventBatchResponse, http_res)
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
+
+ def get_stream_events(
+ self,
+ *,
+ scope: Optional[models.Scope] = "*",
+ activity_name: Optional[str] = "*",
+ activity_id: Optional[str] = "*",
+ workflow_name: Optional[str] = "*",
+ workflow_exec_id: Optional[str] = "*",
+ root_workflow_exec_id: Optional[str] = "*",
+ parent_workflow_exec_id: Optional[str] = "*",
+ stream: Optional[str] = "*",
+ start_seq: Optional[int] = 0,
+ metadata_filters: OptionalNullable[Dict[str, Any]] = UNSET,
+ workflow_event_types: OptionalNullable[List[models.WorkflowEventType]] = UNSET,
+ last_event_id: OptionalNullable[str] = UNSET,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> eventstreaming.EventStream[
+ models.GetStreamEventsV1WorkflowsEventsStreamGetResponseBody
+ ]:
+ r"""Get Stream Events
+
+ :param scope:
+ :param activity_name:
+ :param activity_id:
+ :param workflow_name:
+ :param workflow_exec_id:
+ :param root_workflow_exec_id:
+ :param parent_workflow_exec_id:
+ :param stream:
+ :param start_seq:
+ :param metadata_filters:
+ :param workflow_event_types:
+ :param last_event_id:
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.GetStreamEventsV1WorkflowsEventsStreamGetRequest(
+ scope=scope,
+ activity_name=activity_name,
+ activity_id=activity_id,
+ workflow_name=workflow_name,
+ workflow_exec_id=workflow_exec_id,
+ root_workflow_exec_id=root_workflow_exec_id,
+ parent_workflow_exec_id=parent_workflow_exec_id,
+ stream=stream,
+ start_seq=start_seq,
+ metadata_filters=metadata_filters,
+ workflow_event_types=workflow_event_types,
+ last_event_id=last_event_id,
+ )
+
+ req = self._build_request(
+ method="GET",
+ path="/v1/workflows/events/stream",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=False,
+ request_has_path_params=False,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="text/event-stream",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = self.do_request(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="get_stream_events_v1_workflows_events_stream_get",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ stream=True,
+ retry_config=retry_config,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "200", "text/event-stream"):
+ return eventstreaming.EventStream(
+ http_res,
+ lambda raw: utils.unmarshal_json(
+ raw, models.GetStreamEventsV1WorkflowsEventsStreamGetResponseBody
+ ),
+ client_ref=self,
+ data_required=False,
+ )
+ if utils.match_response(http_res, "422", "application/json"):
+ http_res_text = utils.stream_to_text(http_res)
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res, http_res_text
+ )
+ raise errors.HTTPValidationError(response_data, http_res, http_res_text)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("Unexpected response received", http_res, http_res_text)
+
+ async def get_stream_events_async(
+ self,
+ *,
+ scope: Optional[models.Scope] = "*",
+ activity_name: Optional[str] = "*",
+ activity_id: Optional[str] = "*",
+ workflow_name: Optional[str] = "*",
+ workflow_exec_id: Optional[str] = "*",
+ root_workflow_exec_id: Optional[str] = "*",
+ parent_workflow_exec_id: Optional[str] = "*",
+ stream: Optional[str] = "*",
+ start_seq: Optional[int] = 0,
+ metadata_filters: OptionalNullable[Dict[str, Any]] = UNSET,
+ workflow_event_types: OptionalNullable[List[models.WorkflowEventType]] = UNSET,
+ last_event_id: OptionalNullable[str] = UNSET,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> eventstreaming.EventStreamAsync[
+ models.GetStreamEventsV1WorkflowsEventsStreamGetResponseBody
+ ]:
+ r"""Get Stream Events
+
+ :param scope:
+ :param activity_name:
+ :param activity_id:
+ :param workflow_name:
+ :param workflow_exec_id:
+ :param root_workflow_exec_id:
+ :param parent_workflow_exec_id:
+ :param stream:
+ :param start_seq:
+ :param metadata_filters:
+ :param workflow_event_types:
+ :param last_event_id:
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.GetStreamEventsV1WorkflowsEventsStreamGetRequest(
+ scope=scope,
+ activity_name=activity_name,
+ activity_id=activity_id,
+ workflow_name=workflow_name,
+ workflow_exec_id=workflow_exec_id,
+ root_workflow_exec_id=root_workflow_exec_id,
+ parent_workflow_exec_id=parent_workflow_exec_id,
+ stream=stream,
+ start_seq=start_seq,
+ metadata_filters=metadata_filters,
+ workflow_event_types=workflow_event_types,
+ last_event_id=last_event_id,
+ )
+
+ req = self._build_request_async(
+ method="GET",
+ path="/v1/workflows/events/stream",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=False,
+ request_has_path_params=False,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="text/event-stream",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = await self.do_request_async(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="get_stream_events_v1_workflows_events_stream_get",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ stream=True,
+ retry_config=retry_config,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "200", "text/event-stream"):
+ return eventstreaming.EventStreamAsync(
+ http_res,
+ lambda raw: utils.unmarshal_json(
+ raw, models.GetStreamEventsV1WorkflowsEventsStreamGetResponseBody
+ ),
+ client_ref=self,
+ data_required=False,
+ )
+ if utils.match_response(http_res, "422", "application/json"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res, http_res_text
+ )
+ raise errors.HTTPValidationError(response_data, http_res, http_res_text)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("Unexpected response received", http_res, http_res_text)
+
+ def get_workflow_events(
+ self,
+ *,
+ root_workflow_exec_id: OptionalNullable[str] = UNSET,
+ workflow_exec_id: OptionalNullable[str] = UNSET,
+ workflow_run_id: OptionalNullable[str] = UNSET,
+ limit: Optional[int] = 100,
+ cursor: OptionalNullable[str] = UNSET,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> models.ListWorkflowEventResponse:
+ r"""Get Workflow Events
+
+ :param root_workflow_exec_id: Execution ID of the root workflow that initiated this execution chain.
+ :param workflow_exec_id: Execution ID of the workflow that emitted this event.
+ :param workflow_run_id: Run ID of the workflow that emitted this event.
+ :param limit: Maximum number of events to return.
+ :param cursor: Cursor for pagination.
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.GetWorkflowEventsV1WorkflowsEventsListGetRequest(
+ root_workflow_exec_id=root_workflow_exec_id,
+ workflow_exec_id=workflow_exec_id,
+ workflow_run_id=workflow_run_id,
+ limit=limit,
+ cursor=cursor,
+ )
+
+ req = self._build_request(
+ method="GET",
+ path="/v1/workflows/events/list",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=False,
+ request_has_path_params=False,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = self.do_request(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="get_workflow_events_v1_workflows_events_list_get",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "200", "application/json"):
+ return unmarshal_json_response(models.ListWorkflowEventResponse, http_res)
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
+
+ async def get_workflow_events_async(
+ self,
+ *,
+ root_workflow_exec_id: OptionalNullable[str] = UNSET,
+ workflow_exec_id: OptionalNullable[str] = UNSET,
+ workflow_run_id: OptionalNullable[str] = UNSET,
+ limit: Optional[int] = 100,
+ cursor: OptionalNullable[str] = UNSET,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> models.ListWorkflowEventResponse:
+ r"""Get Workflow Events
+
+ :param root_workflow_exec_id: Execution ID of the root workflow that initiated this execution chain.
+ :param workflow_exec_id: Execution ID of the workflow that emitted this event.
+ :param workflow_run_id: Run ID of the workflow that emitted this event.
+ :param limit: Maximum number of events to return.
+ :param cursor: Cursor for pagination.
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.GetWorkflowEventsV1WorkflowsEventsListGetRequest(
+ root_workflow_exec_id=root_workflow_exec_id,
+ workflow_exec_id=workflow_exec_id,
+ workflow_run_id=workflow_run_id,
+ limit=limit,
+ cursor=cursor,
+ )
+
+ req = self._build_request_async(
+ method="GET",
+ path="/v1/workflows/events/list",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=False,
+ request_has_path_params=False,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = await self.do_request_async(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="get_workflow_events_v1_workflows_events_list_get",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "200", "application/json"):
+ return unmarshal_json_response(models.ListWorkflowEventResponse, http_res)
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
diff --git a/src/mistralai/client/executions.py b/src/mistralai/client/executions.py
new file mode 100644
index 00000000..7112dc1a
--- /dev/null
+++ b/src/mistralai/client/executions.py
@@ -0,0 +1,2724 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 974004d347a2
+
+from .basesdk import BaseSDK
+from mistralai.client import errors, models, utils
+from mistralai.client._hooks import HookContext
+from mistralai.client.types import OptionalNullable, UNSET
+from mistralai.client.utils import eventstreaming, get_security_from_env
+from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response
+from typing import Any, List, Mapping, Optional, Union
+
+
+class Executions(BaseSDK):
+ def get_workflow_execution(
+ self,
+ *,
+ execution_id: str,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> models.WorkflowExecutionResponse:
+ r"""Get Workflow Execution
+
+ :param execution_id:
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.GetWorkflowExecutionV1WorkflowsExecutionsExecutionIDGetRequest(
+ execution_id=execution_id,
+ )
+
+ req = self._build_request(
+ method="GET",
+ path="/v1/workflows/executions/{execution_id}",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=False,
+ request_has_path_params=True,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = self.do_request(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="get_workflow_execution_v1_workflows_executions__execution_id__get",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "200", "application/json"):
+ return unmarshal_json_response(models.WorkflowExecutionResponse, http_res)
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
+
+ async def get_workflow_execution_async(
+ self,
+ *,
+ execution_id: str,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> models.WorkflowExecutionResponse:
+ r"""Get Workflow Execution
+
+ :param execution_id:
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.GetWorkflowExecutionV1WorkflowsExecutionsExecutionIDGetRequest(
+ execution_id=execution_id,
+ )
+
+ req = self._build_request_async(
+ method="GET",
+ path="/v1/workflows/executions/{execution_id}",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=False,
+ request_has_path_params=True,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = await self.do_request_async(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="get_workflow_execution_v1_workflows_executions__execution_id__get",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "200", "application/json"):
+ return unmarshal_json_response(models.WorkflowExecutionResponse, http_res)
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
+
+ def get_workflow_execution_history(
+ self,
+ *,
+ execution_id: str,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> Any:
+ r"""Get Workflow Execution History
+
+ :param execution_id:
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.GetWorkflowExecutionHistoryV1WorkflowsExecutionsExecutionIDHistoryGetRequest(
+ execution_id=execution_id,
+ )
+
+ req = self._build_request(
+ method="GET",
+ path="/v1/workflows/executions/{execution_id}/history",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=False,
+ request_has_path_params=True,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = self.do_request(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="get_workflow_execution_history_v1_workflows_executions__execution_id__history_get",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "200", "application/json"):
+ return unmarshal_json_response(Any, http_res)
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
+
    async def get_workflow_execution_history_async(
        self,
        *,
        execution_id: str,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> Any:
        r"""Get Workflow Execution History

        Async variant: fetches the event history of one workflow execution via
        ``GET /v1/workflows/executions/{execution_id}/history``.

        :param execution_id:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        :return: The decoded JSON response body (the API spec leaves it untyped).
        :raises errors.HTTPValidationError: On a 422 validation failure.
        :raises errors.SDKError: On any other 4XX/5XX or an unexpected response.
        """
        base_url = None
        url_variables = None
        # Fall back to the SDK-wide timeout when no per-call value is given.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        # A per-call server_url takes precedence over the configured base URL.
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.GetWorkflowExecutionHistoryV1WorkflowsExecutionsExecutionIDHistoryGetRequest(
            execution_id=execution_id,
        )

        req = self._build_request_async(
            method="GET",
            path="/v1/workflows/executions/{execution_id}/history",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Resolve the effective retry policy: per-call override first, then
        # the SDK-level default; only a concrete RetryConfig activates retries.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="get_workflow_execution_history_v1_workflows_executions__execution_id__history_get",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # Dispatch on status code: 200 succeeds, 422 is a typed validation
        # error, any other 4XX/5XX becomes a generic SDKError.
        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(Any, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)
+
    def signal_workflow_execution(
        self,
        *,
        execution_id: str,
        name: str,
        input: OptionalNullable[
            Union[
                models.SignalInvocationBodyInput,
                models.SignalInvocationBodyInputTypedDict,
            ]
        ] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.SignalWorkflowResponse:
        r"""Signal Workflow Execution

        Sends a named signal (with optional input payload) to a running
        workflow execution via
        ``POST /v1/workflows/executions/{execution_id}/signals``.

        :param execution_id:
        :param name: The name of the signal to send
        :param input: Input data for the signal, matching its schema
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        :return: The parsed ``SignalWorkflowResponse`` (the API answers 202 Accepted).
        :raises errors.HTTPValidationError: On a 422 validation failure.
        :raises errors.SDKError: On any other 4XX/5XX or an unexpected response.
        """
        base_url = None
        url_variables = None
        # Fall back to the SDK-wide timeout when no per-call value is given.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        # A per-call server_url takes precedence over the configured base URL.
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        # Path parameter plus JSON body wrapped into the generated request model;
        # the union-typed input is coerced into the pydantic body model.
        request = models.SignalWorkflowExecutionV1WorkflowsExecutionsExecutionIDSignalsPostRequest(
            execution_id=execution_id,
            signal_invocation_body=models.SignalInvocationBody(
                name=name,
                input=utils.get_pydantic_model(
                    input, OptionalNullable[models.SignalInvocationBodyInput]
                ),
            ),
        )

        req = self._build_request(
            method="POST",
            path="/v1/workflows/executions/{execution_id}/signals",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            # Body serialization is deferred until the request is actually sent.
            get_serialized_body=lambda: utils.serialize_request_body(
                request.signal_invocation_body,
                False,
                False,
                "json",
                models.SignalInvocationBody,
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Resolve the effective retry policy: per-call override first, then
        # the SDK-level default; only a concrete RetryConfig activates retries.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="signal_workflow_execution_v1_workflows_executions__execution_id__signals_post",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # 202 Accepted is the success status for signal delivery.
        response_data: Any = None
        if utils.match_response(http_res, "202", "application/json"):
            return unmarshal_json_response(models.SignalWorkflowResponse, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)
+
    async def signal_workflow_execution_async(
        self,
        *,
        execution_id: str,
        name: str,
        input: OptionalNullable[
            Union[
                models.SignalInvocationBodyInput,
                models.SignalInvocationBodyInputTypedDict,
            ]
        ] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.SignalWorkflowResponse:
        r"""Signal Workflow Execution

        Async variant: sends a named signal (with optional input payload) to a
        running workflow execution via
        ``POST /v1/workflows/executions/{execution_id}/signals``.

        :param execution_id:
        :param name: The name of the signal to send
        :param input: Input data for the signal, matching its schema
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        :return: The parsed ``SignalWorkflowResponse`` (the API answers 202 Accepted).
        :raises errors.HTTPValidationError: On a 422 validation failure.
        :raises errors.SDKError: On any other 4XX/5XX or an unexpected response.
        """
        base_url = None
        url_variables = None
        # Fall back to the SDK-wide timeout when no per-call value is given.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        # A per-call server_url takes precedence over the configured base URL.
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        # Path parameter plus JSON body wrapped into the generated request model;
        # the union-typed input is coerced into the pydantic body model.
        request = models.SignalWorkflowExecutionV1WorkflowsExecutionsExecutionIDSignalsPostRequest(
            execution_id=execution_id,
            signal_invocation_body=models.SignalInvocationBody(
                name=name,
                input=utils.get_pydantic_model(
                    input, OptionalNullable[models.SignalInvocationBodyInput]
                ),
            ),
        )

        req = self._build_request_async(
            method="POST",
            path="/v1/workflows/executions/{execution_id}/signals",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            # Body serialization is deferred until the request is actually sent.
            get_serialized_body=lambda: utils.serialize_request_body(
                request.signal_invocation_body,
                False,
                False,
                "json",
                models.SignalInvocationBody,
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Resolve the effective retry policy: per-call override first, then
        # the SDK-level default; only a concrete RetryConfig activates retries.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="signal_workflow_execution_v1_workflows_executions__execution_id__signals_post",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # 202 Accepted is the success status for signal delivery.
        response_data: Any = None
        if utils.match_response(http_res, "202", "application/json"):
            return unmarshal_json_response(models.SignalWorkflowResponse, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)
+
    def query_workflow_execution(
        self,
        *,
        execution_id: str,
        name: str,
        input: OptionalNullable[
            Union[
                models.QueryInvocationBodyInput,
                models.QueryInvocationBodyInputTypedDict,
            ]
        ] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.QueryWorkflowResponse:
        r"""Query Workflow Execution

        Runs a named query (with optional input payload) against a workflow
        execution via
        ``POST /v1/workflows/executions/{execution_id}/queries``.

        :param execution_id:
        :param name: The name of the query to request
        :param input: Input data for the query, matching its schema
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        :return: The parsed ``QueryWorkflowResponse``.
        :raises errors.HTTPValidationError: On a 422 validation failure.
        :raises errors.SDKError: On any other 4XX/5XX or an unexpected response.
        """
        base_url = None
        url_variables = None
        # Fall back to the SDK-wide timeout when no per-call value is given.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        # A per-call server_url takes precedence over the configured base URL.
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        # Path parameter plus JSON body wrapped into the generated request model;
        # the union-typed input is coerced into the pydantic body model.
        request = models.QueryWorkflowExecutionV1WorkflowsExecutionsExecutionIDQueriesPostRequest(
            execution_id=execution_id,
            query_invocation_body=models.QueryInvocationBody(
                name=name,
                input=utils.get_pydantic_model(
                    input, OptionalNullable[models.QueryInvocationBodyInput]
                ),
            ),
        )

        req = self._build_request(
            method="POST",
            path="/v1/workflows/executions/{execution_id}/queries",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            # Body serialization is deferred until the request is actually sent.
            get_serialized_body=lambda: utils.serialize_request_body(
                request.query_invocation_body,
                False,
                False,
                "json",
                models.QueryInvocationBody,
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Resolve the effective retry policy: per-call override first, then
        # the SDK-level default; only a concrete RetryConfig activates retries.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="query_workflow_execution_v1_workflows_executions__execution_id__queries_post",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # Dispatch on status code: 200 succeeds, 422 is a typed validation
        # error, any other 4XX/5XX becomes a generic SDKError.
        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.QueryWorkflowResponse, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)
+
    async def query_workflow_execution_async(
        self,
        *,
        execution_id: str,
        name: str,
        input: OptionalNullable[
            Union[
                models.QueryInvocationBodyInput,
                models.QueryInvocationBodyInputTypedDict,
            ]
        ] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.QueryWorkflowResponse:
        r"""Query Workflow Execution

        Async variant: runs a named query (with optional input payload)
        against a workflow execution via
        ``POST /v1/workflows/executions/{execution_id}/queries``.

        :param execution_id:
        :param name: The name of the query to request
        :param input: Input data for the query, matching its schema
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        :return: The parsed ``QueryWorkflowResponse``.
        :raises errors.HTTPValidationError: On a 422 validation failure.
        :raises errors.SDKError: On any other 4XX/5XX or an unexpected response.
        """
        base_url = None
        url_variables = None
        # Fall back to the SDK-wide timeout when no per-call value is given.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        # A per-call server_url takes precedence over the configured base URL.
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        # Path parameter plus JSON body wrapped into the generated request model;
        # the union-typed input is coerced into the pydantic body model.
        request = models.QueryWorkflowExecutionV1WorkflowsExecutionsExecutionIDQueriesPostRequest(
            execution_id=execution_id,
            query_invocation_body=models.QueryInvocationBody(
                name=name,
                input=utils.get_pydantic_model(
                    input, OptionalNullable[models.QueryInvocationBodyInput]
                ),
            ),
        )

        req = self._build_request_async(
            method="POST",
            path="/v1/workflows/executions/{execution_id}/queries",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            # Body serialization is deferred until the request is actually sent.
            get_serialized_body=lambda: utils.serialize_request_body(
                request.query_invocation_body,
                False,
                False,
                "json",
                models.QueryInvocationBody,
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Resolve the effective retry policy: per-call override first, then
        # the SDK-level default; only a concrete RetryConfig activates retries.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="query_workflow_execution_v1_workflows_executions__execution_id__queries_post",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # Dispatch on status code: 200 succeeds, 422 is a typed validation
        # error, any other 4XX/5XX becomes a generic SDKError.
        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.QueryWorkflowResponse, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)
+
    def terminate_workflow_execution(
        self,
        *,
        execution_id: str,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> None:
        r"""Terminate Workflow Execution

        Forcibly terminates a workflow execution via
        ``POST /v1/workflows/executions/{execution_id}/terminate``.

        :param execution_id:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        :return: ``None`` — the API answers 204 No Content on success.
        :raises errors.HTTPValidationError: On a 422 validation failure.
        :raises errors.SDKError: On any other 4XX/5XX or an unexpected response.
        """
        base_url = None
        url_variables = None
        # Fall back to the SDK-wide timeout when no per-call value is given.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        # A per-call server_url takes precedence over the configured base URL.
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.TerminateWorkflowExecutionV1WorkflowsExecutionsExecutionIDTerminatePostRequest(
            execution_id=execution_id,
        )

        req = self._build_request(
            method="POST",
            path="/v1/workflows/executions/{execution_id}/terminate",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Resolve the effective retry policy: per-call override first, then
        # the SDK-level default; only a concrete RetryConfig activates retries.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="terminate_workflow_execution_v1_workflows_executions__execution_id__terminate_post",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # 204 No Content is the success status; there is no body to parse.
        response_data: Any = None
        if utils.match_response(http_res, "204", "*"):
            return
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)
+
    async def terminate_workflow_execution_async(
        self,
        *,
        execution_id: str,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> None:
        r"""Terminate Workflow Execution

        Async variant: forcibly terminates a workflow execution via
        ``POST /v1/workflows/executions/{execution_id}/terminate``.

        :param execution_id:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        :return: ``None`` — the API answers 204 No Content on success.
        :raises errors.HTTPValidationError: On a 422 validation failure.
        :raises errors.SDKError: On any other 4XX/5XX or an unexpected response.
        """
        base_url = None
        url_variables = None
        # Fall back to the SDK-wide timeout when no per-call value is given.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        # A per-call server_url takes precedence over the configured base URL.
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.TerminateWorkflowExecutionV1WorkflowsExecutionsExecutionIDTerminatePostRequest(
            execution_id=execution_id,
        )

        req = self._build_request_async(
            method="POST",
            path="/v1/workflows/executions/{execution_id}/terminate",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Resolve the effective retry policy: per-call override first, then
        # the SDK-level default; only a concrete RetryConfig activates retries.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="terminate_workflow_execution_v1_workflows_executions__execution_id__terminate_post",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # 204 No Content is the success status; there is no body to parse.
        response_data: Any = None
        if utils.match_response(http_res, "204", "*"):
            return
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)
+
    def batch_terminate_workflow_executions(
        self,
        *,
        execution_ids: List[str],
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.BatchExecutionResponse:
        r"""Batch Terminate Workflow Executions

        Terminates several workflow executions in one request via
        ``POST /v1/workflows/executions/terminate``.

        :param execution_ids: List of execution IDs to process
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        :return: The parsed ``BatchExecutionResponse`` with per-execution results.
        :raises errors.HTTPValidationError: On a 422 validation failure.
        :raises errors.SDKError: On any other 4XX/5XX or an unexpected response.
        """
        base_url = None
        url_variables = None
        # Fall back to the SDK-wide timeout when no per-call value is given.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        # A per-call server_url takes precedence over the configured base URL.
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        # No path parameters here: the whole payload is the JSON body model.
        request = models.BatchExecutionBody(
            execution_ids=execution_ids,
        )

        req = self._build_request(
            method="POST",
            path="/v1/workflows/executions/terminate",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            # Body serialization is deferred until the request is actually sent.
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.BatchExecutionBody
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Resolve the effective retry policy: per-call override first, then
        # the SDK-level default; only a concrete RetryConfig activates retries.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="batch_terminate_workflow_executions_v1_workflows_executions_terminate_post",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # Dispatch on status code: 200 succeeds, 422 is a typed validation
        # error, any other 4XX/5XX becomes a generic SDKError.
        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.BatchExecutionResponse, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)
+
    async def batch_terminate_workflow_executions_async(
        self,
        *,
        execution_ids: List[str],
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.BatchExecutionResponse:
        r"""Batch Terminate Workflow Executions

        Async variant: terminates several workflow executions in one request
        via ``POST /v1/workflows/executions/terminate``.

        :param execution_ids: List of execution IDs to process
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        :return: The parsed ``BatchExecutionResponse`` with per-execution results.
        :raises errors.HTTPValidationError: On a 422 validation failure.
        :raises errors.SDKError: On any other 4XX/5XX or an unexpected response.
        """
        base_url = None
        url_variables = None
        # Fall back to the SDK-wide timeout when no per-call value is given.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        # A per-call server_url takes precedence over the configured base URL.
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        # No path parameters here: the whole payload is the JSON body model.
        request = models.BatchExecutionBody(
            execution_ids=execution_ids,
        )

        req = self._build_request_async(
            method="POST",
            path="/v1/workflows/executions/terminate",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            # Body serialization is deferred until the request is actually sent.
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.BatchExecutionBody
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Resolve the effective retry policy: per-call override first, then
        # the SDK-level default; only a concrete RetryConfig activates retries.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="batch_terminate_workflow_executions_v1_workflows_executions_terminate_post",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # Dispatch on status code: 200 succeeds, 422 is a typed validation
        # error, any other 4XX/5XX becomes a generic SDKError.
        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.BatchExecutionResponse, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)
+
    def cancel_workflow_execution(
        self,
        *,
        execution_id: str,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> None:
        r"""Cancel Workflow Execution

        Requests cancellation of a workflow execution via
        ``POST /v1/workflows/executions/{execution_id}/cancel``.

        :param execution_id:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        :return: ``None`` — the API answers 204 No Content on success.
        :raises errors.HTTPValidationError: On a 422 validation failure.
        :raises errors.SDKError: On any other 4XX/5XX or an unexpected response.
        """
        base_url = None
        url_variables = None
        # Fall back to the SDK-wide timeout when no per-call value is given.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        # A per-call server_url takes precedence over the configured base URL.
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.CancelWorkflowExecutionV1WorkflowsExecutionsExecutionIDCancelPostRequest(
            execution_id=execution_id,
        )

        req = self._build_request(
            method="POST",
            path="/v1/workflows/executions/{execution_id}/cancel",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Resolve the effective retry policy: per-call override first, then
        # the SDK-level default; only a concrete RetryConfig activates retries.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="cancel_workflow_execution_v1_workflows_executions__execution_id__cancel_post",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # 204 No Content is the success status; there is no body to parse.
        response_data: Any = None
        if utils.match_response(http_res, "204", "*"):
            return
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)
+
+    async def cancel_workflow_execution_async(
+        self,
+        *,
+        execution_id: str,
+        retries: OptionalNullable[utils.RetryConfig] = UNSET,
+        server_url: Optional[str] = None,
+        timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
+    ):
+        r"""Cancel Workflow Execution
+
+        :param execution_id:
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
+        """
+        base_url = None
+        url_variables = None
+        # Per-call timeout overrides the SDK-wide default when provided.
+        if timeout_ms is None:
+            timeout_ms = self.sdk_configuration.timeout_ms
+
+        # An explicit server_url takes precedence over the configured base URL.
+        if server_url is not None:
+            base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
+
+        # Wrap the path parameter in the generated request model.
+        request = models.CancelWorkflowExecutionV1WorkflowsExecutionsExecutionIDCancelPostRequest(
+            execution_id=execution_id,
+        )
+
+        req = self._build_request_async(
+            method="POST",
+            path="/v1/workflows/executions/{execution_id}/cancel",
+            base_url=base_url,
+            url_variables=url_variables,
+            request=request,
+            request_body_required=False,
+            request_has_path_params=True,
+            request_has_query_params=True,
+            user_agent_header="user-agent",
+            accept_header_value="application/json",
+            http_headers=http_headers,
+            security=self.sdk_configuration.security,
+            allow_empty_value=None,
+            timeout_ms=timeout_ms,
+        )
+
+        # UNSET means "inherit the SDK-level retry configuration".
+        if retries == UNSET:
+            if self.sdk_configuration.retry_config is not UNSET:
+                retries = self.sdk_configuration.retry_config
+
+        retry_config = None
+        if isinstance(retries, utils.RetryConfig):
+            # Retry only on rate limiting (429) and transient 5XX statuses.
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+        http_res = await self.do_request_async(
+            hook_ctx=HookContext(
+                config=self.sdk_configuration,
+                base_url=base_url or "",
+                operation_id="cancel_workflow_execution_v1_workflows_executions__execution_id__cancel_post",
+                oauth2_scopes=None,
+                security_source=get_security_from_env(
+                    self.sdk_configuration.security, models.Security
+                ),
+            ),
+            request=req,
+            error_status_codes=["422", "4XX", "5XX"],
+            retry_config=retry_config,
+        )
+
+        response_data: Any = None
+        # 204 No Content: cancellation accepted, nothing to return.
+        if utils.match_response(http_res, "204", "*"):
+            return
+        # 422 carries a structured validation-error payload.
+        if utils.match_response(http_res, "422", "application/json"):
+            response_data = unmarshal_json_response(
+                errors.HTTPValidationErrorData, http_res
+            )
+            raise errors.HTTPValidationError(response_data, http_res)
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = await utils.stream_to_text_async(http_res)
+            raise errors.SDKError("API error occurred", http_res, http_res_text)
+        if utils.match_response(http_res, "5XX", "*"):
+            http_res_text = await utils.stream_to_text_async(http_res)
+            raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+        raise errors.SDKError("Unexpected response received", http_res)
+
+    def batch_cancel_workflow_executions(
+        self,
+        *,
+        execution_ids: List[str],
+        retries: OptionalNullable[utils.RetryConfig] = UNSET,
+        server_url: Optional[str] = None,
+        timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
+    ) -> models.BatchExecutionResponse:
+        r"""Batch Cancel Workflow Executions
+
+        :param execution_ids: List of execution IDs to process
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
+        """
+        base_url = None
+        url_variables = None
+        # Per-call timeout overrides the SDK-wide default when provided.
+        if timeout_ms is None:
+            timeout_ms = self.sdk_configuration.timeout_ms
+
+        # An explicit server_url takes precedence over the configured base URL.
+        if server_url is not None:
+            base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
+
+        # The IDs travel in the JSON request body, not in the path.
+        request = models.BatchExecutionBody(
+            execution_ids=execution_ids,
+        )
+
+        req = self._build_request(
+            method="POST",
+            path="/v1/workflows/executions/cancel",
+            base_url=base_url,
+            url_variables=url_variables,
+            request=request,
+            request_body_required=True,
+            request_has_path_params=False,
+            request_has_query_params=True,
+            user_agent_header="user-agent",
+            accept_header_value="application/json",
+            http_headers=http_headers,
+            security=self.sdk_configuration.security,
+            get_serialized_body=lambda: utils.serialize_request_body(
+                request, False, False, "json", models.BatchExecutionBody
+            ),
+            allow_empty_value=None,
+            timeout_ms=timeout_ms,
+        )
+
+        # UNSET means "inherit the SDK-level retry configuration".
+        if retries == UNSET:
+            if self.sdk_configuration.retry_config is not UNSET:
+                retries = self.sdk_configuration.retry_config
+
+        retry_config = None
+        if isinstance(retries, utils.RetryConfig):
+            # Retry only on rate limiting (429) and transient 5XX statuses.
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+        http_res = self.do_request(
+            hook_ctx=HookContext(
+                config=self.sdk_configuration,
+                base_url=base_url or "",
+                operation_id="batch_cancel_workflow_executions_v1_workflows_executions_cancel_post",
+                oauth2_scopes=None,
+                security_source=get_security_from_env(
+                    self.sdk_configuration.security, models.Security
+                ),
+            ),
+            request=req,
+            error_status_codes=["422", "4XX", "5XX"],
+            retry_config=retry_config,
+        )
+
+        response_data: Any = None
+        # 200: per-execution batch results in the response body.
+        if utils.match_response(http_res, "200", "application/json"):
+            return unmarshal_json_response(models.BatchExecutionResponse, http_res)
+        # 422 carries a structured validation-error payload.
+        if utils.match_response(http_res, "422", "application/json"):
+            response_data = unmarshal_json_response(
+                errors.HTTPValidationErrorData, http_res
+            )
+            raise errors.HTTPValidationError(response_data, http_res)
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise errors.SDKError("API error occurred", http_res, http_res_text)
+        if utils.match_response(http_res, "5XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+        raise errors.SDKError("Unexpected response received", http_res)
+
+    async def batch_cancel_workflow_executions_async(
+        self,
+        *,
+        execution_ids: List[str],
+        retries: OptionalNullable[utils.RetryConfig] = UNSET,
+        server_url: Optional[str] = None,
+        timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
+    ) -> models.BatchExecutionResponse:
+        r"""Batch Cancel Workflow Executions
+
+        :param execution_ids: List of execution IDs to process
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
+        """
+        base_url = None
+        url_variables = None
+        # Per-call timeout overrides the SDK-wide default when provided.
+        if timeout_ms is None:
+            timeout_ms = self.sdk_configuration.timeout_ms
+
+        # An explicit server_url takes precedence over the configured base URL.
+        if server_url is not None:
+            base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
+
+        # The IDs travel in the JSON request body, not in the path.
+        request = models.BatchExecutionBody(
+            execution_ids=execution_ids,
+        )
+
+        req = self._build_request_async(
+            method="POST",
+            path="/v1/workflows/executions/cancel",
+            base_url=base_url,
+            url_variables=url_variables,
+            request=request,
+            request_body_required=True,
+            request_has_path_params=False,
+            request_has_query_params=True,
+            user_agent_header="user-agent",
+            accept_header_value="application/json",
+            http_headers=http_headers,
+            security=self.sdk_configuration.security,
+            get_serialized_body=lambda: utils.serialize_request_body(
+                request, False, False, "json", models.BatchExecutionBody
+            ),
+            allow_empty_value=None,
+            timeout_ms=timeout_ms,
+        )
+
+        # UNSET means "inherit the SDK-level retry configuration".
+        if retries == UNSET:
+            if self.sdk_configuration.retry_config is not UNSET:
+                retries = self.sdk_configuration.retry_config
+
+        retry_config = None
+        if isinstance(retries, utils.RetryConfig):
+            # Retry only on rate limiting (429) and transient 5XX statuses.
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+        http_res = await self.do_request_async(
+            hook_ctx=HookContext(
+                config=self.sdk_configuration,
+                base_url=base_url or "",
+                operation_id="batch_cancel_workflow_executions_v1_workflows_executions_cancel_post",
+                oauth2_scopes=None,
+                security_source=get_security_from_env(
+                    self.sdk_configuration.security, models.Security
+                ),
+            ),
+            request=req,
+            error_status_codes=["422", "4XX", "5XX"],
+            retry_config=retry_config,
+        )
+
+        response_data: Any = None
+        # 200: per-execution batch results in the response body.
+        if utils.match_response(http_res, "200", "application/json"):
+            return unmarshal_json_response(models.BatchExecutionResponse, http_res)
+        # 422 carries a structured validation-error payload.
+        if utils.match_response(http_res, "422", "application/json"):
+            response_data = unmarshal_json_response(
+                errors.HTTPValidationErrorData, http_res
+            )
+            raise errors.HTTPValidationError(response_data, http_res)
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = await utils.stream_to_text_async(http_res)
+            raise errors.SDKError("API error occurred", http_res, http_res_text)
+        if utils.match_response(http_res, "5XX", "*"):
+            http_res_text = await utils.stream_to_text_async(http_res)
+            raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+        raise errors.SDKError("Unexpected response received", http_res)
+
+    def reset_workflow(
+        self,
+        *,
+        execution_id: str,
+        event_id: int,
+        reason: OptionalNullable[str] = UNSET,
+        exclude_signals: Optional[bool] = False,
+        exclude_updates: Optional[bool] = False,
+        retries: OptionalNullable[utils.RetryConfig] = UNSET,
+        server_url: Optional[str] = None,
+        timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
+    ):
+        r"""Reset Workflow
+
+        :param execution_id:
+        :param event_id: The event ID to reset the workflow execution to
+        :param reason: Reason for resetting the workflow execution
+        :param exclude_signals: Whether to exclude signals that happened after the reset point
+        :param exclude_updates: Whether to exclude updates that happened after the reset point
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
+        """
+        base_url = None
+        url_variables = None
+        # Per-call timeout overrides the SDK-wide default when provided.
+        if timeout_ms is None:
+            timeout_ms = self.sdk_configuration.timeout_ms
+
+        # An explicit server_url takes precedence over the configured base URL.
+        if server_url is not None:
+            base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
+
+        # Path parameter plus a nested JSON body describing the reset point.
+        request = models.ResetWorkflowV1WorkflowsExecutionsExecutionIDResetPostRequest(
+            execution_id=execution_id,
+            reset_invocation_body=models.ResetInvocationBody(
+                event_id=event_id,
+                reason=reason,
+                exclude_signals=exclude_signals,
+                exclude_updates=exclude_updates,
+            ),
+        )
+
+        req = self._build_request(
+            method="POST",
+            path="/v1/workflows/executions/{execution_id}/reset",
+            base_url=base_url,
+            url_variables=url_variables,
+            request=request,
+            request_body_required=True,
+            request_has_path_params=True,
+            request_has_query_params=True,
+            user_agent_header="user-agent",
+            accept_header_value="application/json",
+            http_headers=http_headers,
+            security=self.sdk_configuration.security,
+            get_serialized_body=lambda: utils.serialize_request_body(
+                request.reset_invocation_body,
+                False,
+                False,
+                "json",
+                models.ResetInvocationBody,
+            ),
+            allow_empty_value=None,
+            timeout_ms=timeout_ms,
+        )
+
+        # UNSET means "inherit the SDK-level retry configuration".
+        if retries == UNSET:
+            if self.sdk_configuration.retry_config is not UNSET:
+                retries = self.sdk_configuration.retry_config
+
+        retry_config = None
+        if isinstance(retries, utils.RetryConfig):
+            # Retry only on rate limiting (429) and transient 5XX statuses.
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+        http_res = self.do_request(
+            hook_ctx=HookContext(
+                config=self.sdk_configuration,
+                base_url=base_url or "",
+                operation_id="reset_workflow_v1_workflows_executions__execution_id__reset_post",
+                oauth2_scopes=None,
+                security_source=get_security_from_env(
+                    self.sdk_configuration.security, models.Security
+                ),
+            ),
+            request=req,
+            error_status_codes=["422", "4XX", "5XX"],
+            retry_config=retry_config,
+        )
+
+        response_data: Any = None
+        # 204 No Content: reset accepted, nothing to return.
+        if utils.match_response(http_res, "204", "*"):
+            return
+        # 422 carries a structured validation-error payload.
+        if utils.match_response(http_res, "422", "application/json"):
+            response_data = unmarshal_json_response(
+                errors.HTTPValidationErrorData, http_res
+            )
+            raise errors.HTTPValidationError(response_data, http_res)
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise errors.SDKError("API error occurred", http_res, http_res_text)
+        if utils.match_response(http_res, "5XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+        raise errors.SDKError("Unexpected response received", http_res)
+
+    async def reset_workflow_async(
+        self,
+        *,
+        execution_id: str,
+        event_id: int,
+        reason: OptionalNullable[str] = UNSET,
+        exclude_signals: Optional[bool] = False,
+        exclude_updates: Optional[bool] = False,
+        retries: OptionalNullable[utils.RetryConfig] = UNSET,
+        server_url: Optional[str] = None,
+        timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
+    ):
+        r"""Reset Workflow
+
+        :param execution_id:
+        :param event_id: The event ID to reset the workflow execution to
+        :param reason: Reason for resetting the workflow execution
+        :param exclude_signals: Whether to exclude signals that happened after the reset point
+        :param exclude_updates: Whether to exclude updates that happened after the reset point
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
+        """
+        base_url = None
+        url_variables = None
+        # Per-call timeout overrides the SDK-wide default when provided.
+        if timeout_ms is None:
+            timeout_ms = self.sdk_configuration.timeout_ms
+
+        # An explicit server_url takes precedence over the configured base URL.
+        if server_url is not None:
+            base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
+
+        # Path parameter plus a nested JSON body describing the reset point.
+        request = models.ResetWorkflowV1WorkflowsExecutionsExecutionIDResetPostRequest(
+            execution_id=execution_id,
+            reset_invocation_body=models.ResetInvocationBody(
+                event_id=event_id,
+                reason=reason,
+                exclude_signals=exclude_signals,
+                exclude_updates=exclude_updates,
+            ),
+        )
+
+        req = self._build_request_async(
+            method="POST",
+            path="/v1/workflows/executions/{execution_id}/reset",
+            base_url=base_url,
+            url_variables=url_variables,
+            request=request,
+            request_body_required=True,
+            request_has_path_params=True,
+            request_has_query_params=True,
+            user_agent_header="user-agent",
+            accept_header_value="application/json",
+            http_headers=http_headers,
+            security=self.sdk_configuration.security,
+            get_serialized_body=lambda: utils.serialize_request_body(
+                request.reset_invocation_body,
+                False,
+                False,
+                "json",
+                models.ResetInvocationBody,
+            ),
+            allow_empty_value=None,
+            timeout_ms=timeout_ms,
+        )
+
+        # UNSET means "inherit the SDK-level retry configuration".
+        if retries == UNSET:
+            if self.sdk_configuration.retry_config is not UNSET:
+                retries = self.sdk_configuration.retry_config
+
+        retry_config = None
+        if isinstance(retries, utils.RetryConfig):
+            # Retry only on rate limiting (429) and transient 5XX statuses.
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+        http_res = await self.do_request_async(
+            hook_ctx=HookContext(
+                config=self.sdk_configuration,
+                base_url=base_url or "",
+                operation_id="reset_workflow_v1_workflows_executions__execution_id__reset_post",
+                oauth2_scopes=None,
+                security_source=get_security_from_env(
+                    self.sdk_configuration.security, models.Security
+                ),
+            ),
+            request=req,
+            error_status_codes=["422", "4XX", "5XX"],
+            retry_config=retry_config,
+        )
+
+        response_data: Any = None
+        # 204 No Content: reset accepted, nothing to return.
+        if utils.match_response(http_res, "204", "*"):
+            return
+        # 422 carries a structured validation-error payload.
+        if utils.match_response(http_res, "422", "application/json"):
+            response_data = unmarshal_json_response(
+                errors.HTTPValidationErrorData, http_res
+            )
+            raise errors.HTTPValidationError(response_data, http_res)
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = await utils.stream_to_text_async(http_res)
+            raise errors.SDKError("API error occurred", http_res, http_res_text)
+        if utils.match_response(http_res, "5XX", "*"):
+            http_res_text = await utils.stream_to_text_async(http_res)
+            raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+        raise errors.SDKError("Unexpected response received", http_res)
+
+    def update_workflow_execution(
+        self,
+        *,
+        execution_id: str,
+        name: str,
+        input: OptionalNullable[
+            Union[
+                models.UpdateInvocationBodyInput,
+                models.UpdateInvocationBodyInputTypedDict,
+            ]
+        ] = UNSET,
+        retries: OptionalNullable[utils.RetryConfig] = UNSET,
+        server_url: Optional[str] = None,
+        timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
+    ) -> models.UpdateWorkflowResponse:
+        r"""Update Workflow Execution
+
+        :param execution_id:
+        :param name: The name of the update to request
+        :param input: Input data for the update, matching its schema
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
+        """
+        base_url = None
+        url_variables = None
+        # Per-call timeout overrides the SDK-wide default when provided.
+        if timeout_ms is None:
+            timeout_ms = self.sdk_configuration.timeout_ms
+
+        # An explicit server_url takes precedence over the configured base URL.
+        if server_url is not None:
+            base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
+
+        # Coerce a TypedDict `input` into its pydantic model before sending.
+        request = models.UpdateWorkflowExecutionV1WorkflowsExecutionsExecutionIDUpdatesPostRequest(
+            execution_id=execution_id,
+            update_invocation_body=models.UpdateInvocationBody(
+                name=name,
+                input=utils.get_pydantic_model(
+                    input, OptionalNullable[models.UpdateInvocationBodyInput]
+                ),
+            ),
+        )
+
+        req = self._build_request(
+            method="POST",
+            path="/v1/workflows/executions/{execution_id}/updates",
+            base_url=base_url,
+            url_variables=url_variables,
+            request=request,
+            request_body_required=True,
+            request_has_path_params=True,
+            request_has_query_params=True,
+            user_agent_header="user-agent",
+            accept_header_value="application/json",
+            http_headers=http_headers,
+            security=self.sdk_configuration.security,
+            get_serialized_body=lambda: utils.serialize_request_body(
+                request.update_invocation_body,
+                False,
+                False,
+                "json",
+                models.UpdateInvocationBody,
+            ),
+            allow_empty_value=None,
+            timeout_ms=timeout_ms,
+        )
+
+        # UNSET means "inherit the SDK-level retry configuration".
+        if retries == UNSET:
+            if self.sdk_configuration.retry_config is not UNSET:
+                retries = self.sdk_configuration.retry_config
+
+        retry_config = None
+        if isinstance(retries, utils.RetryConfig):
+            # Retry only on rate limiting (429) and transient 5XX statuses.
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+        http_res = self.do_request(
+            hook_ctx=HookContext(
+                config=self.sdk_configuration,
+                base_url=base_url or "",
+                operation_id="update_workflow_execution_v1_workflows_executions__execution_id__updates_post",
+                oauth2_scopes=None,
+                security_source=get_security_from_env(
+                    self.sdk_configuration.security, models.Security
+                ),
+            ),
+            request=req,
+            error_status_codes=["422", "4XX", "5XX"],
+            retry_config=retry_config,
+        )
+
+        response_data: Any = None
+        # 200: the update result in the response body.
+        if utils.match_response(http_res, "200", "application/json"):
+            return unmarshal_json_response(models.UpdateWorkflowResponse, http_res)
+        # 422 carries a structured validation-error payload.
+        if utils.match_response(http_res, "422", "application/json"):
+            response_data = unmarshal_json_response(
+                errors.HTTPValidationErrorData, http_res
+            )
+            raise errors.HTTPValidationError(response_data, http_res)
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise errors.SDKError("API error occurred", http_res, http_res_text)
+        if utils.match_response(http_res, "5XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+        raise errors.SDKError("Unexpected response received", http_res)
+
+    async def update_workflow_execution_async(
+        self,
+        *,
+        execution_id: str,
+        name: str,
+        input: OptionalNullable[
+            Union[
+                models.UpdateInvocationBodyInput,
+                models.UpdateInvocationBodyInputTypedDict,
+            ]
+        ] = UNSET,
+        retries: OptionalNullable[utils.RetryConfig] = UNSET,
+        server_url: Optional[str] = None,
+        timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
+    ) -> models.UpdateWorkflowResponse:
+        r"""Update Workflow Execution
+
+        :param execution_id:
+        :param name: The name of the update to request
+        :param input: Input data for the update, matching its schema
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
+        """
+        base_url = None
+        url_variables = None
+        # Per-call timeout overrides the SDK-wide default when provided.
+        if timeout_ms is None:
+            timeout_ms = self.sdk_configuration.timeout_ms
+
+        # An explicit server_url takes precedence over the configured base URL.
+        if server_url is not None:
+            base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
+
+        # Coerce a TypedDict `input` into its pydantic model before sending.
+        request = models.UpdateWorkflowExecutionV1WorkflowsExecutionsExecutionIDUpdatesPostRequest(
+            execution_id=execution_id,
+            update_invocation_body=models.UpdateInvocationBody(
+                name=name,
+                input=utils.get_pydantic_model(
+                    input, OptionalNullable[models.UpdateInvocationBodyInput]
+                ),
+            ),
+        )
+
+        req = self._build_request_async(
+            method="POST",
+            path="/v1/workflows/executions/{execution_id}/updates",
+            base_url=base_url,
+            url_variables=url_variables,
+            request=request,
+            request_body_required=True,
+            request_has_path_params=True,
+            request_has_query_params=True,
+            user_agent_header="user-agent",
+            accept_header_value="application/json",
+            http_headers=http_headers,
+            security=self.sdk_configuration.security,
+            get_serialized_body=lambda: utils.serialize_request_body(
+                request.update_invocation_body,
+                False,
+                False,
+                "json",
+                models.UpdateInvocationBody,
+            ),
+            allow_empty_value=None,
+            timeout_ms=timeout_ms,
+        )
+
+        # UNSET means "inherit the SDK-level retry configuration".
+        if retries == UNSET:
+            if self.sdk_configuration.retry_config is not UNSET:
+                retries = self.sdk_configuration.retry_config
+
+        retry_config = None
+        if isinstance(retries, utils.RetryConfig):
+            # Retry only on rate limiting (429) and transient 5XX statuses.
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+        http_res = await self.do_request_async(
+            hook_ctx=HookContext(
+                config=self.sdk_configuration,
+                base_url=base_url or "",
+                operation_id="update_workflow_execution_v1_workflows_executions__execution_id__updates_post",
+                oauth2_scopes=None,
+                security_source=get_security_from_env(
+                    self.sdk_configuration.security, models.Security
+                ),
+            ),
+            request=req,
+            error_status_codes=["422", "4XX", "5XX"],
+            retry_config=retry_config,
+        )
+
+        response_data: Any = None
+        # 200: the update result in the response body.
+        if utils.match_response(http_res, "200", "application/json"):
+            return unmarshal_json_response(models.UpdateWorkflowResponse, http_res)
+        # 422 carries a structured validation-error payload.
+        if utils.match_response(http_res, "422", "application/json"):
+            response_data = unmarshal_json_response(
+                errors.HTTPValidationErrorData, http_res
+            )
+            raise errors.HTTPValidationError(response_data, http_res)
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = await utils.stream_to_text_async(http_res)
+            raise errors.SDKError("API error occurred", http_res, http_res_text)
+        if utils.match_response(http_res, "5XX", "*"):
+            http_res_text = await utils.stream_to_text_async(http_res)
+            raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+        raise errors.SDKError("Unexpected response received", http_res)
+
+    def get_workflow_execution_trace_otel(
+        self,
+        *,
+        execution_id: str,
+        retries: OptionalNullable[utils.RetryConfig] = UNSET,
+        server_url: Optional[str] = None,
+        timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
+    ) -> models.WorkflowExecutionTraceOTelResponse:
+        r"""Get Workflow Execution Trace Otel
+
+        :param execution_id:
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
+        """
+        base_url = None
+        url_variables = None
+        # Per-call timeout overrides the SDK-wide default when provided.
+        if timeout_ms is None:
+            timeout_ms = self.sdk_configuration.timeout_ms
+
+        # An explicit server_url takes precedence over the configured base URL.
+        if server_url is not None:
+            base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
+
+        # Wrap the path parameter in the generated request model.
+        request = models.GetWorkflowExecutionTraceOtelRequest(
+            execution_id=execution_id,
+        )
+
+        req = self._build_request(
+            method="GET",
+            path="/v1/workflows/executions/{execution_id}/trace/otel",
+            base_url=base_url,
+            url_variables=url_variables,
+            request=request,
+            request_body_required=False,
+            request_has_path_params=True,
+            request_has_query_params=True,
+            user_agent_header="user-agent",
+            accept_header_value="application/json",
+            http_headers=http_headers,
+            security=self.sdk_configuration.security,
+            allow_empty_value=None,
+            timeout_ms=timeout_ms,
+        )
+
+        # UNSET means "inherit the SDK-level retry configuration".
+        if retries == UNSET:
+            if self.sdk_configuration.retry_config is not UNSET:
+                retries = self.sdk_configuration.retry_config
+
+        retry_config = None
+        if isinstance(retries, utils.RetryConfig):
+            # Retry only on rate limiting (429) and transient 5XX statuses.
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+        http_res = self.do_request(
+            hook_ctx=HookContext(
+                config=self.sdk_configuration,
+                base_url=base_url or "",
+                operation_id="get_workflow_execution_trace_otel",
+                oauth2_scopes=None,
+                security_source=get_security_from_env(
+                    self.sdk_configuration.security, models.Security
+                ),
+            ),
+            request=req,
+            error_status_codes=["422", "4XX", "5XX"],
+            retry_config=retry_config,
+        )
+
+        response_data: Any = None
+        # 200: the OTel-format trace in the response body.
+        if utils.match_response(http_res, "200", "application/json"):
+            return unmarshal_json_response(
+                models.WorkflowExecutionTraceOTelResponse, http_res
+            )
+        # 422 carries a structured validation-error payload.
+        if utils.match_response(http_res, "422", "application/json"):
+            response_data = unmarshal_json_response(
+                errors.HTTPValidationErrorData, http_res
+            )
+            raise errors.HTTPValidationError(response_data, http_res)
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise errors.SDKError("API error occurred", http_res, http_res_text)
+        if utils.match_response(http_res, "5XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+        raise errors.SDKError("Unexpected response received", http_res)
+
+    async def get_workflow_execution_trace_otel_async(
+        self,
+        *,
+        execution_id: str,
+        retries: OptionalNullable[utils.RetryConfig] = UNSET,
+        server_url: Optional[str] = None,
+        timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
+    ) -> models.WorkflowExecutionTraceOTelResponse:
+        r"""Get Workflow Execution Trace Otel
+
+        :param execution_id:
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
+        """
+        base_url = None
+        url_variables = None
+        # Per-call timeout overrides the SDK-wide default when provided.
+        if timeout_ms is None:
+            timeout_ms = self.sdk_configuration.timeout_ms
+
+        # An explicit server_url takes precedence over the configured base URL.
+        if server_url is not None:
+            base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
+
+        # Wrap the path parameter in the generated request model.
+        request = models.GetWorkflowExecutionTraceOtelRequest(
+            execution_id=execution_id,
+        )
+
+        req = self._build_request_async(
+            method="GET",
+            path="/v1/workflows/executions/{execution_id}/trace/otel",
+            base_url=base_url,
+            url_variables=url_variables,
+            request=request,
+            request_body_required=False,
+            request_has_path_params=True,
+            request_has_query_params=True,
+            user_agent_header="user-agent",
+            accept_header_value="application/json",
+            http_headers=http_headers,
+            security=self.sdk_configuration.security,
+            allow_empty_value=None,
+            timeout_ms=timeout_ms,
+        )
+
+        # UNSET means "inherit the SDK-level retry configuration".
+        if retries == UNSET:
+            if self.sdk_configuration.retry_config is not UNSET:
+                retries = self.sdk_configuration.retry_config
+
+        retry_config = None
+        if isinstance(retries, utils.RetryConfig):
+            # Retry only on rate limiting (429) and transient 5XX statuses.
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+        http_res = await self.do_request_async(
+            hook_ctx=HookContext(
+                config=self.sdk_configuration,
+                base_url=base_url or "",
+                operation_id="get_workflow_execution_trace_otel",
+                oauth2_scopes=None,
+                security_source=get_security_from_env(
+                    self.sdk_configuration.security, models.Security
+                ),
+            ),
+            request=req,
+            error_status_codes=["422", "4XX", "5XX"],
+            retry_config=retry_config,
+        )
+
+        response_data: Any = None
+        # 200: the OTel-format trace in the response body.
+        if utils.match_response(http_res, "200", "application/json"):
+            return unmarshal_json_response(
+                models.WorkflowExecutionTraceOTelResponse, http_res
+            )
+        # 422 carries a structured validation-error payload.
+        if utils.match_response(http_res, "422", "application/json"):
+            response_data = unmarshal_json_response(
+                errors.HTTPValidationErrorData, http_res
+            )
+            raise errors.HTTPValidationError(response_data, http_res)
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = await utils.stream_to_text_async(http_res)
+            raise errors.SDKError("API error occurred", http_res, http_res_text)
+        if utils.match_response(http_res, "5XX", "*"):
+            http_res_text = await utils.stream_to_text_async(http_res)
+            raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+        raise errors.SDKError("Unexpected response received", http_res)
+
+    def get_workflow_execution_trace_summary(
+        self,
+        *,
+        execution_id: str,
+        retries: OptionalNullable[utils.RetryConfig] = UNSET,
+        server_url: Optional[str] = None,
+        timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
+    ) -> models.WorkflowExecutionTraceSummaryResponse:
+        r"""Get Workflow Execution Trace Summary
+
+        :param execution_id:
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
+        """
+        base_url = None
+        url_variables = None
+        # Per-call timeout overrides the SDK-wide default when provided.
+        if timeout_ms is None:
+            timeout_ms = self.sdk_configuration.timeout_ms
+
+        # An explicit server_url takes precedence over the configured base URL.
+        if server_url is not None:
+            base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
+
+        # Wrap the path parameter in the generated request model.
+        request = models.GetWorkflowExecutionTraceSummaryRequest(
+            execution_id=execution_id,
+        )
+
+        req = self._build_request(
+            method="GET",
+            path="/v1/workflows/executions/{execution_id}/trace/summary",
+            base_url=base_url,
+            url_variables=url_variables,
+            request=request,
+            request_body_required=False,
+            request_has_path_params=True,
+            request_has_query_params=True,
+            user_agent_header="user-agent",
+            accept_header_value="application/json",
+            http_headers=http_headers,
+            security=self.sdk_configuration.security,
+            allow_empty_value=None,
+            timeout_ms=timeout_ms,
+        )
+
+        # UNSET means "inherit the SDK-level retry configuration".
+        if retries == UNSET:
+            if self.sdk_configuration.retry_config is not UNSET:
+                retries = self.sdk_configuration.retry_config
+
+        retry_config = None
+        if isinstance(retries, utils.RetryConfig):
+            # Retry only on rate limiting (429) and transient 5XX statuses.
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+        http_res = self.do_request(
+            hook_ctx=HookContext(
+                config=self.sdk_configuration,
+                base_url=base_url or "",
+                operation_id="get_workflow_execution_trace_summary",
+                oauth2_scopes=None,
+                security_source=get_security_from_env(
+                    self.sdk_configuration.security, models.Security
+                ),
+            ),
+            request=req,
+            error_status_codes=["422", "4XX", "5XX"],
+            retry_config=retry_config,
+        )
+
+        response_data: Any = None
+        # 200: the trace summary in the response body.
+        if utils.match_response(http_res, "200", "application/json"):
+            return unmarshal_json_response(
+                models.WorkflowExecutionTraceSummaryResponse, http_res
+            )
+        # 422 carries a structured validation-error payload.
+        if utils.match_response(http_res, "422", "application/json"):
+            response_data = unmarshal_json_response(
+                errors.HTTPValidationErrorData, http_res
+            )
+            raise errors.HTTPValidationError(response_data, http_res)
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise errors.SDKError("API error occurred", http_res, http_res_text)
+        if utils.match_response(http_res, "5XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+        raise errors.SDKError("Unexpected response received", http_res)
+
+ async def get_workflow_execution_trace_summary_async(
+ self,
+ *,
+ execution_id: str,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> models.WorkflowExecutionTraceSummaryResponse:
+ r"""Get Workflow Execution Trace Summary
+
+ :param execution_id:
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.GetWorkflowExecutionTraceSummaryRequest(
+ execution_id=execution_id,
+ )
+
+ req = self._build_request_async(
+ method="GET",
+ path="/v1/workflows/executions/{execution_id}/trace/summary",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=False,
+ request_has_path_params=True,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = await self.do_request_async(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="get_workflow_execution_trace_summary",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "200", "application/json"):
+ return unmarshal_json_response(
+ models.WorkflowExecutionTraceSummaryResponse, http_res
+ )
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
+
+ def get_workflow_execution_trace_events(
+ self,
+ *,
+ execution_id: str,
+ merge_same_id_events: Optional[bool] = False,
+ include_internal_events: Optional[bool] = False,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> models.WorkflowExecutionTraceEventsResponse:
+ r"""Get Workflow Execution Trace Events
+
+ :param execution_id:
+ :param merge_same_id_events:
+ :param include_internal_events:
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.GetWorkflowExecutionTraceEventsRequest(
+ execution_id=execution_id,
+ merge_same_id_events=merge_same_id_events,
+ include_internal_events=include_internal_events,
+ )
+
+ req = self._build_request(
+ method="GET",
+ path="/v1/workflows/executions/{execution_id}/trace/events",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=False,
+ request_has_path_params=True,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = self.do_request(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="get_workflow_execution_trace_events",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "200", "application/json"):
+ return unmarshal_json_response(
+ models.WorkflowExecutionTraceEventsResponse, http_res
+ )
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
+
+ async def get_workflow_execution_trace_events_async(
+ self,
+ *,
+ execution_id: str,
+ merge_same_id_events: Optional[bool] = False,
+ include_internal_events: Optional[bool] = False,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> models.WorkflowExecutionTraceEventsResponse:
+ r"""Get Workflow Execution Trace Events
+
+ :param execution_id:
+ :param merge_same_id_events:
+ :param include_internal_events:
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.GetWorkflowExecutionTraceEventsRequest(
+ execution_id=execution_id,
+ merge_same_id_events=merge_same_id_events,
+ include_internal_events=include_internal_events,
+ )
+
+ req = self._build_request_async(
+ method="GET",
+ path="/v1/workflows/executions/{execution_id}/trace/events",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=False,
+ request_has_path_params=True,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = await self.do_request_async(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="get_workflow_execution_trace_events",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "200", "application/json"):
+ return unmarshal_json_response(
+ models.WorkflowExecutionTraceEventsResponse, http_res
+ )
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
+
+ def stream(
+ self,
+ *,
+ execution_id: str,
+ event_source: OptionalNullable[models.EventSource] = UNSET,
+ last_event_id: OptionalNullable[str] = UNSET,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> eventstreaming.EventStream[
+ models.StreamV1WorkflowsExecutionsExecutionIDStreamGetResponseBody
+ ]:
+ r"""Stream
+
+ :param execution_id:
+ :param event_source:
+ :param last_event_id:
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.StreamV1WorkflowsExecutionsExecutionIDStreamGetRequest(
+ execution_id=execution_id,
+ event_source=event_source,
+ last_event_id=last_event_id,
+ )
+
+ req = self._build_request(
+ method="GET",
+ path="/v1/workflows/executions/{execution_id}/stream",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=False,
+ request_has_path_params=True,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="text/event-stream",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = self.do_request(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="stream_v1_workflows_executions__execution_id__stream_get",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ stream=True,
+ retry_config=retry_config,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "200", "text/event-stream"):
+ return eventstreaming.EventStream(
+ http_res,
+ lambda raw: utils.unmarshal_json(
+ raw,
+ models.StreamV1WorkflowsExecutionsExecutionIDStreamGetResponseBody,
+ ),
+ client_ref=self,
+ data_required=False,
+ )
+ if utils.match_response(http_res, "422", "application/json"):
+ http_res_text = utils.stream_to_text(http_res)
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res, http_res_text
+ )
+ raise errors.HTTPValidationError(response_data, http_res, http_res_text)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("Unexpected response received", http_res, http_res_text)
+
+ async def stream_async(
+ self,
+ *,
+ execution_id: str,
+ event_source: OptionalNullable[models.EventSource] = UNSET,
+ last_event_id: OptionalNullable[str] = UNSET,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> eventstreaming.EventStreamAsync[
+ models.StreamV1WorkflowsExecutionsExecutionIDStreamGetResponseBody
+ ]:
+ r"""Stream
+
+ :param execution_id:
+ :param event_source:
+ :param last_event_id:
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.StreamV1WorkflowsExecutionsExecutionIDStreamGetRequest(
+ execution_id=execution_id,
+ event_source=event_source,
+ last_event_id=last_event_id,
+ )
+
+ req = self._build_request_async(
+ method="GET",
+ path="/v1/workflows/executions/{execution_id}/stream",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=False,
+ request_has_path_params=True,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="text/event-stream",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = await self.do_request_async(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="stream_v1_workflows_executions__execution_id__stream_get",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ stream=True,
+ retry_config=retry_config,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "200", "text/event-stream"):
+ return eventstreaming.EventStreamAsync(
+ http_res,
+ lambda raw: utils.unmarshal_json(
+ raw,
+ models.StreamV1WorkflowsExecutionsExecutionIDStreamGetResponseBody,
+ ),
+ client_ref=self,
+ data_required=False,
+ )
+ if utils.match_response(http_res, "422", "application/json"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res, http_res_text
+ )
+ raise errors.HTTPValidationError(response_data, http_res, http_res_text)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("Unexpected response received", http_res, http_res_text)
diff --git a/src/mistralai/client/libraries.py b/src/mistralai/client/libraries.py
index b8728362..84624c4d 100644
--- a/src/mistralai/client/libraries.py
+++ b/src/mistralai/client/libraries.py
@@ -35,6 +35,8 @@ def _init_sdks(self):
def list(
self,
*,
+ page_size: Optional[int] = 100,
+ page: Optional[int] = 0,
retries: OptionalNullable[utils.RetryConfig] = UNSET,
server_url: Optional[str] = None,
timeout_ms: Optional[int] = None,
@@ -44,6 +46,8 @@ def list(
List all libraries that you have created or have been shared with you.
+ :param page_size:
+ :param page:
:param retries: Override the default retry configuration for this method
:param server_url: Override the default server URL for this method
:param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -58,12 +62,18 @@ def list(
base_url = server_url
else:
base_url = self._get_url(base_url, url_variables)
+
+ request = models.LibrariesListV1Request(
+ page_size=page_size,
+ page=page,
+ )
+
req = self._build_request(
method="GET",
path="/v1/libraries",
base_url=base_url,
url_variables=url_variables,
- request=None,
+ request=request,
request_body_required=False,
request_has_path_params=False,
request_has_query_params=True,
@@ -94,12 +104,18 @@ def list(
),
),
request=req,
- error_status_codes=["4XX", "5XX"],
+ error_status_codes=["422", "4XX", "5XX"],
retry_config=retry_config,
)
+ response_data: Any = None
if utils.match_response(http_res, "200", "application/json"):
return unmarshal_json_response(models.ListLibrariesResponse, http_res)
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
if utils.match_response(http_res, "4XX", "*"):
http_res_text = utils.stream_to_text(http_res)
raise errors.SDKError("API error occurred", http_res, http_res_text)
@@ -112,6 +128,8 @@ def list(
async def list_async(
self,
*,
+ page_size: Optional[int] = 100,
+ page: Optional[int] = 0,
retries: OptionalNullable[utils.RetryConfig] = UNSET,
server_url: Optional[str] = None,
timeout_ms: Optional[int] = None,
@@ -121,6 +139,8 @@ async def list_async(
List all libraries that you have created or have been shared with you.
+ :param page_size:
+ :param page:
:param retries: Override the default retry configuration for this method
:param server_url: Override the default server URL for this method
:param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -135,12 +155,18 @@ async def list_async(
base_url = server_url
else:
base_url = self._get_url(base_url, url_variables)
+
+ request = models.LibrariesListV1Request(
+ page_size=page_size,
+ page=page,
+ )
+
req = self._build_request_async(
method="GET",
path="/v1/libraries",
base_url=base_url,
url_variables=url_variables,
- request=None,
+ request=request,
request_body_required=False,
request_has_path_params=False,
request_has_query_params=True,
@@ -171,12 +197,18 @@ async def list_async(
),
),
request=req,
- error_status_codes=["4XX", "5XX"],
+ error_status_codes=["422", "4XX", "5XX"],
retry_config=retry_config,
)
+ response_data: Any = None
if utils.match_response(http_res, "200", "application/json"):
return unmarshal_json_response(models.ListLibrariesResponse, http_res)
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
if utils.match_response(http_res, "4XX", "*"):
http_res_text = await utils.stream_to_text_async(http_res)
raise errors.SDKError("API error occurred", http_res, http_res_text)
diff --git a/src/mistralai/client/metrics.py b/src/mistralai/client/metrics.py
new file mode 100644
index 00000000..3df1ca56
--- /dev/null
+++ b/src/mistralai/client/metrics.py
@@ -0,0 +1,243 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 937cb03f8130
+
+from .basesdk import BaseSDK
+from datetime import datetime
+from mistralai.client import errors, models, utils
+from mistralai.client._hooks import HookContext
+from mistralai.client.types import OptionalNullable, UNSET
+from mistralai.client.utils import get_security_from_env
+from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response
+from typing import Any, Mapping, Optional
+
+
+class Metrics(BaseSDK):
+ def get_workflow_metrics(
+ self,
+ *,
+ workflow_name: str,
+ start_time: OptionalNullable[datetime] = UNSET,
+ end_time: OptionalNullable[datetime] = UNSET,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> models.WorkflowMetrics:
+ r"""Get Workflow Metrics
+
+ Get comprehensive metrics for a specific workflow.
+
+ Args:
+ workflow_name: The name of the workflow type to get metrics for
+ start_time: Optional start time filter (ISO 8601 format)
+ end_time: Optional end time filter (ISO 8601 format)
+
+ Returns:
+ WorkflowMetrics: Dictionary containing metrics:
+ - execution_count: Total number of executions
+ - success_count: Number of successful executions
+ - error_count: Number of failed/terminated executions
+ - average_latency_ms: Average execution duration in milliseconds
+ - retry_rate: Proportion of workflows with retries
+ - latency_over_time: Time-series data of execution durations
+
+ Example:
+ GET /v1/workflows/MyWorkflow/metrics
+ GET /v1/workflows/MyWorkflow/metrics?start_time=2025-01-01T00:00:00Z
+ GET /v1/workflows/MyWorkflow/metrics?start_time=2025-01-01T00:00:00Z&end_time=2025-12-31T23:59:59Z
+
+ :param workflow_name:
+ :param start_time: Filter workflows started after this time (ISO 8601)
+ :param end_time: Filter workflows started before this time (ISO 8601)
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.GetWorkflowMetricsV1WorkflowsWorkflowNameMetricsGetRequest(
+ workflow_name=workflow_name,
+ start_time=start_time,
+ end_time=end_time,
+ )
+
+ req = self._build_request(
+ method="GET",
+ path="/v1/workflows/{workflow_name}/metrics",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=False,
+ request_has_path_params=True,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = self.do_request(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="get_workflow_metrics_v1_workflows__workflow_name__metrics_get",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "200", "application/json"):
+ return unmarshal_json_response(models.WorkflowMetrics, http_res)
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
+
+ async def get_workflow_metrics_async(
+ self,
+ *,
+ workflow_name: str,
+ start_time: OptionalNullable[datetime] = UNSET,
+ end_time: OptionalNullable[datetime] = UNSET,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> models.WorkflowMetrics:
+ r"""Get Workflow Metrics
+
+ Get comprehensive metrics for a specific workflow.
+
+ Args:
+ workflow_name: The name of the workflow type to get metrics for
+ start_time: Optional start time filter (ISO 8601 format)
+ end_time: Optional end time filter (ISO 8601 format)
+
+ Returns:
+ WorkflowMetrics: Dictionary containing metrics:
+ - execution_count: Total number of executions
+ - success_count: Number of successful executions
+ - error_count: Number of failed/terminated executions
+ - average_latency_ms: Average execution duration in milliseconds
+ - retry_rate: Proportion of workflows with retries
+ - latency_over_time: Time-series data of execution durations
+
+ Example:
+ GET /v1/workflows/MyWorkflow/metrics
+ GET /v1/workflows/MyWorkflow/metrics?start_time=2025-01-01T00:00:00Z
+ GET /v1/workflows/MyWorkflow/metrics?start_time=2025-01-01T00:00:00Z&end_time=2025-12-31T23:59:59Z
+
+ :param workflow_name:
+ :param start_time: Filter workflows started after this time (ISO 8601)
+ :param end_time: Filter workflows started before this time (ISO 8601)
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.GetWorkflowMetricsV1WorkflowsWorkflowNameMetricsGetRequest(
+ workflow_name=workflow_name,
+ start_time=start_time,
+ end_time=end_time,
+ )
+
+ req = self._build_request_async(
+ method="GET",
+ path="/v1/workflows/{workflow_name}/metrics",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=False,
+ request_has_path_params=True,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = await self.do_request_async(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="get_workflow_metrics_v1_workflows__workflow_name__metrics_get",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "200", "application/json"):
+ return unmarshal_json_response(models.WorkflowMetrics, http_res)
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
diff --git a/src/mistralai/client/models/__init__.py b/src/mistralai/client/models/__init__.py
index 3ace7584..a6ac62a6 100644
--- a/src/mistralai/client/models/__init__.py
+++ b/src/mistralai/client/models/__init__.py
@@ -14,6 +14,62 @@
from mistralai.client.utils.dynamic_imports import lazy_getattr, lazy_dir
if TYPE_CHECKING:
+ from .activitytaskcompletedattributesrequest import (
+ ActivityTaskCompletedAttributesRequest,
+ ActivityTaskCompletedAttributesRequestTypedDict,
+ )
+ from .activitytaskcompletedattributesresponse import (
+ ActivityTaskCompletedAttributesResponse,
+ ActivityTaskCompletedAttributesResponseTypedDict,
+ )
+ from .activitytaskcompletedrequest import (
+ ActivityTaskCompletedRequest,
+ ActivityTaskCompletedRequestTypedDict,
+ )
+ from .activitytaskcompletedresponse import (
+ ActivityTaskCompletedResponse,
+ ActivityTaskCompletedResponseTypedDict,
+ )
+ from .activitytaskfailedattributes import (
+ ActivityTaskFailedAttributes,
+ ActivityTaskFailedAttributesTypedDict,
+ )
+ from .activitytaskfailedrequest import (
+ ActivityTaskFailedRequest,
+ ActivityTaskFailedRequestTypedDict,
+ )
+ from .activitytaskfailedresponse import (
+ ActivityTaskFailedResponse,
+ ActivityTaskFailedResponseTypedDict,
+ )
+ from .activitytaskretryingattributes import (
+ ActivityTaskRetryingAttributes,
+ ActivityTaskRetryingAttributesTypedDict,
+ )
+ from .activitytaskretryingrequest import (
+ ActivityTaskRetryingRequest,
+ ActivityTaskRetryingRequestTypedDict,
+ )
+ from .activitytaskretryingresponse import (
+ ActivityTaskRetryingResponse,
+ ActivityTaskRetryingResponseTypedDict,
+ )
+ from .activitytaskstartedattributesrequest import (
+ ActivityTaskStartedAttributesRequest,
+ ActivityTaskStartedAttributesRequestTypedDict,
+ )
+ from .activitytaskstartedattributesresponse import (
+ ActivityTaskStartedAttributesResponse,
+ ActivityTaskStartedAttributesResponseTypedDict,
+ )
+ from .activitytaskstartedrequest import (
+ ActivityTaskStartedRequest,
+ ActivityTaskStartedRequestTypedDict,
+ )
+ from .activitytaskstartedresponse import (
+ ActivityTaskStartedResponse,
+ ActivityTaskStartedResponseTypedDict,
+ )
from .agent import (
Agent,
AgentTool,
@@ -142,6 +198,10 @@
from .annotations import Annotations, AnnotationsTypedDict, Audience
from .apiendpoint import APIEndpoint
from .apikeyauth import APIKeyAuth, APIKeyAuthTypedDict
+ from .archive_workflow_v1_workflows_workflow_identifier_archive_putop import (
+ ArchiveWorkflowV1WorkflowsWorkflowIdentifierArchivePutRequest,
+ ArchiveWorkflowV1WorkflowsWorkflowIdentifierArchivePutRequestTypedDict,
+ )
from .archivemodelresponse import (
ArchiveModelResponse,
ArchiveModelResponseTypedDict,
@@ -168,13 +228,22 @@
from .authurlresponse import AuthURLResponse, AuthURLResponseTypedDict
from .basefielddefinition import (
BaseFieldDefinition,
+ BaseFieldDefinitionType,
BaseFieldDefinitionTypedDict,
SupportedOperator,
- TypeEnum,
)
from .basemodelcard import BaseModelCard, BaseModelCardTypedDict
from .basetaskstatus import BaseTaskStatus
from .batcherror import BatchError, BatchErrorTypedDict
+ from .batchexecutionbody import BatchExecutionBody, BatchExecutionBodyTypedDict
+ from .batchexecutionresponse import (
+ BatchExecutionResponse,
+ BatchExecutionResponseTypedDict,
+ )
+ from .batchexecutionresult import (
+ BatchExecutionResult,
+ BatchExecutionResultTypedDict,
+ )
from .batchjob import BatchJob, BatchJobTypedDict
from .batchjobstatus import BatchJobStatus
from .batchrequest import BatchRequest, BatchRequestTypedDict
@@ -184,6 +253,10 @@
)
from .builtinconnectors import BuiltInConnectors
from .campaign import Campaign, CampaignTypedDict
+ from .cancel_workflow_execution_v1_workflows_executions_execution_id_cancel_postop import (
+ CancelWorkflowExecutionV1WorkflowsExecutionsExecutionIDCancelPostRequest,
+ CancelWorkflowExecutionV1WorkflowsExecutionsExecutionIDCancelPostRequestTypedDict,
+ )
from .chatclassificationrequest import (
ChatClassificationRequest,
ChatClassificationRequestTypedDict,
@@ -347,10 +420,8 @@
from .connector_list_tools_v1op import (
ConnectorListToolsV1Request,
ConnectorListToolsV1RequestTypedDict,
- ResponseConnectorListToolsV11,
- ResponseConnectorListToolsV11TypedDict,
- ResponseConnectorListToolsV12,
- ResponseConnectorListToolsV12TypedDict,
+ ResponseConnectorListToolsV1,
+ ResponseConnectorListToolsV1TypedDict,
)
from .connector_list_v1op import (
ConnectorListV1Request,
@@ -520,6 +591,95 @@
CustomConnectorTypedDict,
UnknownAuthorization,
)
+ from .customtaskcanceledattributes import (
+ CustomTaskCanceledAttributes,
+ CustomTaskCanceledAttributesTypedDict,
+ )
+ from .customtaskcanceledrequest import (
+ CustomTaskCanceledRequest,
+ CustomTaskCanceledRequestTypedDict,
+ )
+ from .customtaskcanceledresponse import (
+ CustomTaskCanceledResponse,
+ CustomTaskCanceledResponseTypedDict,
+ )
+ from .customtaskcompletedattributesrequest import (
+ CustomTaskCompletedAttributesRequest,
+ CustomTaskCompletedAttributesRequestTypedDict,
+ )
+ from .customtaskcompletedattributesresponse import (
+ CustomTaskCompletedAttributesResponse,
+ CustomTaskCompletedAttributesResponseTypedDict,
+ )
+ from .customtaskcompletedrequest import (
+ CustomTaskCompletedRequest,
+ CustomTaskCompletedRequestTypedDict,
+ )
+ from .customtaskcompletedresponse import (
+ CustomTaskCompletedResponse,
+ CustomTaskCompletedResponseTypedDict,
+ )
+ from .customtaskfailedattributes import (
+ CustomTaskFailedAttributes,
+ CustomTaskFailedAttributesTypedDict,
+ )
+ from .customtaskfailedrequest import (
+ CustomTaskFailedRequest,
+ CustomTaskFailedRequestTypedDict,
+ )
+ from .customtaskfailedresponse import (
+ CustomTaskFailedResponse,
+ CustomTaskFailedResponseTypedDict,
+ )
+ from .customtaskinprogressattributesrequest import (
+ CustomTaskInProgressAttributesRequest,
+ CustomTaskInProgressAttributesRequestPayload,
+ CustomTaskInProgressAttributesRequestPayloadTypedDict,
+ CustomTaskInProgressAttributesRequestTypedDict,
+ )
+ from .customtaskinprogressattributesresponse import (
+ CustomTaskInProgressAttributesResponse,
+ CustomTaskInProgressAttributesResponsePayload,
+ CustomTaskInProgressAttributesResponsePayloadTypedDict,
+ CustomTaskInProgressAttributesResponseTypedDict,
+ UnknownCustomTaskInProgressAttributesResponsePayload,
+ )
+ from .customtaskinprogressrequest import (
+ CustomTaskInProgressRequest,
+ CustomTaskInProgressRequestTypedDict,
+ )
+ from .customtaskinprogressresponse import (
+ CustomTaskInProgressResponse,
+ CustomTaskInProgressResponseTypedDict,
+ )
+ from .customtaskstartedattributesrequest import (
+ CustomTaskStartedAttributesRequest,
+ CustomTaskStartedAttributesRequestTypedDict,
+ )
+ from .customtaskstartedattributesresponse import (
+ CustomTaskStartedAttributesResponse,
+ CustomTaskStartedAttributesResponseTypedDict,
+ )
+ from .customtaskstartedrequest import (
+ CustomTaskStartedRequest,
+ CustomTaskStartedRequestTypedDict,
+ )
+ from .customtaskstartedresponse import (
+ CustomTaskStartedResponse,
+ CustomTaskStartedResponseTypedDict,
+ )
+ from .customtasktimedoutattributes import (
+ CustomTaskTimedOutAttributes,
+ CustomTaskTimedOutAttributesTypedDict,
+ )
+ from .customtasktimedoutrequest import (
+ CustomTaskTimedOutRequest,
+ CustomTaskTimedOutRequestTypedDict,
+ )
+ from .customtasktimedoutresponse import (
+ CustomTaskTimedOutResponse,
+ CustomTaskTimedOutResponseTypedDict,
+ )
from .dataset import Dataset, DatasetTypedDict
from .datasetimporttask import DatasetImportTask, DatasetImportTaskTypedDict
from .datasetpreview import DatasetPreview, DatasetPreviewTypedDict
@@ -548,6 +708,10 @@
DeleteVoiceV1AudioVoicesVoiceIDDeleteRequest,
DeleteVoiceV1AudioVoicesVoiceIDDeleteRequestTypedDict,
)
+ from .deletebatchjobresponse import (
+ DeleteBatchJobResponse,
+ DeleteBatchJobResponseTypedDict,
+ )
from .deletedatasetrecordsrequest import (
DeleteDatasetRecordsRequest,
DeleteDatasetRecordsRequestTypedDict,
@@ -560,6 +724,19 @@
DeltaMessageContentTypedDict,
DeltaMessageTypedDict,
)
+ from .deploymentdetailresponse import (
+ DeploymentDetailResponse,
+ DeploymentDetailResponseTypedDict,
+ )
+ from .deploymentlistresponse import (
+ DeploymentListResponse,
+ DeploymentListResponseTypedDict,
+ )
+ from .deploymentresponse import DeploymentResponse, DeploymentResponseTypedDict
+ from .deploymentworkerresponse import (
+ DeploymentWorkerResponse,
+ DeploymentWorkerResponseTypedDict,
+ )
from .document import Document, DocumentTypedDict
from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict
from .documenttextcontent import DocumentTextContent, DocumentTextContentTypedDict
@@ -582,9 +759,25 @@
EmbeddingResponseData,
EmbeddingResponseDataTypedDict,
)
+ from .encodedpayloadoptions import EncodedPayloadOptions
from .encodingformat import EncodingFormat
from .entitytype import EntityType
from .event import Event, EventTypedDict
+ from .eventprogressstatus import EventProgressStatus
+ from .eventsource import EventSource
+ from .eventtype import EventType
+ from .execute_workflow_registration_v1_workflows_registrations_workflow_registration_id_execute_postop import (
+ ExecuteWorkflowRegistrationV1WorkflowsRegistrationsWorkflowRegistrationIDExecutePostRequest,
+ ExecuteWorkflowRegistrationV1WorkflowsRegistrationsWorkflowRegistrationIDExecutePostRequestTypedDict,
+ ResponseExecuteWorkflowRegistrationV1WorkflowsRegistrationsWorkflowRegistrationIDExecutePost,
+ ResponseExecuteWorkflowRegistrationV1WorkflowsRegistrationsWorkflowRegistrationIDExecutePostTypedDict,
+ )
+ from .execute_workflow_v1_workflows_workflow_identifier_execute_postop import (
+ ExecuteWorkflowV1WorkflowsWorkflowIdentifierExecutePostRequest,
+ ExecuteWorkflowV1WorkflowsWorkflowIdentifierExecutePostRequestTypedDict,
+ ResponseExecuteWorkflowV1WorkflowsWorkflowIdentifierExecutePost,
+ ResponseExecuteWorkflowV1WorkflowsWorkflowIdentifierExecutePostTypedDict,
+ )
from .executionconfig import ExecutionConfig, ExecutionConfigTypedDict
from .export_dataset_to_jsonl_v1_observability_datasets_dataset_id_exports_to_jsonl_getop import (
ExportDatasetToJsonlV1ObservabilityDatasetsDatasetIDExportsToJsonlGetRequest,
@@ -594,6 +787,7 @@
ExportDatasetResponse,
ExportDatasetResponseTypedDict,
)
+ from .failure import Failure, FailureTypedDict
from .feedresultchatcompletioneventpreview import (
FeedResultChatCompletionEventPreview,
FeedResultChatCompletionEventPreviewTypedDict,
@@ -762,6 +956,10 @@
GetDatasetsV1ObservabilityDatasetsGetRequest,
GetDatasetsV1ObservabilityDatasetsGetRequestTypedDict,
)
+ from .get_deployment_v1_workflows_deployments_name_getop import (
+ GetDeploymentV1WorkflowsDeploymentsNameGetRequest,
+ GetDeploymentV1WorkflowsDeploymentsNameGetRequestTypedDict,
+ )
from .get_judge_by_id_v1_observability_judges_judge_id_getop import (
GetJudgeByIDV1ObservabilityJudgesJudgeIDGetRequest,
GetJudgeByIDV1ObservabilityJudgesJudgeIDGetRequestTypedDict,
@@ -770,10 +968,25 @@
GetJudgesV1ObservabilityJudgesGetRequest,
GetJudgesV1ObservabilityJudgesGetRequestTypedDict,
)
+ from .get_run_history_v1_workflows_runs_run_id_history_getop import (
+ GetRunHistoryV1WorkflowsRunsRunIDHistoryGetRequest,
+ GetRunHistoryV1WorkflowsRunsRunIDHistoryGetRequestTypedDict,
+ )
+ from .get_run_v1_workflows_runs_run_id_getop import (
+ GetRunV1WorkflowsRunsRunIDGetRequest,
+ GetRunV1WorkflowsRunsRunIDGetRequestTypedDict,
+ )
from .get_similar_chat_completion_events_v1_observability_chat_completion_events_event_id_similar_events_getop import (
GetSimilarChatCompletionEventsV1ObservabilityChatCompletionEventsEventIDSimilarEventsGetRequest,
GetSimilarChatCompletionEventsV1ObservabilityChatCompletionEventsEventIDSimilarEventsGetRequestTypedDict,
)
+ from .get_stream_events_v1_workflows_events_stream_getop import (
+ GetStreamEventsV1WorkflowsEventsStreamGetRequest,
+ GetStreamEventsV1WorkflowsEventsStreamGetRequestTypedDict,
+ GetStreamEventsV1WorkflowsEventsStreamGetResponseBody,
+ GetStreamEventsV1WorkflowsEventsStreamGetResponseBodyTypedDict,
+ Scope,
+ )
from .get_voice_sample_audio_v1_audio_voices_voice_id_sample_getop import (
GetVoiceSampleAudioV1AudioVoicesVoiceIDSampleGetRequest,
GetVoiceSampleAudioV1AudioVoicesVoiceIDSampleGetRequestTypedDict,
@@ -782,6 +995,52 @@
GetVoiceV1AudioVoicesVoiceIDGetRequest,
GetVoiceV1AudioVoicesVoiceIDGetRequestTypedDict,
)
+ from .get_workflow_events_v1_workflows_events_list_getop import (
+ GetWorkflowEventsV1WorkflowsEventsListGetRequest,
+ GetWorkflowEventsV1WorkflowsEventsListGetRequestTypedDict,
+ )
+ from .get_workflow_execution_history_v1_workflows_executions_execution_id_history_getop import (
+ GetWorkflowExecutionHistoryV1WorkflowsExecutionsExecutionIDHistoryGetRequest,
+ GetWorkflowExecutionHistoryV1WorkflowsExecutionsExecutionIDHistoryGetRequestTypedDict,
+ )
+ from .get_workflow_execution_trace_eventsop import (
+ GetWorkflowExecutionTraceEventsRequest,
+ GetWorkflowExecutionTraceEventsRequestTypedDict,
+ )
+ from .get_workflow_execution_trace_otelop import (
+ GetWorkflowExecutionTraceOtelRequest,
+ GetWorkflowExecutionTraceOtelRequestTypedDict,
+ )
+ from .get_workflow_execution_trace_summaryop import (
+ GetWorkflowExecutionTraceSummaryRequest,
+ GetWorkflowExecutionTraceSummaryRequestTypedDict,
+ )
+ from .get_workflow_execution_v1_workflows_executions_execution_id_getop import (
+ GetWorkflowExecutionV1WorkflowsExecutionsExecutionIDGetRequest,
+ GetWorkflowExecutionV1WorkflowsExecutionsExecutionIDGetRequestTypedDict,
+ )
+ from .get_workflow_metrics_v1_workflows_workflow_name_metrics_getop import (
+ GetWorkflowMetricsV1WorkflowsWorkflowNameMetricsGetRequest,
+ GetWorkflowMetricsV1WorkflowsWorkflowNameMetricsGetRequestTypedDict,
+ )
+ from .get_workflow_registration_v1_workflows_registrations_workflow_registration_id_getop import (
+ GetWorkflowRegistrationV1WorkflowsRegistrationsWorkflowRegistrationIDGetRequest,
+ GetWorkflowRegistrationV1WorkflowsRegistrationsWorkflowRegistrationIDGetRequestTypedDict,
+ )
+ from .get_workflow_registrations_v1_workflows_registrations_getop import (
+ GetWorkflowRegistrationsV1WorkflowsRegistrationsGetRequest,
+ GetWorkflowRegistrationsV1WorkflowsRegistrationsGetRequestTypedDict,
+ )
+ from .get_workflow_v1_workflows_workflow_identifier_getop import (
+ GetWorkflowV1WorkflowsWorkflowIdentifierGetRequest,
+ GetWorkflowV1WorkflowsWorkflowIdentifierGetRequestTypedDict,
+ )
+ from .get_workflows_v1_workflows_getop import (
+ GetWorkflowsV1WorkflowsGetRequest,
+ GetWorkflowsV1WorkflowsGetRequestTypedDict,
+ GetWorkflowsV1WorkflowsGetResponse,
+ GetWorkflowsV1WorkflowsGetResponseTypedDict,
+ )
from .getfileresponse import GetFileResponse, GetFileResponseTypedDict
from .getsignedurlresponse import (
GetSignedURLResponse,
@@ -838,6 +1097,10 @@
JobsAPIRoutesBatchCancelBatchJobRequest,
JobsAPIRoutesBatchCancelBatchJobRequestTypedDict,
)
+ from .jobs_api_routes_batch_delete_batch_jobop import (
+ JobsAPIRoutesBatchDeleteBatchJobRequest,
+ JobsAPIRoutesBatchDeleteBatchJobRequestTypedDict,
+ )
from .jobs_api_routes_batch_get_batch_jobop import (
JobsAPIRoutesBatchGetBatchJobRequest,
JobsAPIRoutesBatchGetBatchJobRequestTypedDict,
@@ -895,6 +1158,25 @@
JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict,
UnknownJobsAPIRoutesFineTuningUpdateFineTunedModelResponse,
)
+ from .jsonpatchadd import JSONPatchAdd, JSONPatchAddTypedDict
+ from .jsonpatchappend import JSONPatchAppend, JSONPatchAppendTypedDict
+ from .jsonpatchpayloadrequest import (
+ JSONPatchPayloadRequest,
+ JSONPatchPayloadRequestTypedDict,
+ JSONPatchPayloadRequestValue,
+ JSONPatchPayloadRequestValueTypedDict,
+ )
+ from .jsonpatchpayloadresponse import (
+ JSONPatchPayloadResponse,
+ JSONPatchPayloadResponseTypedDict,
+ JSONPatchPayloadResponseValue,
+ JSONPatchPayloadResponseValueTypedDict,
+ UnknownJSONPatchPayloadResponseValue,
+ )
+ from .jsonpatchremove import JSONPatchRemove, JSONPatchRemoveTypedDict
+ from .jsonpatchreplace import JSONPatchReplace, JSONPatchReplaceTypedDict
+ from .jsonpayloadrequest import JSONPayloadRequest, JSONPayloadRequestTypedDict
+ from .jsonpayloadresponse import JSONPayloadResponse, JSONPayloadResponseTypedDict
from .jsonschema import JSONSchema, JSONSchemaTypedDict
from .judge import (
Judge,
@@ -992,6 +1274,10 @@
LibrariesGetV1Request,
LibrariesGetV1RequestTypedDict,
)
+ from .libraries_list_v1op import (
+ LibrariesListV1Request,
+ LibrariesListV1RequestTypedDict,
+ )
from .libraries_share_create_v1op import (
LibrariesShareCreateV1Request,
LibrariesShareCreateV1RequestTypedDict,
@@ -1009,13 +1295,26 @@
LibrariesUpdateV1RequestTypedDict,
)
from .library import Library, LibraryTypedDict
+ from .list_deployments_v1_workflows_deployments_getop import (
+ ListDeploymentsV1WorkflowsDeploymentsGetRequest,
+ ListDeploymentsV1WorkflowsDeploymentsGetRequestTypedDict,
+ )
from .list_models_v1_models_getop import (
ListModelsV1ModelsGetRequest,
ListModelsV1ModelsGetRequestTypedDict,
)
+ from .list_runs_v1_workflows_runs_getop import (
+ ListRunsV1WorkflowsRunsGetRequest,
+ ListRunsV1WorkflowsRunsGetRequestTypedDict,
+ ListRunsV1WorkflowsRunsGetResponse,
+ ListRunsV1WorkflowsRunsGetResponseTypedDict,
+ ListRunsV1WorkflowsRunsGetStatus,
+ ListRunsV1WorkflowsRunsGetStatusTypedDict,
+ )
from .list_voices_v1_audio_voices_getop import (
ListVoicesV1AudioVoicesGetRequest,
ListVoicesV1AudioVoicesGetRequestTypedDict,
+ ListVoicesV1AudioVoicesGetType,
)
from .listbatchjobsresponse import (
ListBatchJobsResponse,
@@ -1063,6 +1362,12 @@
ListLibrariesResponseTypedDict,
)
from .listsharingresponse import ListSharingResponse, ListSharingResponseTypedDict
+ from .listworkfloweventresponse import (
+ ListWorkflowEventResponse,
+ ListWorkflowEventResponseEvent,
+ ListWorkflowEventResponseEventTypedDict,
+ ListWorkflowEventResponseTypedDict,
+ )
from .mcpservericon import MCPServerIcon, MCPServerIconTypedDict
from .mcptool import MCPTool, MCPToolTypedDict
from .mcptoolmeta import MCPToolMeta, MCPToolMetaTypedDict
@@ -1136,6 +1441,7 @@
)
from .moderationobject import ModerationObject, ModerationObjectTypedDict
from .moderationresponse import ModerationResponse, ModerationResponseTypedDict
+ from .networkencodedinput import NetworkEncodedInput, NetworkEncodedInputTypedDict
from .oauth2tokenauth import OAuth2TokenAuth, OAuth2TokenAuthTypedDict
from .observabilityerrorcode import ObservabilityErrorCode
from .observabilityerrordetail import (
@@ -1207,6 +1513,21 @@
from .processingstatus import ProcessingStatus, ProcessingStatusTypedDict
from .processstatus import ProcessStatus
from .prompttokensdetails import PromptTokensDetails, PromptTokensDetailsTypedDict
+ from .query_workflow_execution_v1_workflows_executions_execution_id_queries_postop import (
+ QueryWorkflowExecutionV1WorkflowsExecutionsExecutionIDQueriesPostRequest,
+ QueryWorkflowExecutionV1WorkflowsExecutionsExecutionIDQueriesPostRequestTypedDict,
+ )
+ from .querydefinition import QueryDefinition, QueryDefinitionTypedDict
+ from .queryinvocationbody import (
+ QueryInvocationBody,
+ QueryInvocationBodyInput,
+ QueryInvocationBodyInputTypedDict,
+ QueryInvocationBodyTypedDict,
+ )
+ from .queryworkflowresponse import (
+ QueryWorkflowResponse,
+ QueryWorkflowResponseTypedDict,
+ )
from .realtimetranscriptionerror import (
RealtimeTranscriptionError,
RealtimeTranscriptionErrorTypedDict,
@@ -1257,6 +1578,11 @@
ReferenceIDTypedDict,
)
from .requestsource import RequestSource
+ from .reset_workflow_v1_workflows_executions_execution_id_reset_postop import (
+ ResetWorkflowV1WorkflowsExecutionsExecutionIDResetPostRequest,
+ ResetWorkflowV1WorkflowsExecutionsExecutionIDResetPostRequestTypedDict,
+ )
+ from .resetinvocationbody import ResetInvocationBody, ResetInvocationBodyTypedDict
from .resourcelink import ResourceLink, ResourceLinkTypedDict
from .resourcevisibility import ResourceVisibility
from .responsedoneevent import ResponseDoneEvent, ResponseDoneEventTypedDict
@@ -1276,6 +1602,22 @@
)
from .roles import Roles
from .sampletype import SampleType
+ from .scalarmetric import (
+ ScalarMetric,
+ ScalarMetricTypedDict,
+ ScalarMetricValue,
+ ScalarMetricValueTypedDict,
+ )
+ from .schedulecalendar import ScheduleCalendar, ScheduleCalendarTypedDict
+ from .scheduledefinition import ScheduleDefinition, ScheduleDefinitionTypedDict
+ from .scheduledefinitionoutput import (
+ ScheduleDefinitionOutput,
+ ScheduleDefinitionOutputTypedDict,
+ )
+ from .scheduleinterval import ScheduleInterval, ScheduleIntervalTypedDict
+ from .scheduleoverlappolicy import ScheduleOverlapPolicy
+ from .schedulepolicy import SchedulePolicy, SchedulePolicyTypedDict
+ from .schedulerange import ScheduleRange, ScheduleRangeTypedDict
from .searchchatcompletioneventidsrequest import (
SearchChatCompletionEventIdsRequest,
SearchChatCompletionEventIdsRequestTypedDict,
@@ -1297,6 +1639,23 @@
from .sharing import Sharing, SharingTypedDict
from .sharingdelete import SharingDelete, SharingDeleteTypedDict
from .sharingrequest import SharingRequest, SharingRequestTypedDict
+ from .signal_workflow_execution_v1_workflows_executions_execution_id_signals_postop import (
+ SignalWorkflowExecutionV1WorkflowsExecutionsExecutionIDSignalsPostRequest,
+ SignalWorkflowExecutionV1WorkflowsExecutionsExecutionIDSignalsPostRequestTypedDict,
+ )
+ from .signaldefinition import SignalDefinition, SignalDefinitionTypedDict
+ from .signalinvocationbody import (
+ SignalInvocationBody,
+ SignalInvocationBodyInput,
+ SignalInvocationBodyInputTypedDict,
+ SignalInvocationBodyNetworkEncodedInput,
+ SignalInvocationBodyNetworkEncodedInputTypedDict,
+ SignalInvocationBodyTypedDict,
+ )
+ from .signalworkflowresponse import (
+ SignalWorkflowResponse,
+ SignalWorkflowResponseTypedDict,
+ )
from .source import Source
from .speech_v1_audio_speech_postop import (
SpeechResponse,
@@ -1318,6 +1677,22 @@
from .speechstreamdone import SpeechStreamDone, SpeechStreamDoneTypedDict
from .speechstreameventtypes import SpeechStreamEventTypes
from .ssetypes import SSETypes
+ from .stream_v1_workflows_executions_execution_id_stream_getop import (
+ StreamV1WorkflowsExecutionsExecutionIDStreamGetRequest,
+ StreamV1WorkflowsExecutionsExecutionIDStreamGetRequestTypedDict,
+ StreamV1WorkflowsExecutionsExecutionIDStreamGetResponseBody,
+ StreamV1WorkflowsExecutionsExecutionIDStreamGetResponseBodyTypedDict,
+ )
+ from .streameventssepayload import (
+ StreamEventSsePayload,
+ StreamEventSsePayloadData,
+ StreamEventSsePayloadDataTypedDict,
+ StreamEventSsePayloadTypedDict,
+ )
+ from .streameventworkflowcontext import (
+ StreamEventWorkflowContext,
+ StreamEventWorkflowContextTypedDict,
+ )
from .systemmessage import (
SystemMessage,
SystemMessageContent,
@@ -1328,6 +1703,39 @@
SystemMessageContentChunks,
SystemMessageContentChunksTypedDict,
)
+ from .tempogettraceresponse import (
+ TempoGetTraceResponse,
+ TempoGetTraceResponseTypedDict,
+ )
+ from .tempotraceattribute import (
+ TempoTraceAttribute,
+ TempoTraceAttributeTypedDict,
+ TempoTraceAttributeValue,
+ TempoTraceAttributeValueTypedDict,
+ )
+ from .tempotraceattributeboolvalue import (
+ TempoTraceAttributeBoolValue,
+ TempoTraceAttributeBoolValueTypedDict,
+ )
+ from .tempotraceattributeintvalue import (
+ TempoTraceAttributeIntValue,
+ TempoTraceAttributeIntValueTypedDict,
+ )
+ from .tempotraceattributestringvalue import (
+ TempoTraceAttributeStringValue,
+ TempoTraceAttributeStringValueTypedDict,
+ )
+ from .tempotracebatch import TempoTraceBatch, TempoTraceBatchTypedDict
+ from .tempotraceevent import TempoTraceEvent, TempoTraceEventTypedDict
+ from .tempotraceresource import TempoTraceResource, TempoTraceResourceTypedDict
+ from .tempotracescope import TempoTraceScope, TempoTraceScopeTypedDict
+ from .tempotracescopekind import TempoTraceScopeKind
+ from .tempotracescopespan import TempoTraceScopeSpan, TempoTraceScopeSpanTypedDict
+ from .tempotracespan import TempoTraceSpan, TempoTraceSpanTypedDict
+ from .terminate_workflow_execution_v1_workflows_executions_execution_id_terminate_postop import (
+ TerminateWorkflowExecutionV1WorkflowsExecutionsExecutionIDTerminatePostRequest,
+ TerminateWorkflowExecutionV1WorkflowsExecutionsExecutionIDTerminatePostRequestTypedDict,
+ )
from .textchunk import TextChunk, TextChunkTypedDict
from .textcontent import TextContent, TextContentTypedDict
from .textresourcecontents import (
@@ -1335,6 +1743,14 @@
TextResourceContentsTypedDict,
)
from .thinkchunk import ThinkChunk, ThinkChunkTypedDict, Thinking, ThinkingTypedDict
+ from .timeseriesmetric import (
+ TimeSeriesMetric,
+ TimeSeriesMetricTypedDict,
+ TimeSeriesMetricValue1,
+ TimeSeriesMetricValue1TypedDict,
+ TimeSeriesMetricValue2,
+ TimeSeriesMetricValue2TypedDict,
+ )
from .timestampgranularity import TimestampGranularity
from .tool import Tool, ToolTypedDict
from .toolannotations import ToolAnnotations, ToolAnnotationsTypedDict
@@ -1427,10 +1843,18 @@
)
from .turbinetoollocale import TurbineToolLocale, TurbineToolLocaleTypedDict
from .turbinetoolmeta import TurbineToolMeta, TurbineToolMetaTypedDict
+ from .unarchive_workflow_v1_workflows_workflow_identifier_unarchive_putop import (
+ UnarchiveWorkflowV1WorkflowsWorkflowIdentifierUnarchivePutRequest,
+ UnarchiveWorkflowV1WorkflowsWorkflowIdentifierUnarchivePutRequestTypedDict,
+ )
from .unarchivemodelresponse import (
UnarchiveModelResponse,
UnarchiveModelResponseTypedDict,
)
+ from .unschedule_workflow_v1_workflows_schedules_schedule_id_deleteop import (
+ UnscheduleWorkflowV1WorkflowsSchedulesScheduleIDDeleteRequest,
+ UnscheduleWorkflowV1WorkflowsSchedulesScheduleIDDeleteRequestTypedDict,
+ )
from .update_dataset_record_payload_v1_observability_dataset_records_dataset_record_id_payload_putop import (
UpdateDatasetRecordPayloadV1ObservabilityDatasetRecordsDatasetRecordIDPayloadPutRequest,
UpdateDatasetRecordPayloadV1ObservabilityDatasetRecordsDatasetRecordIDPayloadPutRequestTypedDict,
@@ -1451,6 +1875,14 @@
UpdateVoiceV1AudioVoicesVoiceIDPatchRequest,
UpdateVoiceV1AudioVoicesVoiceIDPatchRequestTypedDict,
)
+ from .update_workflow_execution_v1_workflows_executions_execution_id_updates_postop import (
+ UpdateWorkflowExecutionV1WorkflowsExecutionsExecutionIDUpdatesPostRequest,
+ UpdateWorkflowExecutionV1WorkflowsExecutionsExecutionIDUpdatesPostRequestTypedDict,
+ )
+ from .update_workflow_v1_workflows_workflow_identifier_putop import (
+ UpdateWorkflowV1WorkflowsWorkflowIdentifierPutRequest,
+ UpdateWorkflowV1WorkflowsWorkflowIdentifierPutRequestTypedDict,
+ )
from .updateagentrequest import (
UpdateAgentRequest,
UpdateAgentRequestTool,
@@ -1473,12 +1905,19 @@
UpdateDatasetRequest,
UpdateDatasetRequestTypedDict,
)
+ from .updatedefinition import UpdateDefinition, UpdateDefinitionTypedDict
from .updatedocumentrequest import (
Attributes,
AttributesTypedDict,
UpdateDocumentRequest,
UpdateDocumentRequestTypedDict,
)
+ from .updateinvocationbody import (
+ UpdateInvocationBody,
+ UpdateInvocationBodyInput,
+ UpdateInvocationBodyInputTypedDict,
+ UpdateInvocationBodyTypedDict,
+ )
from .updatejudgerequest import (
UpdateJudgeRequest,
UpdateJudgeRequestOutput,
@@ -1490,6 +1929,10 @@
UpdateLibraryRequestTypedDict,
)
from .updatemodelrequest import UpdateModelRequest, UpdateModelRequestTypedDict
+ from .updateworkflowresponse import (
+ UpdateWorkflowResponse,
+ UpdateWorkflowResponseTypedDict,
+ )
from .usageinfo import UsageInfo, UsageInfoTypedDict
from .usageinfo_dollar_defs import UsageInfoDollarDefs, UsageInfoDollarDefsTypedDict
from .usermessage import (
@@ -1519,9 +1962,241 @@
WebSearchPremiumTool,
WebSearchPremiumToolTypedDict,
)
- from .websearchtool import (
- WebSearchTool,
- WebSearchToolTypedDict,
+ from .websearchtool import WebSearchTool, WebSearchToolTypedDict
+ from .workflow import Workflow, WorkflowTypedDict
+ from .workflowarchiveresponse import (
+ WorkflowArchiveResponse,
+ WorkflowArchiveResponseTypedDict,
+ )
+ from .workflowbasicdefinition import (
+ WorkflowBasicDefinition,
+ WorkflowBasicDefinitionTypedDict,
+ )
+ from .workflowcodedefinition import (
+ WorkflowCodeDefinition,
+ WorkflowCodeDefinitionTypedDict,
+ )
+ from .workfloweventbatchrequest import (
+ WorkflowEventBatchRequest,
+ WorkflowEventBatchRequestEvent,
+ WorkflowEventBatchRequestEventTypedDict,
+ WorkflowEventBatchRequestTypedDict,
+ )
+ from .workfloweventbatchresponse import (
+ WorkflowEventBatchResponse,
+ WorkflowEventBatchResponseStatus,
+ WorkflowEventBatchResponseTypedDict,
+ )
+ from .workfloweventrequest import (
+ WorkflowEventRequest,
+ WorkflowEventRequestEvent,
+ WorkflowEventRequestEventTypedDict,
+ WorkflowEventRequestTypedDict,
+ )
+ from .workfloweventresponse import (
+ WorkflowEventResponse,
+ WorkflowEventResponseStatus,
+ WorkflowEventResponseTypedDict,
+ )
+ from .workfloweventtype import WorkflowEventType
+ from .workflowexecutioncanceledattributes import (
+ WorkflowExecutionCanceledAttributes,
+ WorkflowExecutionCanceledAttributesTypedDict,
+ )
+ from .workflowexecutioncanceledrequest import (
+ WorkflowExecutionCanceledRequest,
+ WorkflowExecutionCanceledRequestTypedDict,
+ )
+ from .workflowexecutioncanceledresponse import (
+ WorkflowExecutionCanceledResponse,
+ WorkflowExecutionCanceledResponseTypedDict,
+ )
+ from .workflowexecutioncompletedattributesrequest import (
+ WorkflowExecutionCompletedAttributesRequest,
+ WorkflowExecutionCompletedAttributesRequestTypedDict,
+ )
+ from .workflowexecutioncompletedattributesresponse import (
+ WorkflowExecutionCompletedAttributesResponse,
+ WorkflowExecutionCompletedAttributesResponseTypedDict,
+ )
+ from .workflowexecutioncompletedrequest import (
+ WorkflowExecutionCompletedRequest,
+ WorkflowExecutionCompletedRequestTypedDict,
+ )
+ from .workflowexecutioncompletedresponse import (
+ WorkflowExecutionCompletedResponse,
+ WorkflowExecutionCompletedResponseTypedDict,
+ )
+ from .workflowexecutioncontinuedasnewattributesrequest import (
+ WorkflowExecutionContinuedAsNewAttributesRequest,
+ WorkflowExecutionContinuedAsNewAttributesRequestTypedDict,
+ )
+ from .workflowexecutioncontinuedasnewattributesresponse import (
+ WorkflowExecutionContinuedAsNewAttributesResponse,
+ WorkflowExecutionContinuedAsNewAttributesResponseTypedDict,
+ )
+ from .workflowexecutioncontinuedasnewrequest import (
+ WorkflowExecutionContinuedAsNewRequest,
+ WorkflowExecutionContinuedAsNewRequestTypedDict,
+ )
+ from .workflowexecutioncontinuedasnewresponse import (
+ WorkflowExecutionContinuedAsNewResponse,
+ WorkflowExecutionContinuedAsNewResponseTypedDict,
+ )
+ from .workflowexecutionfailedattributes import (
+ WorkflowExecutionFailedAttributes,
+ WorkflowExecutionFailedAttributesTypedDict,
+ )
+ from .workflowexecutionfailedrequest import (
+ WorkflowExecutionFailedRequest,
+ WorkflowExecutionFailedRequestTypedDict,
+ )
+ from .workflowexecutionfailedresponse import (
+ WorkflowExecutionFailedResponse,
+ WorkflowExecutionFailedResponseTypedDict,
+ )
+ from .workflowexecutionlistresponse import (
+ WorkflowExecutionListResponse,
+ WorkflowExecutionListResponseTypedDict,
+ )
+ from .workflowexecutionprogresstraceevent import (
+ WorkflowExecutionProgressTraceEvent,
+ WorkflowExecutionProgressTraceEventTypedDict,
+ )
+ from .workflowexecutionrequest import (
+ WorkflowExecutionRequest,
+ WorkflowExecutionRequestTypedDict,
+ )
+ from .workflowexecutionresponse import (
+ WorkflowExecutionResponse,
+ WorkflowExecutionResponseTypedDict,
+ )
+ from .workflowexecutionstartedattributesrequest import (
+ WorkflowExecutionStartedAttributesRequest,
+ WorkflowExecutionStartedAttributesRequestTypedDict,
+ )
+ from .workflowexecutionstartedattributesresponse import (
+ WorkflowExecutionStartedAttributesResponse,
+ WorkflowExecutionStartedAttributesResponseTypedDict,
+ )
+ from .workflowexecutionstartedrequest import (
+ WorkflowExecutionStartedRequest,
+ WorkflowExecutionStartedRequestTypedDict,
+ )
+ from .workflowexecutionstartedresponse import (
+ WorkflowExecutionStartedResponse,
+ WorkflowExecutionStartedResponseTypedDict,
+ )
+ from .workflowexecutionstatus import WorkflowExecutionStatus
+ from .workflowexecutionsyncresponse import (
+ WorkflowExecutionSyncResponse,
+ WorkflowExecutionSyncResponseTypedDict,
+ )
+ from .workflowexecutiontraceevent import (
+ WorkflowExecutionTraceEvent,
+ WorkflowExecutionTraceEventTypedDict,
+ )
+ from .workflowexecutiontraceeventsresponse import (
+ WorkflowExecutionTraceEventsResponse,
+ WorkflowExecutionTraceEventsResponseEvent,
+ WorkflowExecutionTraceEventsResponseEventTypedDict,
+ WorkflowExecutionTraceEventsResponseTypedDict,
+ )
+ from .workflowexecutiontraceotelresponse import (
+ WorkflowExecutionTraceOTelResponse,
+ WorkflowExecutionTraceOTelResponseTypedDict,
+ )
+ from .workflowexecutiontracesummaryattributesvalues import (
+ WorkflowExecutionTraceSummaryAttributesValues,
+ WorkflowExecutionTraceSummaryAttributesValuesTypedDict,
+ )
+ from .workflowexecutiontracesummaryresponse import (
+ WorkflowExecutionTraceSummaryResponse,
+ WorkflowExecutionTraceSummaryResponseTypedDict,
+ )
+ from .workflowexecutiontracesummaryspan import (
+ WorkflowExecutionTraceSummarySpan,
+ WorkflowExecutionTraceSummarySpanTypedDict,
+ )
+ from .workflowexecutionwithoutresultresponse import (
+ WorkflowExecutionWithoutResultResponse,
+ WorkflowExecutionWithoutResultResponseTypedDict,
+ )
+ from .workflowgetresponse import WorkflowGetResponse, WorkflowGetResponseTypedDict
+ from .workflowlistresponse import (
+ WorkflowListResponse,
+ WorkflowListResponseTypedDict,
+ )
+ from .workflowmetadata import WorkflowMetadata, WorkflowMetadataTypedDict
+ from .workflowmetrics import WorkflowMetrics, WorkflowMetricsTypedDict
+ from .workflowregistration import (
+ WorkflowRegistration,
+ WorkflowRegistrationTypedDict,
+ )
+ from .workflowregistrationgetresponse import (
+ WorkflowRegistrationGetResponse,
+ WorkflowRegistrationGetResponseTypedDict,
+ )
+ from .workflowregistrationlistresponse import (
+ WorkflowRegistrationListResponse,
+ WorkflowRegistrationListResponseTypedDict,
+ )
+ from .workflowregistrationwithworkerstatus import (
+ WorkflowRegistrationWithWorkerStatus,
+ WorkflowRegistrationWithWorkerStatusTypedDict,
+ )
+ from .workflowschedulelistresponse import (
+ WorkflowScheduleListResponse,
+ WorkflowScheduleListResponseTypedDict,
+ )
+ from .workflowschedulerequest import (
+ WorkflowScheduleRequest,
+ WorkflowScheduleRequestTypedDict,
+ )
+ from .workflowscheduleresponse import (
+ WorkflowScheduleResponse,
+ WorkflowScheduleResponseTypedDict,
+ )
+ from .workflowtaskfailedattributes import (
+ WorkflowTaskFailedAttributes,
+ WorkflowTaskFailedAttributesTypedDict,
+ )
+ from .workflowtaskfailedrequest import (
+ WorkflowTaskFailedRequest,
+ WorkflowTaskFailedRequestTypedDict,
+ )
+ from .workflowtaskfailedresponse import (
+ WorkflowTaskFailedResponse,
+ WorkflowTaskFailedResponseTypedDict,
+ )
+ from .workflowtasktimedoutattributes import (
+ WorkflowTaskTimedOutAttributes,
+ WorkflowTaskTimedOutAttributesTypedDict,
+ )
+ from .workflowtasktimedoutrequest import (
+ WorkflowTaskTimedOutRequest,
+ WorkflowTaskTimedOutRequestTypedDict,
+ )
+ from .workflowtasktimedoutresponse import (
+ WorkflowTaskTimedOutResponse,
+ WorkflowTaskTimedOutResponseTypedDict,
+ )
+ from .workflowtype import WorkflowType
+ from .workflowunarchiveresponse import (
+ WorkflowUnarchiveResponse,
+ WorkflowUnarchiveResponseTypedDict,
+ )
+ from .workflowupdaterequest import (
+ WorkflowUpdateRequest,
+ WorkflowUpdateRequestTypedDict,
+ )
+ from .workflowupdateresponse import (
+ WorkflowUpdateResponse,
+ WorkflowUpdateResponseTypedDict,
+ )
+ from .workflowwithworkerstatus import (
+ WorkflowWithWorkerStatus,
+ WorkflowWithWorkerStatusTypedDict,
) # Pydantic models with forward references
FilterGroup.model_rebuild()
@@ -1530,6 +2205,34 @@
"APIEndpoint",
"APIKeyAuth",
"APIKeyAuthTypedDict",
+ "ActivityTaskCompletedAttributesRequest",
+ "ActivityTaskCompletedAttributesRequestTypedDict",
+ "ActivityTaskCompletedAttributesResponse",
+ "ActivityTaskCompletedAttributesResponseTypedDict",
+ "ActivityTaskCompletedRequest",
+ "ActivityTaskCompletedRequestTypedDict",
+ "ActivityTaskCompletedResponse",
+ "ActivityTaskCompletedResponseTypedDict",
+ "ActivityTaskFailedAttributes",
+ "ActivityTaskFailedAttributesTypedDict",
+ "ActivityTaskFailedRequest",
+ "ActivityTaskFailedRequestTypedDict",
+ "ActivityTaskFailedResponse",
+ "ActivityTaskFailedResponseTypedDict",
+ "ActivityTaskRetryingAttributes",
+ "ActivityTaskRetryingAttributesTypedDict",
+ "ActivityTaskRetryingRequest",
+ "ActivityTaskRetryingRequestTypedDict",
+ "ActivityTaskRetryingResponse",
+ "ActivityTaskRetryingResponseTypedDict",
+ "ActivityTaskStartedAttributesRequest",
+ "ActivityTaskStartedAttributesRequestTypedDict",
+ "ActivityTaskStartedAttributesResponse",
+ "ActivityTaskStartedAttributesResponseTypedDict",
+ "ActivityTaskStartedRequest",
+ "ActivityTaskStartedRequestTypedDict",
+ "ActivityTaskStartedResponse",
+ "ActivityTaskStartedResponseTypedDict",
"Agent",
"AgentAliasResponse",
"AgentAliasResponseTypedDict",
@@ -1612,6 +2315,8 @@
"AnswerTypedDict",
"ArchiveModelResponse",
"ArchiveModelResponseTypedDict",
+ "ArchiveWorkflowV1WorkflowsWorkflowIdentifierArchivePutRequest",
+ "ArchiveWorkflowV1WorkflowsWorkflowIdentifierArchivePutRequestTypedDict",
"Arguments",
"ArgumentsTypedDict",
"AssistantMessage",
@@ -1639,12 +2344,19 @@
"Authorization",
"AuthorizationTypedDict",
"BaseFieldDefinition",
+ "BaseFieldDefinitionType",
"BaseFieldDefinitionTypedDict",
"BaseModelCard",
"BaseModelCardTypedDict",
"BaseTaskStatus",
"BatchError",
"BatchErrorTypedDict",
+ "BatchExecutionBody",
+ "BatchExecutionBodyTypedDict",
+ "BatchExecutionResponse",
+ "BatchExecutionResponseTypedDict",
+ "BatchExecutionResult",
+ "BatchExecutionResultTypedDict",
"BatchJob",
"BatchJobStatus",
"BatchJobTypedDict",
@@ -1655,6 +2367,8 @@
"BuiltInConnectors",
"Campaign",
"CampaignTypedDict",
+ "CancelWorkflowExecutionV1WorkflowsExecutionsExecutionIDCancelPostRequest",
+ "CancelWorkflowExecutionV1WorkflowsExecutionsExecutionIDCancelPostRequestTypedDict",
"ChatClassificationRequest",
"ChatClassificationRequestTypedDict",
"ChatCompletionChoice",
@@ -1875,6 +2589,52 @@
"CreateLibraryRequestTypedDict",
"CustomConnector",
"CustomConnectorTypedDict",
+ "CustomTaskCanceledAttributes",
+ "CustomTaskCanceledAttributesTypedDict",
+ "CustomTaskCanceledRequest",
+ "CustomTaskCanceledRequestTypedDict",
+ "CustomTaskCanceledResponse",
+ "CustomTaskCanceledResponseTypedDict",
+ "CustomTaskCompletedAttributesRequest",
+ "CustomTaskCompletedAttributesRequestTypedDict",
+ "CustomTaskCompletedAttributesResponse",
+ "CustomTaskCompletedAttributesResponseTypedDict",
+ "CustomTaskCompletedRequest",
+ "CustomTaskCompletedRequestTypedDict",
+ "CustomTaskCompletedResponse",
+ "CustomTaskCompletedResponseTypedDict",
+ "CustomTaskFailedAttributes",
+ "CustomTaskFailedAttributesTypedDict",
+ "CustomTaskFailedRequest",
+ "CustomTaskFailedRequestTypedDict",
+ "CustomTaskFailedResponse",
+ "CustomTaskFailedResponseTypedDict",
+ "CustomTaskInProgressAttributesRequest",
+ "CustomTaskInProgressAttributesRequestPayload",
+ "CustomTaskInProgressAttributesRequestPayloadTypedDict",
+ "CustomTaskInProgressAttributesRequestTypedDict",
+ "CustomTaskInProgressAttributesResponse",
+ "CustomTaskInProgressAttributesResponsePayload",
+ "CustomTaskInProgressAttributesResponsePayloadTypedDict",
+ "CustomTaskInProgressAttributesResponseTypedDict",
+ "CustomTaskInProgressRequest",
+ "CustomTaskInProgressRequestTypedDict",
+ "CustomTaskInProgressResponse",
+ "CustomTaskInProgressResponseTypedDict",
+ "CustomTaskStartedAttributesRequest",
+ "CustomTaskStartedAttributesRequestTypedDict",
+ "CustomTaskStartedAttributesResponse",
+ "CustomTaskStartedAttributesResponseTypedDict",
+ "CustomTaskStartedRequest",
+ "CustomTaskStartedRequestTypedDict",
+ "CustomTaskStartedResponse",
+ "CustomTaskStartedResponseTypedDict",
+ "CustomTaskTimedOutAttributes",
+ "CustomTaskTimedOutAttributesTypedDict",
+ "CustomTaskTimedOutRequest",
+ "CustomTaskTimedOutRequestTypedDict",
+ "CustomTaskTimedOutResponse",
+ "CustomTaskTimedOutResponseTypedDict",
"Dataset",
"DatasetImportTask",
"DatasetImportTaskTypedDict",
@@ -1883,6 +2643,8 @@
"DatasetRecord",
"DatasetRecordTypedDict",
"DatasetTypedDict",
+ "DeleteBatchJobResponse",
+ "DeleteBatchJobResponseTypedDict",
"DeleteCampaignV1ObservabilityCampaignsCampaignIDDeleteRequest",
"DeleteCampaignV1ObservabilityCampaignsCampaignIDDeleteRequestTypedDict",
"DeleteDatasetRecordV1ObservabilityDatasetRecordsDatasetRecordIDDeleteRequest",
@@ -1905,6 +2667,14 @@
"DeltaMessageContent",
"DeltaMessageContentTypedDict",
"DeltaMessageTypedDict",
+ "DeploymentDetailResponse",
+ "DeploymentDetailResponseTypedDict",
+ "DeploymentListResponse",
+ "DeploymentListResponseTypedDict",
+ "DeploymentResponse",
+ "DeploymentResponseTypedDict",
+ "DeploymentWorkerResponse",
+ "DeploymentWorkerResponseTypedDict",
"Document",
"DocumentLibraryTool",
"DocumentLibraryToolTypedDict",
@@ -1928,12 +2698,20 @@
"EmbeddingResponseData",
"EmbeddingResponseDataTypedDict",
"EmbeddingResponseTypedDict",
+ "EncodedPayloadOptions",
"EncodingFormat",
"EntityType",
"Entry",
"EntryTypedDict",
"Event",
+ "EventProgressStatus",
+ "EventSource",
+ "EventType",
"EventTypedDict",
+ "ExecuteWorkflowRegistrationV1WorkflowsRegistrationsWorkflowRegistrationIDExecutePostRequest",
+ "ExecuteWorkflowRegistrationV1WorkflowsRegistrationsWorkflowRegistrationIDExecutePostRequestTypedDict",
+ "ExecuteWorkflowV1WorkflowsWorkflowIdentifierExecutePostRequest",
+ "ExecuteWorkflowV1WorkflowsWorkflowIdentifierExecutePostRequestTypedDict",
"ExecutionConfig",
"ExecutionConfigTypedDict",
"ExportDatasetResponse",
@@ -1953,6 +2731,8 @@
"FTClassifierLossFunction",
"FTModelCard",
"FTModelCardTypedDict",
+ "Failure",
+ "FailureTypedDict",
"FeedResultChatCompletionEventPreview",
"FeedResultChatCompletionEventPreviewTypedDict",
"FetchCampaignStatusResponse",
@@ -2044,20 +2824,54 @@
"GetDatasetRecordsV1ObservabilityDatasetsDatasetIDRecordsGetRequestTypedDict",
"GetDatasetsV1ObservabilityDatasetsGetRequest",
"GetDatasetsV1ObservabilityDatasetsGetRequestTypedDict",
+ "GetDeploymentV1WorkflowsDeploymentsNameGetRequest",
+ "GetDeploymentV1WorkflowsDeploymentsNameGetRequestTypedDict",
"GetFileResponse",
"GetFileResponseTypedDict",
"GetJudgeByIDV1ObservabilityJudgesJudgeIDGetRequest",
"GetJudgeByIDV1ObservabilityJudgesJudgeIDGetRequestTypedDict",
"GetJudgesV1ObservabilityJudgesGetRequest",
"GetJudgesV1ObservabilityJudgesGetRequestTypedDict",
+ "GetRunHistoryV1WorkflowsRunsRunIDHistoryGetRequest",
+ "GetRunHistoryV1WorkflowsRunsRunIDHistoryGetRequestTypedDict",
+ "GetRunV1WorkflowsRunsRunIDGetRequest",
+ "GetRunV1WorkflowsRunsRunIDGetRequestTypedDict",
"GetSignedURLResponse",
"GetSignedURLResponseTypedDict",
"GetSimilarChatCompletionEventsV1ObservabilityChatCompletionEventsEventIDSimilarEventsGetRequest",
"GetSimilarChatCompletionEventsV1ObservabilityChatCompletionEventsEventIDSimilarEventsGetRequestTypedDict",
+ "GetStreamEventsV1WorkflowsEventsStreamGetRequest",
+ "GetStreamEventsV1WorkflowsEventsStreamGetRequestTypedDict",
+ "GetStreamEventsV1WorkflowsEventsStreamGetResponseBody",
+ "GetStreamEventsV1WorkflowsEventsStreamGetResponseBodyTypedDict",
"GetVoiceSampleAudioV1AudioVoicesVoiceIDSampleGetRequest",
"GetVoiceSampleAudioV1AudioVoicesVoiceIDSampleGetRequestTypedDict",
"GetVoiceV1AudioVoicesVoiceIDGetRequest",
"GetVoiceV1AudioVoicesVoiceIDGetRequestTypedDict",
+ "GetWorkflowEventsV1WorkflowsEventsListGetRequest",
+ "GetWorkflowEventsV1WorkflowsEventsListGetRequestTypedDict",
+ "GetWorkflowExecutionHistoryV1WorkflowsExecutionsExecutionIDHistoryGetRequest",
+ "GetWorkflowExecutionHistoryV1WorkflowsExecutionsExecutionIDHistoryGetRequestTypedDict",
+ "GetWorkflowExecutionTraceEventsRequest",
+ "GetWorkflowExecutionTraceEventsRequestTypedDict",
+ "GetWorkflowExecutionTraceOtelRequest",
+ "GetWorkflowExecutionTraceOtelRequestTypedDict",
+ "GetWorkflowExecutionTraceSummaryRequest",
+ "GetWorkflowExecutionTraceSummaryRequestTypedDict",
+ "GetWorkflowExecutionV1WorkflowsExecutionsExecutionIDGetRequest",
+ "GetWorkflowExecutionV1WorkflowsExecutionsExecutionIDGetRequestTypedDict",
+ "GetWorkflowMetricsV1WorkflowsWorkflowNameMetricsGetRequest",
+ "GetWorkflowMetricsV1WorkflowsWorkflowNameMetricsGetRequestTypedDict",
+ "GetWorkflowRegistrationV1WorkflowsRegistrationsWorkflowRegistrationIDGetRequest",
+ "GetWorkflowRegistrationV1WorkflowsRegistrationsWorkflowRegistrationIDGetRequestTypedDict",
+ "GetWorkflowRegistrationsV1WorkflowsRegistrationsGetRequest",
+ "GetWorkflowRegistrationsV1WorkflowsRegistrationsGetRequestTypedDict",
+ "GetWorkflowV1WorkflowsWorkflowIdentifierGetRequest",
+ "GetWorkflowV1WorkflowsWorkflowIdentifierGetRequestTypedDict",
+ "GetWorkflowsV1WorkflowsGetRequest",
+ "GetWorkflowsV1WorkflowsGetRequestTypedDict",
+ "GetWorkflowsV1WorkflowsGetResponse",
+ "GetWorkflowsV1WorkflowsGetResponseTypedDict",
"GithubRepository",
"GithubRepositoryTypedDict",
"GuardrailConfig",
@@ -2097,12 +2911,34 @@
"InternalMetadataAgentVersion",
"InternalMetadataAgentVersionTypedDict",
"InternalMetadataTypedDict",
+ "JSONPatchAdd",
+ "JSONPatchAddTypedDict",
+ "JSONPatchAppend",
+ "JSONPatchAppendTypedDict",
+ "JSONPatchPayloadRequest",
+ "JSONPatchPayloadRequestTypedDict",
+ "JSONPatchPayloadRequestValue",
+ "JSONPatchPayloadRequestValueTypedDict",
+ "JSONPatchPayloadResponse",
+ "JSONPatchPayloadResponseTypedDict",
+ "JSONPatchPayloadResponseValue",
+ "JSONPatchPayloadResponseValueTypedDict",
+ "JSONPatchRemove",
+ "JSONPatchRemoveTypedDict",
+ "JSONPatchReplace",
+ "JSONPatchReplaceTypedDict",
+ "JSONPayloadRequest",
+ "JSONPayloadRequestTypedDict",
+ "JSONPayloadResponse",
+ "JSONPayloadResponseTypedDict",
"JSONSchema",
"JSONSchemaTypedDict",
"JobMetadata",
"JobMetadataTypedDict",
"JobsAPIRoutesBatchCancelBatchJobRequest",
"JobsAPIRoutesBatchCancelBatchJobRequestTypedDict",
+ "JobsAPIRoutesBatchDeleteBatchJobRequest",
+ "JobsAPIRoutesBatchDeleteBatchJobRequestTypedDict",
"JobsAPIRoutesBatchGetBatchJobRequest",
"JobsAPIRoutesBatchGetBatchJobRequestTypedDict",
"JobsAPIRoutesBatchGetBatchJobsRequest",
@@ -2183,6 +3019,8 @@
"LibrariesDocumentsUploadV1RequestTypedDict",
"LibrariesGetV1Request",
"LibrariesGetV1RequestTypedDict",
+ "LibrariesListV1Request",
+ "LibrariesListV1RequestTypedDict",
"LibrariesShareCreateV1Request",
"LibrariesShareCreateV1RequestTypedDict",
"LibrariesShareDeleteV1Request",
@@ -2207,6 +3045,8 @@
"ListDatasetRecordsResponseTypedDict",
"ListDatasetsResponse",
"ListDatasetsResponseTypedDict",
+ "ListDeploymentsV1WorkflowsDeploymentsGetRequest",
+ "ListDeploymentsV1WorkflowsDeploymentsGetRequestTypedDict",
"ListDocumentsResponse",
"ListDocumentsResponseTypedDict",
"ListFilesResponse",
@@ -2221,10 +3061,21 @@
"ListLibrariesResponseTypedDict",
"ListModelsV1ModelsGetRequest",
"ListModelsV1ModelsGetRequestTypedDict",
+ "ListRunsV1WorkflowsRunsGetRequest",
+ "ListRunsV1WorkflowsRunsGetRequestTypedDict",
+ "ListRunsV1WorkflowsRunsGetResponse",
+ "ListRunsV1WorkflowsRunsGetResponseTypedDict",
+ "ListRunsV1WorkflowsRunsGetStatus",
+ "ListRunsV1WorkflowsRunsGetStatusTypedDict",
"ListSharingResponse",
"ListSharingResponseTypedDict",
"ListVoicesV1AudioVoicesGetRequest",
"ListVoicesV1AudioVoicesGetRequestTypedDict",
+ "ListVoicesV1AudioVoicesGetType",
+ "ListWorkflowEventResponse",
+ "ListWorkflowEventResponseEvent",
+ "ListWorkflowEventResponseEventTypedDict",
+ "ListWorkflowEventResponseTypedDict",
"Loc",
"LocTypedDict",
"MCPServerIcon",
@@ -2291,6 +3142,8 @@
"ModerationResponseTypedDict",
"MultiPartBodyParams",
"MultiPartBodyParamsTypedDict",
+ "NetworkEncodedInput",
+ "NetworkEncodedInputTypedDict",
"OAuth2TokenAuth",
"OAuth2TokenAuthTypedDict",
"OCRImageObject",
@@ -2354,6 +3207,16 @@
"ProcessingStatusTypedDict",
"PromptTokensDetails",
"PromptTokensDetailsTypedDict",
+ "QueryDefinition",
+ "QueryDefinitionTypedDict",
+ "QueryInvocationBody",
+ "QueryInvocationBodyInput",
+ "QueryInvocationBodyInputTypedDict",
+ "QueryInvocationBodyTypedDict",
+ "QueryWorkflowExecutionV1WorkflowsExecutionsExecutionIDQueriesPostRequest",
+ "QueryWorkflowExecutionV1WorkflowsExecutionsExecutionIDQueriesPostRequestTypedDict",
+ "QueryWorkflowResponse",
+ "QueryWorkflowResponseTypedDict",
"RealtimeTranscriptionError",
"RealtimeTranscriptionErrorDetail",
"RealtimeTranscriptionErrorDetailMessage",
@@ -2382,20 +3245,26 @@
"ReferenceID",
"ReferenceIDTypedDict",
"RequestSource",
+ "ResetInvocationBody",
+ "ResetInvocationBodyTypedDict",
+ "ResetWorkflowV1WorkflowsExecutionsExecutionIDResetPostRequest",
+ "ResetWorkflowV1WorkflowsExecutionsExecutionIDResetPostRequestTypedDict",
"Resource",
"ResourceLink",
"ResourceLinkTypedDict",
"ResourceTypedDict",
"ResourceVisibility",
"Response",
- "ResponseConnectorListToolsV11",
- "ResponseConnectorListToolsV11TypedDict",
- "ResponseConnectorListToolsV12",
- "ResponseConnectorListToolsV12TypedDict",
+ "ResponseConnectorListToolsV1",
+ "ResponseConnectorListToolsV1TypedDict",
"ResponseDoneEvent",
"ResponseDoneEventTypedDict",
"ResponseErrorEvent",
"ResponseErrorEventTypedDict",
+ "ResponseExecuteWorkflowRegistrationV1WorkflowsRegistrationsWorkflowRegistrationIDExecutePost",
+ "ResponseExecuteWorkflowRegistrationV1WorkflowsRegistrationsWorkflowRegistrationIDExecutePostTypedDict",
+ "ResponseExecuteWorkflowV1WorkflowsWorkflowIdentifierExecutePost",
+ "ResponseExecuteWorkflowV1WorkflowsWorkflowIdentifierExecutePostTypedDict",
"ResponseFormat",
"ResponseFormatTypedDict",
"ResponseFormats",
@@ -2412,6 +3281,24 @@
"Roles",
"SSETypes",
"SampleType",
+ "ScalarMetric",
+ "ScalarMetricTypedDict",
+ "ScalarMetricValue",
+ "ScalarMetricValueTypedDict",
+ "ScheduleCalendar",
+ "ScheduleCalendarTypedDict",
+ "ScheduleDefinition",
+ "ScheduleDefinitionOutput",
+ "ScheduleDefinitionOutputTypedDict",
+ "ScheduleDefinitionTypedDict",
+ "ScheduleInterval",
+ "ScheduleIntervalTypedDict",
+ "ScheduleOverlapPolicy",
+ "SchedulePolicy",
+ "SchedulePolicyTypedDict",
+ "ScheduleRange",
+ "ScheduleRangeTypedDict",
+ "Scope",
"SearchChatCompletionEventIdsRequest",
"SearchChatCompletionEventIdsRequestTypedDict",
"SearchChatCompletionEventIdsResponse",
@@ -2429,6 +3316,18 @@
"SharingRequest",
"SharingRequestTypedDict",
"SharingTypedDict",
+ "SignalDefinition",
+ "SignalDefinitionTypedDict",
+ "SignalInvocationBody",
+ "SignalInvocationBodyInput",
+ "SignalInvocationBodyInputTypedDict",
+ "SignalInvocationBodyNetworkEncodedInput",
+ "SignalInvocationBodyNetworkEncodedInputTypedDict",
+ "SignalInvocationBodyTypedDict",
+ "SignalWorkflowExecutionV1WorkflowsExecutionsExecutionIDSignalsPostRequest",
+ "SignalWorkflowExecutionV1WorkflowsExecutionsExecutionIDSignalsPostRequestTypedDict",
+ "SignalWorkflowResponse",
+ "SignalWorkflowResponseTypedDict",
"Source",
"SpeechOutputFormat",
"SpeechRequest",
@@ -2446,6 +3345,16 @@
"SpeechV1AudioSpeechPostDataTypedDict",
"SpeechV1AudioSpeechPostResponse",
"SpeechV1AudioSpeechPostResponseTypedDict",
+ "StreamEventSsePayload",
+ "StreamEventSsePayloadData",
+ "StreamEventSsePayloadDataTypedDict",
+ "StreamEventSsePayloadTypedDict",
+ "StreamEventWorkflowContext",
+ "StreamEventWorkflowContextTypedDict",
+ "StreamV1WorkflowsExecutionsExecutionIDStreamGetRequest",
+ "StreamV1WorkflowsExecutionsExecutionIDStreamGetRequestTypedDict",
+ "StreamV1WorkflowsExecutionsExecutionIDStreamGetResponseBody",
+ "StreamV1WorkflowsExecutionsExecutionIDStreamGetResponseBodyTypedDict",
"SupportedOperator",
"SystemMessage",
"SystemMessageContent",
@@ -2455,6 +3364,33 @@
"SystemMessageTypedDict",
"TableFormat",
"TaskSupport",
+ "TempoGetTraceResponse",
+ "TempoGetTraceResponseTypedDict",
+ "TempoTraceAttribute",
+ "TempoTraceAttributeBoolValue",
+ "TempoTraceAttributeBoolValueTypedDict",
+ "TempoTraceAttributeIntValue",
+ "TempoTraceAttributeIntValueTypedDict",
+ "TempoTraceAttributeStringValue",
+ "TempoTraceAttributeStringValueTypedDict",
+ "TempoTraceAttributeTypedDict",
+ "TempoTraceAttributeValue",
+ "TempoTraceAttributeValueTypedDict",
+ "TempoTraceBatch",
+ "TempoTraceBatchTypedDict",
+ "TempoTraceEvent",
+ "TempoTraceEventTypedDict",
+ "TempoTraceResource",
+ "TempoTraceResourceTypedDict",
+ "TempoTraceScope",
+ "TempoTraceScopeKind",
+ "TempoTraceScopeSpan",
+ "TempoTraceScopeSpanTypedDict",
+ "TempoTraceScopeTypedDict",
+ "TempoTraceSpan",
+ "TempoTraceSpanTypedDict",
+ "TerminateWorkflowExecutionV1WorkflowsExecutionsExecutionIDTerminatePostRequest",
+ "TerminateWorkflowExecutionV1WorkflowsExecutionsExecutionIDTerminatePostRequestTypedDict",
"TextChunk",
"TextChunkTypedDict",
"TextContent",
@@ -2465,6 +3401,12 @@
"ThinkChunkTypedDict",
"Thinking",
"ThinkingTypedDict",
+ "TimeSeriesMetric",
+ "TimeSeriesMetricTypedDict",
+ "TimeSeriesMetricValue1",
+ "TimeSeriesMetricValue1TypedDict",
+ "TimeSeriesMetricValue2",
+ "TimeSeriesMetricValue2TypedDict",
"TimestampGranularity",
"Tool",
"ToolAnnotations",
@@ -2534,9 +3476,10 @@
"TurbineToolLocaleTypedDict",
"TurbineToolMeta",
"TurbineToolMetaTypedDict",
- "TypeEnum",
"UnarchiveModelResponse",
"UnarchiveModelResponseTypedDict",
+ "UnarchiveWorkflowV1WorkflowsWorkflowIdentifierUnarchivePutRequest",
+ "UnarchiveWorkflowV1WorkflowsWorkflowIdentifierUnarchivePutRequestTypedDict",
"UnknownAgentTool",
"UnknownAuthorization",
"UnknownClassifierFineTuningJobDetailsIntegration",
@@ -2548,6 +3491,8 @@
"UnknownConnectorToolCallResponseContent",
"UnknownContentChunk",
"UnknownConversationEventsData",
+ "UnknownCustomTaskInProgressAttributesResponsePayload",
+ "UnknownJSONPatchPayloadResponseValue",
"UnknownJobsAPIRoutesFineTuningCancelFineTuningJobResponse",
"UnknownJobsAPIRoutesFineTuningGetFineTuningJobResponse",
"UnknownJobsAPIRoutesFineTuningStartFineTuningJobResponse",
@@ -2560,6 +3505,8 @@
"UnknownResponseRetrieveModelV1ModelsModelIDGet",
"UnknownSpeechV1AudioSpeechPostData",
"UnknownTranscriptionStreamEventsData",
+ "UnscheduleWorkflowV1WorkflowsSchedulesScheduleIDDeleteRequest",
+ "UnscheduleWorkflowV1WorkflowsSchedulesScheduleIDDeleteRequestTypedDict",
"UpdateAgentRequest",
"UpdateAgentRequestTool",
"UpdateAgentRequestToolTypedDict",
@@ -2578,8 +3525,14 @@
"UpdateDatasetRequestTypedDict",
"UpdateDatasetV1ObservabilityDatasetsDatasetIDPatchRequest",
"UpdateDatasetV1ObservabilityDatasetsDatasetIDPatchRequestTypedDict",
+ "UpdateDefinition",
+ "UpdateDefinitionTypedDict",
"UpdateDocumentRequest",
"UpdateDocumentRequestTypedDict",
+ "UpdateInvocationBody",
+ "UpdateInvocationBodyInput",
+ "UpdateInvocationBodyInputTypedDict",
+ "UpdateInvocationBodyTypedDict",
"UpdateJudgeRequest",
"UpdateJudgeRequestOutput",
"UpdateJudgeRequestOutputTypedDict",
@@ -2592,6 +3545,12 @@
"UpdateModelRequestTypedDict",
"UpdateVoiceV1AudioVoicesVoiceIDPatchRequest",
"UpdateVoiceV1AudioVoicesVoiceIDPatchRequestTypedDict",
+ "UpdateWorkflowExecutionV1WorkflowsExecutionsExecutionIDUpdatesPostRequest",
+ "UpdateWorkflowExecutionV1WorkflowsExecutionsExecutionIDUpdatesPostRequestTypedDict",
+ "UpdateWorkflowResponse",
+ "UpdateWorkflowResponseTypedDict",
+ "UpdateWorkflowV1WorkflowsWorkflowIdentifierPutRequest",
+ "UpdateWorkflowV1WorkflowsWorkflowIdentifierPutRequestTypedDict",
"UsageInfo",
"UsageInfoDollarDefs",
"UsageInfoDollarDefsTypedDict",
@@ -2619,9 +3578,166 @@
"WebSearchPremiumToolTypedDict",
"WebSearchTool",
"WebSearchToolTypedDict",
+ "Workflow",
+ "WorkflowArchiveResponse",
+ "WorkflowArchiveResponseTypedDict",
+ "WorkflowBasicDefinition",
+ "WorkflowBasicDefinitionTypedDict",
+ "WorkflowCodeDefinition",
+ "WorkflowCodeDefinitionTypedDict",
+ "WorkflowEventBatchRequest",
+ "WorkflowEventBatchRequestEvent",
+ "WorkflowEventBatchRequestEventTypedDict",
+ "WorkflowEventBatchRequestTypedDict",
+ "WorkflowEventBatchResponse",
+ "WorkflowEventBatchResponseStatus",
+ "WorkflowEventBatchResponseTypedDict",
+ "WorkflowEventRequest",
+ "WorkflowEventRequestEvent",
+ "WorkflowEventRequestEventTypedDict",
+ "WorkflowEventRequestTypedDict",
+ "WorkflowEventResponse",
+ "WorkflowEventResponseStatus",
+ "WorkflowEventResponseTypedDict",
+ "WorkflowEventType",
+ "WorkflowExecutionCanceledAttributes",
+ "WorkflowExecutionCanceledAttributesTypedDict",
+ "WorkflowExecutionCanceledRequest",
+ "WorkflowExecutionCanceledRequestTypedDict",
+ "WorkflowExecutionCanceledResponse",
+ "WorkflowExecutionCanceledResponseTypedDict",
+ "WorkflowExecutionCompletedAttributesRequest",
+ "WorkflowExecutionCompletedAttributesRequestTypedDict",
+ "WorkflowExecutionCompletedAttributesResponse",
+ "WorkflowExecutionCompletedAttributesResponseTypedDict",
+ "WorkflowExecutionCompletedRequest",
+ "WorkflowExecutionCompletedRequestTypedDict",
+ "WorkflowExecutionCompletedResponse",
+ "WorkflowExecutionCompletedResponseTypedDict",
+ "WorkflowExecutionContinuedAsNewAttributesRequest",
+ "WorkflowExecutionContinuedAsNewAttributesRequestTypedDict",
+ "WorkflowExecutionContinuedAsNewAttributesResponse",
+ "WorkflowExecutionContinuedAsNewAttributesResponseTypedDict",
+ "WorkflowExecutionContinuedAsNewRequest",
+ "WorkflowExecutionContinuedAsNewRequestTypedDict",
+ "WorkflowExecutionContinuedAsNewResponse",
+ "WorkflowExecutionContinuedAsNewResponseTypedDict",
+ "WorkflowExecutionFailedAttributes",
+ "WorkflowExecutionFailedAttributesTypedDict",
+ "WorkflowExecutionFailedRequest",
+ "WorkflowExecutionFailedRequestTypedDict",
+ "WorkflowExecutionFailedResponse",
+ "WorkflowExecutionFailedResponseTypedDict",
+ "WorkflowExecutionListResponse",
+ "WorkflowExecutionListResponseTypedDict",
+ "WorkflowExecutionProgressTraceEvent",
+ "WorkflowExecutionProgressTraceEventTypedDict",
+ "WorkflowExecutionRequest",
+ "WorkflowExecutionRequestTypedDict",
+ "WorkflowExecutionResponse",
+ "WorkflowExecutionResponseTypedDict",
+ "WorkflowExecutionStartedAttributesRequest",
+ "WorkflowExecutionStartedAttributesRequestTypedDict",
+ "WorkflowExecutionStartedAttributesResponse",
+ "WorkflowExecutionStartedAttributesResponseTypedDict",
+ "WorkflowExecutionStartedRequest",
+ "WorkflowExecutionStartedRequestTypedDict",
+ "WorkflowExecutionStartedResponse",
+ "WorkflowExecutionStartedResponseTypedDict",
+ "WorkflowExecutionStatus",
+ "WorkflowExecutionSyncResponse",
+ "WorkflowExecutionSyncResponseTypedDict",
+ "WorkflowExecutionTraceEvent",
+ "WorkflowExecutionTraceEventTypedDict",
+ "WorkflowExecutionTraceEventsResponse",
+ "WorkflowExecutionTraceEventsResponseEvent",
+ "WorkflowExecutionTraceEventsResponseEventTypedDict",
+ "WorkflowExecutionTraceEventsResponseTypedDict",
+ "WorkflowExecutionTraceOTelResponse",
+ "WorkflowExecutionTraceOTelResponseTypedDict",
+ "WorkflowExecutionTraceSummaryAttributesValues",
+ "WorkflowExecutionTraceSummaryAttributesValuesTypedDict",
+ "WorkflowExecutionTraceSummaryResponse",
+ "WorkflowExecutionTraceSummaryResponseTypedDict",
+ "WorkflowExecutionTraceSummarySpan",
+ "WorkflowExecutionTraceSummarySpanTypedDict",
+ "WorkflowExecutionWithoutResultResponse",
+ "WorkflowExecutionWithoutResultResponseTypedDict",
+ "WorkflowGetResponse",
+ "WorkflowGetResponseTypedDict",
+ "WorkflowListResponse",
+ "WorkflowListResponseTypedDict",
+ "WorkflowMetadata",
+ "WorkflowMetadataTypedDict",
+ "WorkflowMetrics",
+ "WorkflowMetricsTypedDict",
+ "WorkflowRegistration",
+ "WorkflowRegistrationGetResponse",
+ "WorkflowRegistrationGetResponseTypedDict",
+ "WorkflowRegistrationListResponse",
+ "WorkflowRegistrationListResponseTypedDict",
+ "WorkflowRegistrationTypedDict",
+ "WorkflowRegistrationWithWorkerStatus",
+ "WorkflowRegistrationWithWorkerStatusTypedDict",
+ "WorkflowScheduleListResponse",
+ "WorkflowScheduleListResponseTypedDict",
+ "WorkflowScheduleRequest",
+ "WorkflowScheduleRequestTypedDict",
+ "WorkflowScheduleResponse",
+ "WorkflowScheduleResponseTypedDict",
+ "WorkflowTaskFailedAttributes",
+ "WorkflowTaskFailedAttributesTypedDict",
+ "WorkflowTaskFailedRequest",
+ "WorkflowTaskFailedRequestTypedDict",
+ "WorkflowTaskFailedResponse",
+ "WorkflowTaskFailedResponseTypedDict",
+ "WorkflowTaskTimedOutAttributes",
+ "WorkflowTaskTimedOutAttributesTypedDict",
+ "WorkflowTaskTimedOutRequest",
+ "WorkflowTaskTimedOutRequestTypedDict",
+ "WorkflowTaskTimedOutResponse",
+ "WorkflowTaskTimedOutResponseTypedDict",
+ "WorkflowType",
+ "WorkflowTypedDict",
+ "WorkflowUnarchiveResponse",
+ "WorkflowUnarchiveResponseTypedDict",
+ "WorkflowUpdateRequest",
+ "WorkflowUpdateRequestTypedDict",
+ "WorkflowUpdateResponse",
+ "WorkflowUpdateResponseTypedDict",
+ "WorkflowWithWorkerStatus",
+ "WorkflowWithWorkerStatusTypedDict",
]
_dynamic_imports: dict[str, str] = {
+ "ActivityTaskCompletedAttributesRequest": ".activitytaskcompletedattributesrequest",
+ "ActivityTaskCompletedAttributesRequestTypedDict": ".activitytaskcompletedattributesrequest",
+ "ActivityTaskCompletedAttributesResponse": ".activitytaskcompletedattributesresponse",
+ "ActivityTaskCompletedAttributesResponseTypedDict": ".activitytaskcompletedattributesresponse",
+ "ActivityTaskCompletedRequest": ".activitytaskcompletedrequest",
+ "ActivityTaskCompletedRequestTypedDict": ".activitytaskcompletedrequest",
+ "ActivityTaskCompletedResponse": ".activitytaskcompletedresponse",
+ "ActivityTaskCompletedResponseTypedDict": ".activitytaskcompletedresponse",
+ "ActivityTaskFailedAttributes": ".activitytaskfailedattributes",
+ "ActivityTaskFailedAttributesTypedDict": ".activitytaskfailedattributes",
+ "ActivityTaskFailedRequest": ".activitytaskfailedrequest",
+ "ActivityTaskFailedRequestTypedDict": ".activitytaskfailedrequest",
+ "ActivityTaskFailedResponse": ".activitytaskfailedresponse",
+ "ActivityTaskFailedResponseTypedDict": ".activitytaskfailedresponse",
+ "ActivityTaskRetryingAttributes": ".activitytaskretryingattributes",
+ "ActivityTaskRetryingAttributesTypedDict": ".activitytaskretryingattributes",
+ "ActivityTaskRetryingRequest": ".activitytaskretryingrequest",
+ "ActivityTaskRetryingRequestTypedDict": ".activitytaskretryingrequest",
+ "ActivityTaskRetryingResponse": ".activitytaskretryingresponse",
+ "ActivityTaskRetryingResponseTypedDict": ".activitytaskretryingresponse",
+ "ActivityTaskStartedAttributesRequest": ".activitytaskstartedattributesrequest",
+ "ActivityTaskStartedAttributesRequestTypedDict": ".activitytaskstartedattributesrequest",
+ "ActivityTaskStartedAttributesResponse": ".activitytaskstartedattributesresponse",
+ "ActivityTaskStartedAttributesResponseTypedDict": ".activitytaskstartedattributesresponse",
+ "ActivityTaskStartedRequest": ".activitytaskstartedrequest",
+ "ActivityTaskStartedRequestTypedDict": ".activitytaskstartedrequest",
+ "ActivityTaskStartedResponse": ".activitytaskstartedresponse",
+ "ActivityTaskStartedResponseTypedDict": ".activitytaskstartedresponse",
"Agent": ".agent",
"AgentTool": ".agent",
"AgentToolTypedDict": ".agent",
@@ -2705,6 +3821,8 @@
"APIEndpoint": ".apiendpoint",
"APIKeyAuth": ".apikeyauth",
"APIKeyAuthTypedDict": ".apikeyauth",
+ "ArchiveWorkflowV1WorkflowsWorkflowIdentifierArchivePutRequest": ".archive_workflow_v1_workflows_workflow_identifier_archive_putop",
+ "ArchiveWorkflowV1WorkflowsWorkflowIdentifierArchivePutRequestTypedDict": ".archive_workflow_v1_workflows_workflow_identifier_archive_putop",
"ArchiveModelResponse": ".archivemodelresponse",
"ArchiveModelResponseTypedDict": ".archivemodelresponse",
"AssistantMessage": ".assistantmessage",
@@ -2727,14 +3845,20 @@
"AuthURLResponse": ".authurlresponse",
"AuthURLResponseTypedDict": ".authurlresponse",
"BaseFieldDefinition": ".basefielddefinition",
+ "BaseFieldDefinitionType": ".basefielddefinition",
"BaseFieldDefinitionTypedDict": ".basefielddefinition",
"SupportedOperator": ".basefielddefinition",
- "TypeEnum": ".basefielddefinition",
"BaseModelCard": ".basemodelcard",
"BaseModelCardTypedDict": ".basemodelcard",
"BaseTaskStatus": ".basetaskstatus",
"BatchError": ".batcherror",
"BatchErrorTypedDict": ".batcherror",
+ "BatchExecutionBody": ".batchexecutionbody",
+ "BatchExecutionBodyTypedDict": ".batchexecutionbody",
+ "BatchExecutionResponse": ".batchexecutionresponse",
+ "BatchExecutionResponseTypedDict": ".batchexecutionresponse",
+ "BatchExecutionResult": ".batchexecutionresult",
+ "BatchExecutionResultTypedDict": ".batchexecutionresult",
"BatchJob": ".batchjob",
"BatchJobTypedDict": ".batchjob",
"BatchJobStatus": ".batchjobstatus",
@@ -2745,6 +3869,8 @@
"BuiltInConnectors": ".builtinconnectors",
"Campaign": ".campaign",
"CampaignTypedDict": ".campaign",
+ "CancelWorkflowExecutionV1WorkflowsExecutionsExecutionIDCancelPostRequest": ".cancel_workflow_execution_v1_workflows_executions_execution_id_cancel_postop",
+ "CancelWorkflowExecutionV1WorkflowsExecutionsExecutionIDCancelPostRequestTypedDict": ".cancel_workflow_execution_v1_workflows_executions_execution_id_cancel_postop",
"ChatClassificationRequest": ".chatclassificationrequest",
"ChatClassificationRequestTypedDict": ".chatclassificationrequest",
"ChatCompletionChoice": ".chatcompletionchoice",
@@ -2863,10 +3989,8 @@
"ConnectorGetV1RequestTypedDict": ".connector_get_v1op",
"ConnectorListToolsV1Request": ".connector_list_tools_v1op",
"ConnectorListToolsV1RequestTypedDict": ".connector_list_tools_v1op",
- "ResponseConnectorListToolsV11": ".connector_list_tools_v1op",
- "ResponseConnectorListToolsV11TypedDict": ".connector_list_tools_v1op",
- "ResponseConnectorListToolsV12": ".connector_list_tools_v1op",
- "ResponseConnectorListToolsV12TypedDict": ".connector_list_tools_v1op",
+ "ResponseConnectorListToolsV1": ".connector_list_tools_v1op",
+ "ResponseConnectorListToolsV1TypedDict": ".connector_list_tools_v1op",
"ConnectorListV1Request": ".connector_list_v1op",
"ConnectorListV1RequestTypedDict": ".connector_list_v1op",
"ConnectorUpdateV1Request": ".connector_update_v1op",
@@ -2982,6 +4106,53 @@
"CustomConnector": ".customconnector",
"CustomConnectorTypedDict": ".customconnector",
"UnknownAuthorization": ".customconnector",
+ "CustomTaskCanceledAttributes": ".customtaskcanceledattributes",
+ "CustomTaskCanceledAttributesTypedDict": ".customtaskcanceledattributes",
+ "CustomTaskCanceledRequest": ".customtaskcanceledrequest",
+ "CustomTaskCanceledRequestTypedDict": ".customtaskcanceledrequest",
+ "CustomTaskCanceledResponse": ".customtaskcanceledresponse",
+ "CustomTaskCanceledResponseTypedDict": ".customtaskcanceledresponse",
+ "CustomTaskCompletedAttributesRequest": ".customtaskcompletedattributesrequest",
+ "CustomTaskCompletedAttributesRequestTypedDict": ".customtaskcompletedattributesrequest",
+ "CustomTaskCompletedAttributesResponse": ".customtaskcompletedattributesresponse",
+ "CustomTaskCompletedAttributesResponseTypedDict": ".customtaskcompletedattributesresponse",
+ "CustomTaskCompletedRequest": ".customtaskcompletedrequest",
+ "CustomTaskCompletedRequestTypedDict": ".customtaskcompletedrequest",
+ "CustomTaskCompletedResponse": ".customtaskcompletedresponse",
+ "CustomTaskCompletedResponseTypedDict": ".customtaskcompletedresponse",
+ "CustomTaskFailedAttributes": ".customtaskfailedattributes",
+ "CustomTaskFailedAttributesTypedDict": ".customtaskfailedattributes",
+ "CustomTaskFailedRequest": ".customtaskfailedrequest",
+ "CustomTaskFailedRequestTypedDict": ".customtaskfailedrequest",
+ "CustomTaskFailedResponse": ".customtaskfailedresponse",
+ "CustomTaskFailedResponseTypedDict": ".customtaskfailedresponse",
+ "CustomTaskInProgressAttributesRequest": ".customtaskinprogressattributesrequest",
+ "CustomTaskInProgressAttributesRequestPayload": ".customtaskinprogressattributesrequest",
+ "CustomTaskInProgressAttributesRequestPayloadTypedDict": ".customtaskinprogressattributesrequest",
+ "CustomTaskInProgressAttributesRequestTypedDict": ".customtaskinprogressattributesrequest",
+ "CustomTaskInProgressAttributesResponse": ".customtaskinprogressattributesresponse",
+ "CustomTaskInProgressAttributesResponsePayload": ".customtaskinprogressattributesresponse",
+ "CustomTaskInProgressAttributesResponsePayloadTypedDict": ".customtaskinprogressattributesresponse",
+ "CustomTaskInProgressAttributesResponseTypedDict": ".customtaskinprogressattributesresponse",
+ "UnknownCustomTaskInProgressAttributesResponsePayload": ".customtaskinprogressattributesresponse",
+ "CustomTaskInProgressRequest": ".customtaskinprogressrequest",
+ "CustomTaskInProgressRequestTypedDict": ".customtaskinprogressrequest",
+ "CustomTaskInProgressResponse": ".customtaskinprogressresponse",
+ "CustomTaskInProgressResponseTypedDict": ".customtaskinprogressresponse",
+ "CustomTaskStartedAttributesRequest": ".customtaskstartedattributesrequest",
+ "CustomTaskStartedAttributesRequestTypedDict": ".customtaskstartedattributesrequest",
+ "CustomTaskStartedAttributesResponse": ".customtaskstartedattributesresponse",
+ "CustomTaskStartedAttributesResponseTypedDict": ".customtaskstartedattributesresponse",
+ "CustomTaskStartedRequest": ".customtaskstartedrequest",
+ "CustomTaskStartedRequestTypedDict": ".customtaskstartedrequest",
+ "CustomTaskStartedResponse": ".customtaskstartedresponse",
+ "CustomTaskStartedResponseTypedDict": ".customtaskstartedresponse",
+ "CustomTaskTimedOutAttributes": ".customtasktimedoutattributes",
+ "CustomTaskTimedOutAttributesTypedDict": ".customtasktimedoutattributes",
+ "CustomTaskTimedOutRequest": ".customtasktimedoutrequest",
+ "CustomTaskTimedOutRequestTypedDict": ".customtasktimedoutrequest",
+ "CustomTaskTimedOutResponse": ".customtasktimedoutresponse",
+ "CustomTaskTimedOutResponseTypedDict": ".customtasktimedoutresponse",
"Dataset": ".dataset",
"DatasetTypedDict": ".dataset",
"DatasetImportTask": ".datasetimporttask",
@@ -3002,6 +4173,8 @@
"DeleteModelV1ModelsModelIDDeleteRequestTypedDict": ".delete_model_v1_models_model_id_deleteop",
"DeleteVoiceV1AudioVoicesVoiceIDDeleteRequest": ".delete_voice_v1_audio_voices_voice_id_deleteop",
"DeleteVoiceV1AudioVoicesVoiceIDDeleteRequestTypedDict": ".delete_voice_v1_audio_voices_voice_id_deleteop",
+ "DeleteBatchJobResponse": ".deletebatchjobresponse",
+ "DeleteBatchJobResponseTypedDict": ".deletebatchjobresponse",
"DeleteDatasetRecordsRequest": ".deletedatasetrecordsrequest",
"DeleteDatasetRecordsRequestTypedDict": ".deletedatasetrecordsrequest",
"DeleteFileResponse": ".deletefileresponse",
@@ -3012,6 +4185,14 @@
"DeltaMessageContent": ".deltamessage",
"DeltaMessageContentTypedDict": ".deltamessage",
"DeltaMessageTypedDict": ".deltamessage",
+ "DeploymentDetailResponse": ".deploymentdetailresponse",
+ "DeploymentDetailResponseTypedDict": ".deploymentdetailresponse",
+ "DeploymentListResponse": ".deploymentlistresponse",
+ "DeploymentListResponseTypedDict": ".deploymentlistresponse",
+ "DeploymentResponse": ".deploymentresponse",
+ "DeploymentResponseTypedDict": ".deploymentresponse",
+ "DeploymentWorkerResponse": ".deploymentworkerresponse",
+ "DeploymentWorkerResponseTypedDict": ".deploymentworkerresponse",
"Document": ".document",
"DocumentTypedDict": ".document",
"DocumentLibraryTool": ".documentlibrarytool",
@@ -3033,16 +4214,30 @@
"EmbeddingResponseTypedDict": ".embeddingresponse",
"EmbeddingResponseData": ".embeddingresponsedata",
"EmbeddingResponseDataTypedDict": ".embeddingresponsedata",
+ "EncodedPayloadOptions": ".encodedpayloadoptions",
"EncodingFormat": ".encodingformat",
"EntityType": ".entitytype",
"Event": ".event",
"EventTypedDict": ".event",
+ "EventProgressStatus": ".eventprogressstatus",
+ "EventSource": ".eventsource",
+ "EventType": ".eventtype",
+ "ExecuteWorkflowRegistrationV1WorkflowsRegistrationsWorkflowRegistrationIDExecutePostRequest": ".execute_workflow_registration_v1_workflows_registrations_workflow_registration_id_execute_postop",
+ "ExecuteWorkflowRegistrationV1WorkflowsRegistrationsWorkflowRegistrationIDExecutePostRequestTypedDict": ".execute_workflow_registration_v1_workflows_registrations_workflow_registration_id_execute_postop",
+ "ResponseExecuteWorkflowRegistrationV1WorkflowsRegistrationsWorkflowRegistrationIDExecutePost": ".execute_workflow_registration_v1_workflows_registrations_workflow_registration_id_execute_postop",
+ "ResponseExecuteWorkflowRegistrationV1WorkflowsRegistrationsWorkflowRegistrationIDExecutePostTypedDict": ".execute_workflow_registration_v1_workflows_registrations_workflow_registration_id_execute_postop",
+ "ExecuteWorkflowV1WorkflowsWorkflowIdentifierExecutePostRequest": ".execute_workflow_v1_workflows_workflow_identifier_execute_postop",
+ "ExecuteWorkflowV1WorkflowsWorkflowIdentifierExecutePostRequestTypedDict": ".execute_workflow_v1_workflows_workflow_identifier_execute_postop",
+ "ResponseExecuteWorkflowV1WorkflowsWorkflowIdentifierExecutePost": ".execute_workflow_v1_workflows_workflow_identifier_execute_postop",
+ "ResponseExecuteWorkflowV1WorkflowsWorkflowIdentifierExecutePostTypedDict": ".execute_workflow_v1_workflows_workflow_identifier_execute_postop",
"ExecutionConfig": ".executionconfig",
"ExecutionConfigTypedDict": ".executionconfig",
"ExportDatasetToJsonlV1ObservabilityDatasetsDatasetIDExportsToJsonlGetRequest": ".export_dataset_to_jsonl_v1_observability_datasets_dataset_id_exports_to_jsonl_getop",
"ExportDatasetToJsonlV1ObservabilityDatasetsDatasetIDExportsToJsonlGetRequestTypedDict": ".export_dataset_to_jsonl_v1_observability_datasets_dataset_id_exports_to_jsonl_getop",
"ExportDatasetResponse": ".exportdatasetresponse",
"ExportDatasetResponseTypedDict": ".exportdatasetresponse",
+ "Failure": ".failure",
+ "FailureTypedDict": ".failure",
"FeedResultChatCompletionEventPreview": ".feedresultchatcompletioneventpreview",
"FeedResultChatCompletionEventPreviewTypedDict": ".feedresultchatcompletioneventpreview",
"FetchCampaignStatusResponse": ".fetchcampaignstatusresponse",
@@ -3152,16 +4347,51 @@
"GetDatasetRecordsV1ObservabilityDatasetsDatasetIDRecordsGetRequestTypedDict": ".get_dataset_records_v1_observability_datasets_dataset_id_records_getop",
"GetDatasetsV1ObservabilityDatasetsGetRequest": ".get_datasets_v1_observability_datasets_getop",
"GetDatasetsV1ObservabilityDatasetsGetRequestTypedDict": ".get_datasets_v1_observability_datasets_getop",
+ "GetDeploymentV1WorkflowsDeploymentsNameGetRequest": ".get_deployment_v1_workflows_deployments_name_getop",
+ "GetDeploymentV1WorkflowsDeploymentsNameGetRequestTypedDict": ".get_deployment_v1_workflows_deployments_name_getop",
"GetJudgeByIDV1ObservabilityJudgesJudgeIDGetRequest": ".get_judge_by_id_v1_observability_judges_judge_id_getop",
"GetJudgeByIDV1ObservabilityJudgesJudgeIDGetRequestTypedDict": ".get_judge_by_id_v1_observability_judges_judge_id_getop",
"GetJudgesV1ObservabilityJudgesGetRequest": ".get_judges_v1_observability_judges_getop",
"GetJudgesV1ObservabilityJudgesGetRequestTypedDict": ".get_judges_v1_observability_judges_getop",
+ "GetRunHistoryV1WorkflowsRunsRunIDHistoryGetRequest": ".get_run_history_v1_workflows_runs_run_id_history_getop",
+ "GetRunHistoryV1WorkflowsRunsRunIDHistoryGetRequestTypedDict": ".get_run_history_v1_workflows_runs_run_id_history_getop",
+ "GetRunV1WorkflowsRunsRunIDGetRequest": ".get_run_v1_workflows_runs_run_id_getop",
+ "GetRunV1WorkflowsRunsRunIDGetRequestTypedDict": ".get_run_v1_workflows_runs_run_id_getop",
"GetSimilarChatCompletionEventsV1ObservabilityChatCompletionEventsEventIDSimilarEventsGetRequest": ".get_similar_chat_completion_events_v1_observability_chat_completion_events_event_id_similar_events_getop",
"GetSimilarChatCompletionEventsV1ObservabilityChatCompletionEventsEventIDSimilarEventsGetRequestTypedDict": ".get_similar_chat_completion_events_v1_observability_chat_completion_events_event_id_similar_events_getop",
+ "GetStreamEventsV1WorkflowsEventsStreamGetRequest": ".get_stream_events_v1_workflows_events_stream_getop",
+ "GetStreamEventsV1WorkflowsEventsStreamGetRequestTypedDict": ".get_stream_events_v1_workflows_events_stream_getop",
+ "GetStreamEventsV1WorkflowsEventsStreamGetResponseBody": ".get_stream_events_v1_workflows_events_stream_getop",
+ "GetStreamEventsV1WorkflowsEventsStreamGetResponseBodyTypedDict": ".get_stream_events_v1_workflows_events_stream_getop",
+ "Scope": ".get_stream_events_v1_workflows_events_stream_getop",
"GetVoiceSampleAudioV1AudioVoicesVoiceIDSampleGetRequest": ".get_voice_sample_audio_v1_audio_voices_voice_id_sample_getop",
"GetVoiceSampleAudioV1AudioVoicesVoiceIDSampleGetRequestTypedDict": ".get_voice_sample_audio_v1_audio_voices_voice_id_sample_getop",
"GetVoiceV1AudioVoicesVoiceIDGetRequest": ".get_voice_v1_audio_voices_voice_id_getop",
"GetVoiceV1AudioVoicesVoiceIDGetRequestTypedDict": ".get_voice_v1_audio_voices_voice_id_getop",
+ "GetWorkflowEventsV1WorkflowsEventsListGetRequest": ".get_workflow_events_v1_workflows_events_list_getop",
+ "GetWorkflowEventsV1WorkflowsEventsListGetRequestTypedDict": ".get_workflow_events_v1_workflows_events_list_getop",
+ "GetWorkflowExecutionHistoryV1WorkflowsExecutionsExecutionIDHistoryGetRequest": ".get_workflow_execution_history_v1_workflows_executions_execution_id_history_getop",
+ "GetWorkflowExecutionHistoryV1WorkflowsExecutionsExecutionIDHistoryGetRequestTypedDict": ".get_workflow_execution_history_v1_workflows_executions_execution_id_history_getop",
+ "GetWorkflowExecutionTraceEventsRequest": ".get_workflow_execution_trace_eventsop",
+ "GetWorkflowExecutionTraceEventsRequestTypedDict": ".get_workflow_execution_trace_eventsop",
+ "GetWorkflowExecutionTraceOtelRequest": ".get_workflow_execution_trace_otelop",
+ "GetWorkflowExecutionTraceOtelRequestTypedDict": ".get_workflow_execution_trace_otelop",
+ "GetWorkflowExecutionTraceSummaryRequest": ".get_workflow_execution_trace_summaryop",
+ "GetWorkflowExecutionTraceSummaryRequestTypedDict": ".get_workflow_execution_trace_summaryop",
+ "GetWorkflowExecutionV1WorkflowsExecutionsExecutionIDGetRequest": ".get_workflow_execution_v1_workflows_executions_execution_id_getop",
+ "GetWorkflowExecutionV1WorkflowsExecutionsExecutionIDGetRequestTypedDict": ".get_workflow_execution_v1_workflows_executions_execution_id_getop",
+ "GetWorkflowMetricsV1WorkflowsWorkflowNameMetricsGetRequest": ".get_workflow_metrics_v1_workflows_workflow_name_metrics_getop",
+ "GetWorkflowMetricsV1WorkflowsWorkflowNameMetricsGetRequestTypedDict": ".get_workflow_metrics_v1_workflows_workflow_name_metrics_getop",
+ "GetWorkflowRegistrationV1WorkflowsRegistrationsWorkflowRegistrationIDGetRequest": ".get_workflow_registration_v1_workflows_registrations_workflow_registration_id_getop",
+ "GetWorkflowRegistrationV1WorkflowsRegistrationsWorkflowRegistrationIDGetRequestTypedDict": ".get_workflow_registration_v1_workflows_registrations_workflow_registration_id_getop",
+ "GetWorkflowRegistrationsV1WorkflowsRegistrationsGetRequest": ".get_workflow_registrations_v1_workflows_registrations_getop",
+ "GetWorkflowRegistrationsV1WorkflowsRegistrationsGetRequestTypedDict": ".get_workflow_registrations_v1_workflows_registrations_getop",
+ "GetWorkflowV1WorkflowsWorkflowIdentifierGetRequest": ".get_workflow_v1_workflows_workflow_identifier_getop",
+ "GetWorkflowV1WorkflowsWorkflowIdentifierGetRequestTypedDict": ".get_workflow_v1_workflows_workflow_identifier_getop",
+ "GetWorkflowsV1WorkflowsGetRequest": ".get_workflows_v1_workflows_getop",
+ "GetWorkflowsV1WorkflowsGetRequestTypedDict": ".get_workflows_v1_workflows_getop",
+ "GetWorkflowsV1WorkflowsGetResponse": ".get_workflows_v1_workflows_getop",
+ "GetWorkflowsV1WorkflowsGetResponseTypedDict": ".get_workflows_v1_workflows_getop",
"GetFileResponse": ".getfileresponse",
"GetFileResponseTypedDict": ".getfileresponse",
"GetSignedURLResponse": ".getsignedurlresponse",
@@ -3207,6 +4437,8 @@
"JobMetadataTypedDict": ".jobmetadata",
"JobsAPIRoutesBatchCancelBatchJobRequest": ".jobs_api_routes_batch_cancel_batch_jobop",
"JobsAPIRoutesBatchCancelBatchJobRequestTypedDict": ".jobs_api_routes_batch_cancel_batch_jobop",
+ "JobsAPIRoutesBatchDeleteBatchJobRequest": ".jobs_api_routes_batch_delete_batch_jobop",
+ "JobsAPIRoutesBatchDeleteBatchJobRequestTypedDict": ".jobs_api_routes_batch_delete_batch_jobop",
"JobsAPIRoutesBatchGetBatchJobRequest": ".jobs_api_routes_batch_get_batch_jobop",
"JobsAPIRoutesBatchGetBatchJobRequestTypedDict": ".jobs_api_routes_batch_get_batch_jobop",
"JobsAPIRoutesBatchGetBatchJobsRequest": ".jobs_api_routes_batch_get_batch_jobsop",
@@ -3244,6 +4476,27 @@
"JobsAPIRoutesFineTuningUpdateFineTunedModelResponse": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop",
"JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop",
"UnknownJobsAPIRoutesFineTuningUpdateFineTunedModelResponse": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop",
+ "JSONPatchAdd": ".jsonpatchadd",
+ "JSONPatchAddTypedDict": ".jsonpatchadd",
+ "JSONPatchAppend": ".jsonpatchappend",
+ "JSONPatchAppendTypedDict": ".jsonpatchappend",
+ "JSONPatchPayloadRequest": ".jsonpatchpayloadrequest",
+ "JSONPatchPayloadRequestTypedDict": ".jsonpatchpayloadrequest",
+ "JSONPatchPayloadRequestValue": ".jsonpatchpayloadrequest",
+ "JSONPatchPayloadRequestValueTypedDict": ".jsonpatchpayloadrequest",
+ "JSONPatchPayloadResponse": ".jsonpatchpayloadresponse",
+ "JSONPatchPayloadResponseTypedDict": ".jsonpatchpayloadresponse",
+ "JSONPatchPayloadResponseValue": ".jsonpatchpayloadresponse",
+ "JSONPatchPayloadResponseValueTypedDict": ".jsonpatchpayloadresponse",
+ "UnknownJSONPatchPayloadResponseValue": ".jsonpatchpayloadresponse",
+ "JSONPatchRemove": ".jsonpatchremove",
+ "JSONPatchRemoveTypedDict": ".jsonpatchremove",
+ "JSONPatchReplace": ".jsonpatchreplace",
+ "JSONPatchReplaceTypedDict": ".jsonpatchreplace",
+ "JSONPayloadRequest": ".jsonpayloadrequest",
+ "JSONPayloadRequestTypedDict": ".jsonpayloadrequest",
+ "JSONPayloadResponse": ".jsonpayloadresponse",
+ "JSONPayloadResponseTypedDict": ".jsonpayloadresponse",
"JSONSchema": ".jsonschema",
"JSONSchemaTypedDict": ".jsonschema",
"Judge": ".judge",
@@ -3302,6 +4555,8 @@
"LibrariesDocumentsUploadV1RequestTypedDict": ".libraries_documents_upload_v1op",
"LibrariesGetV1Request": ".libraries_get_v1op",
"LibrariesGetV1RequestTypedDict": ".libraries_get_v1op",
+ "LibrariesListV1Request": ".libraries_list_v1op",
+ "LibrariesListV1RequestTypedDict": ".libraries_list_v1op",
"LibrariesShareCreateV1Request": ".libraries_share_create_v1op",
"LibrariesShareCreateV1RequestTypedDict": ".libraries_share_create_v1op",
"LibrariesShareDeleteV1Request": ".libraries_share_delete_v1op",
@@ -3312,10 +4567,19 @@
"LibrariesUpdateV1RequestTypedDict": ".libraries_update_v1op",
"Library": ".library",
"LibraryTypedDict": ".library",
+ "ListDeploymentsV1WorkflowsDeploymentsGetRequest": ".list_deployments_v1_workflows_deployments_getop",
+ "ListDeploymentsV1WorkflowsDeploymentsGetRequestTypedDict": ".list_deployments_v1_workflows_deployments_getop",
"ListModelsV1ModelsGetRequest": ".list_models_v1_models_getop",
"ListModelsV1ModelsGetRequestTypedDict": ".list_models_v1_models_getop",
+ "ListRunsV1WorkflowsRunsGetRequest": ".list_runs_v1_workflows_runs_getop",
+ "ListRunsV1WorkflowsRunsGetRequestTypedDict": ".list_runs_v1_workflows_runs_getop",
+ "ListRunsV1WorkflowsRunsGetResponse": ".list_runs_v1_workflows_runs_getop",
+ "ListRunsV1WorkflowsRunsGetResponseTypedDict": ".list_runs_v1_workflows_runs_getop",
+ "ListRunsV1WorkflowsRunsGetStatus": ".list_runs_v1_workflows_runs_getop",
+ "ListRunsV1WorkflowsRunsGetStatusTypedDict": ".list_runs_v1_workflows_runs_getop",
"ListVoicesV1AudioVoicesGetRequest": ".list_voices_v1_audio_voices_getop",
"ListVoicesV1AudioVoicesGetRequestTypedDict": ".list_voices_v1_audio_voices_getop",
+ "ListVoicesV1AudioVoicesGetType": ".list_voices_v1_audio_voices_getop",
"ListBatchJobsResponse": ".listbatchjobsresponse",
"ListBatchJobsResponseTypedDict": ".listbatchjobsresponse",
"ListCampaignSelectedEventsResponse": ".listcampaignselectedeventsresponse",
@@ -3345,6 +4609,10 @@
"ListLibrariesResponseTypedDict": ".listlibrariesresponse",
"ListSharingResponse": ".listsharingresponse",
"ListSharingResponseTypedDict": ".listsharingresponse",
+ "ListWorkflowEventResponse": ".listworkfloweventresponse",
+ "ListWorkflowEventResponseEvent": ".listworkfloweventresponse",
+ "ListWorkflowEventResponseEventTypedDict": ".listworkfloweventresponse",
+ "ListWorkflowEventResponseTypedDict": ".listworkfloweventresponse",
"MCPServerIcon": ".mcpservericon",
"MCPServerIconTypedDict": ".mcpservericon",
"MCPTool": ".mcptool",
@@ -3411,6 +4679,8 @@
"ModerationObjectTypedDict": ".moderationobject",
"ModerationResponse": ".moderationresponse",
"ModerationResponseTypedDict": ".moderationresponse",
+ "NetworkEncodedInput": ".networkencodedinput",
+ "NetworkEncodedInputTypedDict": ".networkencodedinput",
"OAuth2TokenAuth": ".oauth2tokenauth",
"OAuth2TokenAuthTypedDict": ".oauth2tokenauth",
"ObservabilityErrorCode": ".observabilityerrorcode",
@@ -3471,6 +4741,16 @@
"ProcessStatus": ".processstatus",
"PromptTokensDetails": ".prompttokensdetails",
"PromptTokensDetailsTypedDict": ".prompttokensdetails",
+ "QueryWorkflowExecutionV1WorkflowsExecutionsExecutionIDQueriesPostRequest": ".query_workflow_execution_v1_workflows_executions_execution_id_queries_postop",
+ "QueryWorkflowExecutionV1WorkflowsExecutionsExecutionIDQueriesPostRequestTypedDict": ".query_workflow_execution_v1_workflows_executions_execution_id_queries_postop",
+ "QueryDefinition": ".querydefinition",
+ "QueryDefinitionTypedDict": ".querydefinition",
+ "QueryInvocationBody": ".queryinvocationbody",
+ "QueryInvocationBodyInput": ".queryinvocationbody",
+ "QueryInvocationBodyInputTypedDict": ".queryinvocationbody",
+ "QueryInvocationBodyTypedDict": ".queryinvocationbody",
+ "QueryWorkflowResponse": ".queryworkflowresponse",
+ "QueryWorkflowResponseTypedDict": ".queryworkflowresponse",
"RealtimeTranscriptionError": ".realtimetranscriptionerror",
"RealtimeTranscriptionErrorTypedDict": ".realtimetranscriptionerror",
"RealtimeTranscriptionErrorDetail": ".realtimetranscriptionerrordetail",
@@ -3499,6 +4779,10 @@
"ReferenceID": ".referencechunk",
"ReferenceIDTypedDict": ".referencechunk",
"RequestSource": ".requestsource",
+ "ResetWorkflowV1WorkflowsExecutionsExecutionIDResetPostRequest": ".reset_workflow_v1_workflows_executions_execution_id_reset_postop",
+ "ResetWorkflowV1WorkflowsExecutionsExecutionIDResetPostRequestTypedDict": ".reset_workflow_v1_workflows_executions_execution_id_reset_postop",
+ "ResetInvocationBody": ".resetinvocationbody",
+ "ResetInvocationBodyTypedDict": ".resetinvocationbody",
"ResourceLink": ".resourcelink",
"ResourceLinkTypedDict": ".resourcelink",
"ResourceVisibility": ".resourcevisibility",
@@ -3518,6 +4802,23 @@
"UnknownResponseRetrieveModelV1ModelsModelIDGet": ".retrieve_model_v1_models_model_id_getop",
"Roles": ".roles",
"SampleType": ".sampletype",
+ "ScalarMetric": ".scalarmetric",
+ "ScalarMetricTypedDict": ".scalarmetric",
+ "ScalarMetricValue": ".scalarmetric",
+ "ScalarMetricValueTypedDict": ".scalarmetric",
+ "ScheduleCalendar": ".schedulecalendar",
+ "ScheduleCalendarTypedDict": ".schedulecalendar",
+ "ScheduleDefinition": ".scheduledefinition",
+ "ScheduleDefinitionTypedDict": ".scheduledefinition",
+ "ScheduleDefinitionOutput": ".scheduledefinitionoutput",
+ "ScheduleDefinitionOutputTypedDict": ".scheduledefinitionoutput",
+ "ScheduleInterval": ".scheduleinterval",
+ "ScheduleIntervalTypedDict": ".scheduleinterval",
+ "ScheduleOverlapPolicy": ".scheduleoverlappolicy",
+ "SchedulePolicy": ".schedulepolicy",
+ "SchedulePolicyTypedDict": ".schedulepolicy",
+ "ScheduleRange": ".schedulerange",
+ "ScheduleRangeTypedDict": ".schedulerange",
"SearchChatCompletionEventIdsRequest": ".searchchatcompletioneventidsrequest",
"SearchChatCompletionEventIdsRequestTypedDict": ".searchchatcompletioneventidsrequest",
"SearchChatCompletionEventIdsResponse": ".searchchatcompletioneventidsresponse",
@@ -3535,6 +4836,18 @@
"SharingDeleteTypedDict": ".sharingdelete",
"SharingRequest": ".sharingrequest",
"SharingRequestTypedDict": ".sharingrequest",
+ "SignalWorkflowExecutionV1WorkflowsExecutionsExecutionIDSignalsPostRequest": ".signal_workflow_execution_v1_workflows_executions_execution_id_signals_postop",
+ "SignalWorkflowExecutionV1WorkflowsExecutionsExecutionIDSignalsPostRequestTypedDict": ".signal_workflow_execution_v1_workflows_executions_execution_id_signals_postop",
+ "SignalDefinition": ".signaldefinition",
+ "SignalDefinitionTypedDict": ".signaldefinition",
+ "SignalInvocationBody": ".signalinvocationbody",
+ "SignalInvocationBodyInput": ".signalinvocationbody",
+ "SignalInvocationBodyInputTypedDict": ".signalinvocationbody",
+ "SignalInvocationBodyNetworkEncodedInput": ".signalinvocationbody",
+ "SignalInvocationBodyNetworkEncodedInputTypedDict": ".signalinvocationbody",
+ "SignalInvocationBodyTypedDict": ".signalinvocationbody",
+ "SignalWorkflowResponse": ".signalworkflowresponse",
+ "SignalWorkflowResponseTypedDict": ".signalworkflowresponse",
"Source": ".source",
"SpeechResponse": ".speech_v1_audio_speech_postop",
"SpeechResponseTypedDict": ".speech_v1_audio_speech_postop",
@@ -3554,12 +4867,49 @@
"SpeechStreamDoneTypedDict": ".speechstreamdone",
"SpeechStreamEventTypes": ".speechstreameventtypes",
"SSETypes": ".ssetypes",
+ "StreamV1WorkflowsExecutionsExecutionIDStreamGetRequest": ".stream_v1_workflows_executions_execution_id_stream_getop",
+ "StreamV1WorkflowsExecutionsExecutionIDStreamGetRequestTypedDict": ".stream_v1_workflows_executions_execution_id_stream_getop",
+ "StreamV1WorkflowsExecutionsExecutionIDStreamGetResponseBody": ".stream_v1_workflows_executions_execution_id_stream_getop",
+ "StreamV1WorkflowsExecutionsExecutionIDStreamGetResponseBodyTypedDict": ".stream_v1_workflows_executions_execution_id_stream_getop",
+ "StreamEventSsePayload": ".streameventssepayload",
+ "StreamEventSsePayloadData": ".streameventssepayload",
+ "StreamEventSsePayloadDataTypedDict": ".streameventssepayload",
+ "StreamEventSsePayloadTypedDict": ".streameventssepayload",
+ "StreamEventWorkflowContext": ".streameventworkflowcontext",
+ "StreamEventWorkflowContextTypedDict": ".streameventworkflowcontext",
"SystemMessage": ".systemmessage",
"SystemMessageContent": ".systemmessage",
"SystemMessageContentTypedDict": ".systemmessage",
"SystemMessageTypedDict": ".systemmessage",
"SystemMessageContentChunks": ".systemmessagecontentchunks",
"SystemMessageContentChunksTypedDict": ".systemmessagecontentchunks",
+ "TempoGetTraceResponse": ".tempogettraceresponse",
+ "TempoGetTraceResponseTypedDict": ".tempogettraceresponse",
+ "TempoTraceAttribute": ".tempotraceattribute",
+ "TempoTraceAttributeTypedDict": ".tempotraceattribute",
+ "TempoTraceAttributeValue": ".tempotraceattribute",
+ "TempoTraceAttributeValueTypedDict": ".tempotraceattribute",
+ "TempoTraceAttributeBoolValue": ".tempotraceattributeboolvalue",
+ "TempoTraceAttributeBoolValueTypedDict": ".tempotraceattributeboolvalue",
+ "TempoTraceAttributeIntValue": ".tempotraceattributeintvalue",
+ "TempoTraceAttributeIntValueTypedDict": ".tempotraceattributeintvalue",
+ "TempoTraceAttributeStringValue": ".tempotraceattributestringvalue",
+ "TempoTraceAttributeStringValueTypedDict": ".tempotraceattributestringvalue",
+ "TempoTraceBatch": ".tempotracebatch",
+ "TempoTraceBatchTypedDict": ".tempotracebatch",
+ "TempoTraceEvent": ".tempotraceevent",
+ "TempoTraceEventTypedDict": ".tempotraceevent",
+ "TempoTraceResource": ".tempotraceresource",
+ "TempoTraceResourceTypedDict": ".tempotraceresource",
+ "TempoTraceScope": ".tempotracescope",
+ "TempoTraceScopeTypedDict": ".tempotracescope",
+ "TempoTraceScopeKind": ".tempotracescopekind",
+ "TempoTraceScopeSpan": ".tempotracescopespan",
+ "TempoTraceScopeSpanTypedDict": ".tempotracescopespan",
+ "TempoTraceSpan": ".tempotracespan",
+ "TempoTraceSpanTypedDict": ".tempotracespan",
+ "TerminateWorkflowExecutionV1WorkflowsExecutionsExecutionIDTerminatePostRequest": ".terminate_workflow_execution_v1_workflows_executions_execution_id_terminate_postop",
+ "TerminateWorkflowExecutionV1WorkflowsExecutionsExecutionIDTerminatePostRequestTypedDict": ".terminate_workflow_execution_v1_workflows_executions_execution_id_terminate_postop",
"TextChunk": ".textchunk",
"TextChunkTypedDict": ".textchunk",
"TextContent": ".textcontent",
@@ -3570,6 +4920,12 @@
"ThinkChunkTypedDict": ".thinkchunk",
"Thinking": ".thinkchunk",
"ThinkingTypedDict": ".thinkchunk",
+ "TimeSeriesMetric": ".timeseriesmetric",
+ "TimeSeriesMetricTypedDict": ".timeseriesmetric",
+ "TimeSeriesMetricValue1": ".timeseriesmetric",
+ "TimeSeriesMetricValue1TypedDict": ".timeseriesmetric",
+ "TimeSeriesMetricValue2": ".timeseriesmetric",
+ "TimeSeriesMetricValue2TypedDict": ".timeseriesmetric",
"TimestampGranularity": ".timestampgranularity",
"Tool": ".tool",
"ToolTypedDict": ".tool",
@@ -3642,8 +4998,12 @@
"TurbineToolLocaleTypedDict": ".turbinetoollocale",
"TurbineToolMeta": ".turbinetoolmeta",
"TurbineToolMetaTypedDict": ".turbinetoolmeta",
+ "UnarchiveWorkflowV1WorkflowsWorkflowIdentifierUnarchivePutRequest": ".unarchive_workflow_v1_workflows_workflow_identifier_unarchive_putop",
+ "UnarchiveWorkflowV1WorkflowsWorkflowIdentifierUnarchivePutRequestTypedDict": ".unarchive_workflow_v1_workflows_workflow_identifier_unarchive_putop",
"UnarchiveModelResponse": ".unarchivemodelresponse",
"UnarchiveModelResponseTypedDict": ".unarchivemodelresponse",
+ "UnscheduleWorkflowV1WorkflowsSchedulesScheduleIDDeleteRequest": ".unschedule_workflow_v1_workflows_schedules_schedule_id_deleteop",
+ "UnscheduleWorkflowV1WorkflowsSchedulesScheduleIDDeleteRequestTypedDict": ".unschedule_workflow_v1_workflows_schedules_schedule_id_deleteop",
"UpdateDatasetRecordPayloadV1ObservabilityDatasetRecordsDatasetRecordIDPayloadPutRequest": ".update_dataset_record_payload_v1_observability_dataset_records_dataset_record_id_payload_putop",
"UpdateDatasetRecordPayloadV1ObservabilityDatasetRecordsDatasetRecordIDPayloadPutRequestTypedDict": ".update_dataset_record_payload_v1_observability_dataset_records_dataset_record_id_payload_putop",
"UpdateDatasetRecordPropertiesV1ObservabilityDatasetRecordsDatasetRecordIDPropertiesPutRequest": ".update_dataset_record_properties_v1_observability_dataset_records_dataset_record_id_properties_putop",
@@ -3654,6 +5014,10 @@
"UpdateJudgeV1ObservabilityJudgesJudgeIDPutRequestTypedDict": ".update_judge_v1_observability_judges_judge_id_putop",
"UpdateVoiceV1AudioVoicesVoiceIDPatchRequest": ".update_voice_v1_audio_voices_voice_id_patchop",
"UpdateVoiceV1AudioVoicesVoiceIDPatchRequestTypedDict": ".update_voice_v1_audio_voices_voice_id_patchop",
+ "UpdateWorkflowExecutionV1WorkflowsExecutionsExecutionIDUpdatesPostRequest": ".update_workflow_execution_v1_workflows_executions_execution_id_updates_postop",
+ "UpdateWorkflowExecutionV1WorkflowsExecutionsExecutionIDUpdatesPostRequestTypedDict": ".update_workflow_execution_v1_workflows_executions_execution_id_updates_postop",
+ "UpdateWorkflowV1WorkflowsWorkflowIdentifierPutRequest": ".update_workflow_v1_workflows_workflow_identifier_putop",
+ "UpdateWorkflowV1WorkflowsWorkflowIdentifierPutRequestTypedDict": ".update_workflow_v1_workflows_workflow_identifier_putop",
"UpdateAgentRequest": ".updateagentrequest",
"UpdateAgentRequestTool": ".updateagentrequest",
"UpdateAgentRequestToolTypedDict": ".updateagentrequest",
@@ -3666,10 +5030,16 @@
"UpdateDatasetRecordPropertiesRequestTypedDict": ".updatedatasetrecordpropertiesrequest",
"UpdateDatasetRequest": ".updatedatasetrequest",
"UpdateDatasetRequestTypedDict": ".updatedatasetrequest",
+ "UpdateDefinition": ".updatedefinition",
+ "UpdateDefinitionTypedDict": ".updatedefinition",
"Attributes": ".updatedocumentrequest",
"AttributesTypedDict": ".updatedocumentrequest",
"UpdateDocumentRequest": ".updatedocumentrequest",
"UpdateDocumentRequestTypedDict": ".updatedocumentrequest",
+ "UpdateInvocationBody": ".updateinvocationbody",
+ "UpdateInvocationBodyInput": ".updateinvocationbody",
+ "UpdateInvocationBodyInputTypedDict": ".updateinvocationbody",
+ "UpdateInvocationBodyTypedDict": ".updateinvocationbody",
"UpdateJudgeRequest": ".updatejudgerequest",
"UpdateJudgeRequestOutput": ".updatejudgerequest",
"UpdateJudgeRequestOutputTypedDict": ".updatejudgerequest",
@@ -3678,6 +5048,8 @@
"UpdateLibraryRequestTypedDict": ".updatelibraryrequest",
"UpdateModelRequest": ".updatemodelrequest",
"UpdateModelRequestTypedDict": ".updatemodelrequest",
+ "UpdateWorkflowResponse": ".updateworkflowresponse",
+ "UpdateWorkflowResponseTypedDict": ".updateworkflowresponse",
"UsageInfo": ".usageinfo",
"UsageInfoTypedDict": ".usageinfo",
"UsageInfoDollarDefs": ".usageinfo_dollar_defs",
@@ -3708,6 +5080,135 @@
"WebSearchPremiumToolTypedDict": ".websearchpremiumtool",
"WebSearchTool": ".websearchtool",
"WebSearchToolTypedDict": ".websearchtool",
+ "Workflow": ".workflow",
+ "WorkflowTypedDict": ".workflow",
+ "WorkflowArchiveResponse": ".workflowarchiveresponse",
+ "WorkflowArchiveResponseTypedDict": ".workflowarchiveresponse",
+ "WorkflowBasicDefinition": ".workflowbasicdefinition",
+ "WorkflowBasicDefinitionTypedDict": ".workflowbasicdefinition",
+ "WorkflowCodeDefinition": ".workflowcodedefinition",
+ "WorkflowCodeDefinitionTypedDict": ".workflowcodedefinition",
+ "WorkflowEventBatchRequest": ".workfloweventbatchrequest",
+ "WorkflowEventBatchRequestEvent": ".workfloweventbatchrequest",
+ "WorkflowEventBatchRequestEventTypedDict": ".workfloweventbatchrequest",
+ "WorkflowEventBatchRequestTypedDict": ".workfloweventbatchrequest",
+ "WorkflowEventBatchResponse": ".workfloweventbatchresponse",
+ "WorkflowEventBatchResponseStatus": ".workfloweventbatchresponse",
+ "WorkflowEventBatchResponseTypedDict": ".workfloweventbatchresponse",
+ "WorkflowEventRequest": ".workfloweventrequest",
+ "WorkflowEventRequestEvent": ".workfloweventrequest",
+ "WorkflowEventRequestEventTypedDict": ".workfloweventrequest",
+ "WorkflowEventRequestTypedDict": ".workfloweventrequest",
+ "WorkflowEventResponse": ".workfloweventresponse",
+ "WorkflowEventResponseStatus": ".workfloweventresponse",
+ "WorkflowEventResponseTypedDict": ".workfloweventresponse",
+ "WorkflowEventType": ".workfloweventtype",
+ "WorkflowExecutionCanceledAttributes": ".workflowexecutioncanceledattributes",
+ "WorkflowExecutionCanceledAttributesTypedDict": ".workflowexecutioncanceledattributes",
+ "WorkflowExecutionCanceledRequest": ".workflowexecutioncanceledrequest",
+ "WorkflowExecutionCanceledRequestTypedDict": ".workflowexecutioncanceledrequest",
+ "WorkflowExecutionCanceledResponse": ".workflowexecutioncanceledresponse",
+ "WorkflowExecutionCanceledResponseTypedDict": ".workflowexecutioncanceledresponse",
+ "WorkflowExecutionCompletedAttributesRequest": ".workflowexecutioncompletedattributesrequest",
+ "WorkflowExecutionCompletedAttributesRequestTypedDict": ".workflowexecutioncompletedattributesrequest",
+ "WorkflowExecutionCompletedAttributesResponse": ".workflowexecutioncompletedattributesresponse",
+ "WorkflowExecutionCompletedAttributesResponseTypedDict": ".workflowexecutioncompletedattributesresponse",
+ "WorkflowExecutionCompletedRequest": ".workflowexecutioncompletedrequest",
+ "WorkflowExecutionCompletedRequestTypedDict": ".workflowexecutioncompletedrequest",
+ "WorkflowExecutionCompletedResponse": ".workflowexecutioncompletedresponse",
+ "WorkflowExecutionCompletedResponseTypedDict": ".workflowexecutioncompletedresponse",
+ "WorkflowExecutionContinuedAsNewAttributesRequest": ".workflowexecutioncontinuedasnewattributesrequest",
+ "WorkflowExecutionContinuedAsNewAttributesRequestTypedDict": ".workflowexecutioncontinuedasnewattributesrequest",
+ "WorkflowExecutionContinuedAsNewAttributesResponse": ".workflowexecutioncontinuedasnewattributesresponse",
+ "WorkflowExecutionContinuedAsNewAttributesResponseTypedDict": ".workflowexecutioncontinuedasnewattributesresponse",
+ "WorkflowExecutionContinuedAsNewRequest": ".workflowexecutioncontinuedasnewrequest",
+ "WorkflowExecutionContinuedAsNewRequestTypedDict": ".workflowexecutioncontinuedasnewrequest",
+ "WorkflowExecutionContinuedAsNewResponse": ".workflowexecutioncontinuedasnewresponse",
+ "WorkflowExecutionContinuedAsNewResponseTypedDict": ".workflowexecutioncontinuedasnewresponse",
+ "WorkflowExecutionFailedAttributes": ".workflowexecutionfailedattributes",
+ "WorkflowExecutionFailedAttributesTypedDict": ".workflowexecutionfailedattributes",
+ "WorkflowExecutionFailedRequest": ".workflowexecutionfailedrequest",
+ "WorkflowExecutionFailedRequestTypedDict": ".workflowexecutionfailedrequest",
+ "WorkflowExecutionFailedResponse": ".workflowexecutionfailedresponse",
+ "WorkflowExecutionFailedResponseTypedDict": ".workflowexecutionfailedresponse",
+ "WorkflowExecutionListResponse": ".workflowexecutionlistresponse",
+ "WorkflowExecutionListResponseTypedDict": ".workflowexecutionlistresponse",
+ "WorkflowExecutionProgressTraceEvent": ".workflowexecutionprogresstraceevent",
+ "WorkflowExecutionProgressTraceEventTypedDict": ".workflowexecutionprogresstraceevent",
+ "WorkflowExecutionRequest": ".workflowexecutionrequest",
+ "WorkflowExecutionRequestTypedDict": ".workflowexecutionrequest",
+ "WorkflowExecutionResponse": ".workflowexecutionresponse",
+ "WorkflowExecutionResponseTypedDict": ".workflowexecutionresponse",
+ "WorkflowExecutionStartedAttributesRequest": ".workflowexecutionstartedattributesrequest",
+ "WorkflowExecutionStartedAttributesRequestTypedDict": ".workflowexecutionstartedattributesrequest",
+ "WorkflowExecutionStartedAttributesResponse": ".workflowexecutionstartedattributesresponse",
+ "WorkflowExecutionStartedAttributesResponseTypedDict": ".workflowexecutionstartedattributesresponse",
+ "WorkflowExecutionStartedRequest": ".workflowexecutionstartedrequest",
+ "WorkflowExecutionStartedRequestTypedDict": ".workflowexecutionstartedrequest",
+ "WorkflowExecutionStartedResponse": ".workflowexecutionstartedresponse",
+ "WorkflowExecutionStartedResponseTypedDict": ".workflowexecutionstartedresponse",
+ "WorkflowExecutionStatus": ".workflowexecutionstatus",
+ "WorkflowExecutionSyncResponse": ".workflowexecutionsyncresponse",
+ "WorkflowExecutionSyncResponseTypedDict": ".workflowexecutionsyncresponse",
+ "WorkflowExecutionTraceEvent": ".workflowexecutiontraceevent",
+ "WorkflowExecutionTraceEventTypedDict": ".workflowexecutiontraceevent",
+ "WorkflowExecutionTraceEventsResponse": ".workflowexecutiontraceeventsresponse",
+ "WorkflowExecutionTraceEventsResponseEvent": ".workflowexecutiontraceeventsresponse",
+ "WorkflowExecutionTraceEventsResponseEventTypedDict": ".workflowexecutiontraceeventsresponse",
+ "WorkflowExecutionTraceEventsResponseTypedDict": ".workflowexecutiontraceeventsresponse",
+ "WorkflowExecutionTraceOTelResponse": ".workflowexecutiontraceotelresponse",
+ "WorkflowExecutionTraceOTelResponseTypedDict": ".workflowexecutiontraceotelresponse",
+ "WorkflowExecutionTraceSummaryAttributesValues": ".workflowexecutiontracesummaryattributesvalues",
+ "WorkflowExecutionTraceSummaryAttributesValuesTypedDict": ".workflowexecutiontracesummaryattributesvalues",
+ "WorkflowExecutionTraceSummaryResponse": ".workflowexecutiontracesummaryresponse",
+ "WorkflowExecutionTraceSummaryResponseTypedDict": ".workflowexecutiontracesummaryresponse",
+ "WorkflowExecutionTraceSummarySpan": ".workflowexecutiontracesummaryspan",
+ "WorkflowExecutionTraceSummarySpanTypedDict": ".workflowexecutiontracesummaryspan",
+ "WorkflowExecutionWithoutResultResponse": ".workflowexecutionwithoutresultresponse",
+ "WorkflowExecutionWithoutResultResponseTypedDict": ".workflowexecutionwithoutresultresponse",
+ "WorkflowGetResponse": ".workflowgetresponse",
+ "WorkflowGetResponseTypedDict": ".workflowgetresponse",
+ "WorkflowListResponse": ".workflowlistresponse",
+ "WorkflowListResponseTypedDict": ".workflowlistresponse",
+ "WorkflowMetadata": ".workflowmetadata",
+ "WorkflowMetadataTypedDict": ".workflowmetadata",
+ "WorkflowMetrics": ".workflowmetrics",
+ "WorkflowMetricsTypedDict": ".workflowmetrics",
+ "WorkflowRegistration": ".workflowregistration",
+ "WorkflowRegistrationTypedDict": ".workflowregistration",
+ "WorkflowRegistrationGetResponse": ".workflowregistrationgetresponse",
+ "WorkflowRegistrationGetResponseTypedDict": ".workflowregistrationgetresponse",
+ "WorkflowRegistrationListResponse": ".workflowregistrationlistresponse",
+ "WorkflowRegistrationListResponseTypedDict": ".workflowregistrationlistresponse",
+ "WorkflowRegistrationWithWorkerStatus": ".workflowregistrationwithworkerstatus",
+ "WorkflowRegistrationWithWorkerStatusTypedDict": ".workflowregistrationwithworkerstatus",
+ "WorkflowScheduleListResponse": ".workflowschedulelistresponse",
+ "WorkflowScheduleListResponseTypedDict": ".workflowschedulelistresponse",
+ "WorkflowScheduleRequest": ".workflowschedulerequest",
+ "WorkflowScheduleRequestTypedDict": ".workflowschedulerequest",
+ "WorkflowScheduleResponse": ".workflowscheduleresponse",
+ "WorkflowScheduleResponseTypedDict": ".workflowscheduleresponse",
+ "WorkflowTaskFailedAttributes": ".workflowtaskfailedattributes",
+ "WorkflowTaskFailedAttributesTypedDict": ".workflowtaskfailedattributes",
+ "WorkflowTaskFailedRequest": ".workflowtaskfailedrequest",
+ "WorkflowTaskFailedRequestTypedDict": ".workflowtaskfailedrequest",
+ "WorkflowTaskFailedResponse": ".workflowtaskfailedresponse",
+ "WorkflowTaskFailedResponseTypedDict": ".workflowtaskfailedresponse",
+ "WorkflowTaskTimedOutAttributes": ".workflowtasktimedoutattributes",
+ "WorkflowTaskTimedOutAttributesTypedDict": ".workflowtasktimedoutattributes",
+ "WorkflowTaskTimedOutRequest": ".workflowtasktimedoutrequest",
+ "WorkflowTaskTimedOutRequestTypedDict": ".workflowtasktimedoutrequest",
+ "WorkflowTaskTimedOutResponse": ".workflowtasktimedoutresponse",
+ "WorkflowTaskTimedOutResponseTypedDict": ".workflowtasktimedoutresponse",
+ "WorkflowType": ".workflowtype",
+ "WorkflowUnarchiveResponse": ".workflowunarchiveresponse",
+ "WorkflowUnarchiveResponseTypedDict": ".workflowunarchiveresponse",
+ "WorkflowUpdateRequest": ".workflowupdaterequest",
+ "WorkflowUpdateRequestTypedDict": ".workflowupdaterequest",
+ "WorkflowUpdateResponse": ".workflowupdateresponse",
+ "WorkflowUpdateResponseTypedDict": ".workflowupdateresponse",
+ "WorkflowWithWorkerStatus": ".workflowwithworkerstatus",
+ "WorkflowWithWorkerStatusTypedDict": ".workflowwithworkerstatus",
}
diff --git a/src/mistralai/client/models/activitytaskcompletedattributesrequest.py b/src/mistralai/client/models/activitytaskcompletedattributesrequest.py
new file mode 100644
index 00000000..560310cf
--- /dev/null
+++ b/src/mistralai/client/models/activitytaskcompletedattributesrequest.py
@@ -0,0 +1,37 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: a9e5ef17794f
+
+from __future__ import annotations
+from .jsonpayloadrequest import JSONPayloadRequest, JSONPayloadRequestTypedDict
+from mistralai.client.types import BaseModel
+from typing_extensions import TypedDict
+
+
+class ActivityTaskCompletedAttributesRequestTypedDict(TypedDict):
+ r"""Attributes for activity task completed events."""
+
+ task_id: str
+ r"""Unique identifier for the activity task within the workflow."""
+ activity_name: str
+ r"""The registered name of the activity being executed."""
+ result: JSONPayloadRequestTypedDict
+ r"""A payload containing arbitrary JSON data.
+
+ Used for complete state snapshots or final results.
+ """
+
+
+class ActivityTaskCompletedAttributesRequest(BaseModel):
+ r"""Attributes for activity task completed events."""
+
+ task_id: str
+ r"""Unique identifier for the activity task within the workflow."""
+
+ activity_name: str
+ r"""The registered name of the activity being executed."""
+
+ result: JSONPayloadRequest
+ r"""A payload containing arbitrary JSON data.
+
+ Used for complete state snapshots or final results.
+ """
diff --git a/src/mistralai/client/models/activitytaskcompletedattributesresponse.py b/src/mistralai/client/models/activitytaskcompletedattributesresponse.py
new file mode 100644
index 00000000..899acb62
--- /dev/null
+++ b/src/mistralai/client/models/activitytaskcompletedattributesresponse.py
@@ -0,0 +1,37 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 8174941767cc
+
+from __future__ import annotations
+from .jsonpayloadresponse import JSONPayloadResponse, JSONPayloadResponseTypedDict
+from mistralai.client.types import BaseModel
+from typing_extensions import TypedDict
+
+
+class ActivityTaskCompletedAttributesResponseTypedDict(TypedDict):
+ r"""Attributes for activity task completed events."""
+
+ task_id: str
+ r"""Unique identifier for the activity task within the workflow."""
+ activity_name: str
+ r"""The registered name of the activity being executed."""
+ result: JSONPayloadResponseTypedDict
+ r"""A payload containing arbitrary JSON data.
+
+ Used for complete state snapshots or final results.
+ """
+
+
+class ActivityTaskCompletedAttributesResponse(BaseModel):
+ r"""Attributes for activity task completed events."""
+
+ task_id: str
+ r"""Unique identifier for the activity task within the workflow."""
+
+ activity_name: str
+ r"""The registered name of the activity being executed."""
+
+ result: JSONPayloadResponse
+ r"""A payload containing arbitrary JSON data.
+
+ Used for complete state snapshots or final results.
+ """
diff --git a/src/mistralai/client/models/activitytaskcompletedrequest.py b/src/mistralai/client/models/activitytaskcompletedrequest.py
new file mode 100644
index 00000000..fae82f66
--- /dev/null
+++ b/src/mistralai/client/models/activitytaskcompletedrequest.py
@@ -0,0 +1,120 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: d051525d65ba
+
+from __future__ import annotations
+from .activitytaskcompletedattributesrequest import (
+ ActivityTaskCompletedAttributesRequest,
+ ActivityTaskCompletedAttributesRequestTypedDict,
+)
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic import model_serializer
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class ActivityTaskCompletedRequestTypedDict(TypedDict):
+ r"""Emitted when an activity task completes successfully.
+
+ Contains timing information about the successful execution.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+ attributes: ActivityTaskCompletedAttributesRequestTypedDict
+ r"""Attributes for activity task completed events."""
+ event_timestamp: NotRequired[int]
+ r"""Unix timestamp in nanoseconds when the event was created."""
+ parent_workflow_exec_id: NotRequired[Nullable[str]]
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+ event_type: Literal["ACTIVITY_TASK_COMPLETED"]
+ r"""Event type discriminator."""
+
+
+class ActivityTaskCompletedRequest(BaseModel):
+ r"""Emitted when an activity task completes successfully.
+
+ Contains timing information about the successful execution.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+
+ attributes: ActivityTaskCompletedAttributesRequest
+ r"""Attributes for activity task completed events."""
+
+ event_timestamp: Optional[int] = None
+ r"""Unix timestamp in nanoseconds when the event was created."""
+
+ parent_workflow_exec_id: OptionalNullable[str] = UNSET
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+
+ event_type: Annotated[
+ Annotated[
+ Optional[Literal["ACTIVITY_TASK_COMPLETED"]],
+ AfterValidator(validate_const("ACTIVITY_TASK_COMPLETED")),
+ ],
+ pydantic.Field(alias="event_type"),
+ ] = "ACTIVITY_TASK_COMPLETED"
+ r"""Event type discriminator."""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(
+ ["event_timestamp", "parent_workflow_exec_id", "event_type"]
+ )
+ nullable_fields = set(["parent_workflow_exec_id"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
+
+
+try:
+ ActivityTaskCompletedRequest.model_rebuild()
+except NameError:
+ pass
diff --git a/src/mistralai/client/models/activitytaskcompletedresponse.py b/src/mistralai/client/models/activitytaskcompletedresponse.py
new file mode 100644
index 00000000..b324066b
--- /dev/null
+++ b/src/mistralai/client/models/activitytaskcompletedresponse.py
@@ -0,0 +1,112 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 28373f0a2c9e
+
+from __future__ import annotations
+from .activitytaskcompletedattributesresponse import (
+ ActivityTaskCompletedAttributesResponse,
+ ActivityTaskCompletedAttributesResponseTypedDict,
+)
+from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic import model_serializer
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, TypedDict
+
+
+class ActivityTaskCompletedResponseTypedDict(TypedDict):
+ r"""Emitted when an activity task completes successfully.
+
+ Contains timing information about the successful execution.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+ event_timestamp: int
+ r"""Unix timestamp in nanoseconds when the event was created."""
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+ parent_workflow_exec_id: Nullable[str]
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+ attributes: ActivityTaskCompletedAttributesResponseTypedDict
+ r"""Attributes for activity task completed events."""
+ event_type: Literal["ACTIVITY_TASK_COMPLETED"]
+ r"""Event type discriminator."""
+
+
+class ActivityTaskCompletedResponse(BaseModel):
+ r"""Emitted when an activity task completes successfully.
+
+ Contains timing information about the successful execution.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+
+ event_timestamp: int
+ r"""Unix timestamp in nanoseconds when the event was created."""
+
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+
+ parent_workflow_exec_id: Nullable[str]
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+
+ attributes: ActivityTaskCompletedAttributesResponse
+ r"""Attributes for activity task completed events."""
+
+ event_type: Annotated[
+ Annotated[
+ Optional[Literal["ACTIVITY_TASK_COMPLETED"]],
+ AfterValidator(validate_const("ACTIVITY_TASK_COMPLETED")),
+ ],
+ pydantic.Field(alias="event_type"),
+ ] = "ACTIVITY_TASK_COMPLETED"
+ r"""Event type discriminator."""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["event_type"])
+ nullable_fields = set(["parent_workflow_exec_id"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
+
+
+try:
+ ActivityTaskCompletedResponse.model_rebuild()
+except NameError:
+ pass
diff --git a/src/mistralai/client/models/activitytaskfailedattributes.py b/src/mistralai/client/models/activitytaskfailedattributes.py
new file mode 100644
index 00000000..37749d4a
--- /dev/null
+++ b/src/mistralai/client/models/activitytaskfailedattributes.py
@@ -0,0 +1,36 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 12635cd17417
+
+from __future__ import annotations
+from .failure import Failure, FailureTypedDict
+from mistralai.client.types import BaseModel
+from typing_extensions import TypedDict
+
+
+class ActivityTaskFailedAttributesTypedDict(TypedDict):
+ r"""Attributes for activity task failed events (final failure after all retries)."""
+
+ task_id: str
+ r"""Unique identifier for the activity task within the workflow."""
+ activity_name: str
+ r"""The registered name of the activity being executed."""
+ attempt: int
+ r"""The final attempt number that failed (1-indexed)."""
+ failure: FailureTypedDict
+ r"""Represents an error or exception that occurred during execution."""
+
+
+class ActivityTaskFailedAttributes(BaseModel):
+ r"""Attributes for activity task failed events (final failure after all retries)."""
+
+ task_id: str
+ r"""Unique identifier for the activity task within the workflow."""
+
+ activity_name: str
+ r"""The registered name of the activity being executed."""
+
+ attempt: int
+ r"""The final attempt number that failed (1-indexed)."""
+
+ failure: Failure
+ r"""Represents an error or exception that occurred during execution."""
diff --git a/src/mistralai/client/models/activitytaskfailedrequest.py b/src/mistralai/client/models/activitytaskfailedrequest.py
new file mode 100644
index 00000000..1d1e9226
--- /dev/null
+++ b/src/mistralai/client/models/activitytaskfailedrequest.py
@@ -0,0 +1,120 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 1d4b5b52ae29
+
+from __future__ import annotations
+from .activitytaskfailedattributes import (
+ ActivityTaskFailedAttributes,
+ ActivityTaskFailedAttributesTypedDict,
+)
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic import model_serializer
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class ActivityTaskFailedRequestTypedDict(TypedDict):
+ r"""Emitted when an activity task fails after exhausting all retry attempts.
+
+ This is a terminal event indicating the activity could not complete successfully.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+ attributes: ActivityTaskFailedAttributesTypedDict
+ r"""Attributes for activity task failed events (final failure after all retries)."""
+ event_timestamp: NotRequired[int]
+ r"""Unix timestamp in nanoseconds when the event was created."""
+ parent_workflow_exec_id: NotRequired[Nullable[str]]
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+ event_type: Literal["ACTIVITY_TASK_FAILED"]
+ r"""Event type discriminator."""
+
+
+class ActivityTaskFailedRequest(BaseModel):
+ r"""Emitted when an activity task fails after exhausting all retry attempts.
+
+ This is a terminal event indicating the activity could not complete successfully.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+
+ attributes: ActivityTaskFailedAttributes
+ r"""Attributes for activity task failed events (final failure after all retries)."""
+
+ event_timestamp: Optional[int] = None
+ r"""Unix timestamp in nanoseconds when the event was created."""
+
+ parent_workflow_exec_id: OptionalNullable[str] = UNSET
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+
+ event_type: Annotated[
+ Annotated[
+ Optional[Literal["ACTIVITY_TASK_FAILED"]],
+ AfterValidator(validate_const("ACTIVITY_TASK_FAILED")),
+ ],
+ pydantic.Field(alias="event_type"),
+ ] = "ACTIVITY_TASK_FAILED"
+ r"""Event type discriminator."""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(
+ ["event_timestamp", "parent_workflow_exec_id", "event_type"]
+ )
+ nullable_fields = set(["parent_workflow_exec_id"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
+
+
+try:
+ ActivityTaskFailedRequest.model_rebuild()
+except NameError:
+ pass
diff --git a/src/mistralai/client/models/activitytaskfailedresponse.py b/src/mistralai/client/models/activitytaskfailedresponse.py
new file mode 100644
index 00000000..3410ea84
--- /dev/null
+++ b/src/mistralai/client/models/activitytaskfailedresponse.py
@@ -0,0 +1,112 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: b739e8fc9b44
+
+from __future__ import annotations
+from .activitytaskfailedattributes import (
+ ActivityTaskFailedAttributes,
+ ActivityTaskFailedAttributesTypedDict,
+)
+from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic import model_serializer
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, TypedDict
+
+
+class ActivityTaskFailedResponseTypedDict(TypedDict):
+ r"""Emitted when an activity task fails after exhausting all retry attempts.
+
+ This is a terminal event indicating the activity could not complete successfully.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+ event_timestamp: int
+ r"""Unix timestamp in nanoseconds when the event was created."""
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+ parent_workflow_exec_id: Nullable[str]
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+ attributes: ActivityTaskFailedAttributesTypedDict
+ r"""Attributes for activity task failed events (final failure after all retries)."""
+ event_type: Literal["ACTIVITY_TASK_FAILED"]
+ r"""Event type discriminator."""
+
+
+class ActivityTaskFailedResponse(BaseModel):
+ r"""Emitted when an activity task fails after exhausting all retry attempts.
+
+ This is a terminal event indicating the activity could not complete successfully.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+
+ event_timestamp: int
+ r"""Unix timestamp in nanoseconds when the event was created."""
+
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+
+ parent_workflow_exec_id: Nullable[str]
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+
+ attributes: ActivityTaskFailedAttributes
+ r"""Attributes for activity task failed events (final failure after all retries)."""
+
+ event_type: Annotated[
+ Annotated[
+ Optional[Literal["ACTIVITY_TASK_FAILED"]],
+ AfterValidator(validate_const("ACTIVITY_TASK_FAILED")),
+ ],
+ pydantic.Field(alias="event_type"),
+ ] = "ACTIVITY_TASK_FAILED"
+ r"""Event type discriminator."""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["event_type"])
+ nullable_fields = set(["parent_workflow_exec_id"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
+
+
+try:
+ ActivityTaskFailedResponse.model_rebuild()
+except NameError:
+ pass
diff --git a/src/mistralai/client/models/activitytaskretryingattributes.py b/src/mistralai/client/models/activitytaskretryingattributes.py
new file mode 100644
index 00000000..b0ee11d8
--- /dev/null
+++ b/src/mistralai/client/models/activitytaskretryingattributes.py
@@ -0,0 +1,36 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 9aaa9eecac09
+
+from __future__ import annotations
+from .failure import Failure, FailureTypedDict
+from mistralai.client.types import BaseModel
+from typing_extensions import TypedDict
+
+
+class ActivityTaskRetryingAttributesTypedDict(TypedDict):
+ r"""Attributes for activity task retrying events."""
+
+ task_id: str
+ r"""Unique identifier for the activity task within the workflow."""
+ activity_name: str
+ r"""The registered name of the activity being executed."""
+ attempt: int
+ r"""The attempt number that failed (1-indexed)."""
+ failure: FailureTypedDict
+ r"""Represents an error or exception that occurred during execution."""
+
+
+class ActivityTaskRetryingAttributes(BaseModel):
+ r"""Attributes for activity task retrying events."""
+
+ task_id: str
+ r"""Unique identifier for the activity task within the workflow."""
+
+ activity_name: str
+ r"""The registered name of the activity being executed."""
+
+ attempt: int
+ r"""The attempt number that failed (1-indexed)."""
+
+ failure: Failure
+ r"""Represents an error or exception that occurred during execution."""
diff --git a/src/mistralai/client/models/activitytaskretryingrequest.py b/src/mistralai/client/models/activitytaskretryingrequest.py
new file mode 100644
index 00000000..8c4d84f9
--- /dev/null
+++ b/src/mistralai/client/models/activitytaskretryingrequest.py
@@ -0,0 +1,120 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 6f2e394e1e75
+
+from __future__ import annotations
+from .activitytaskretryingattributes import (
+ ActivityTaskRetryingAttributes,
+ ActivityTaskRetryingAttributesTypedDict,
+)
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic import model_serializer
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class ActivityTaskRetryingRequestTypedDict(TypedDict):
+ r"""Emitted when an activity task fails and will be retried.
+
+ Contains information about the failed attempt and the error that occurred.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+ attributes: ActivityTaskRetryingAttributesTypedDict
+ r"""Attributes for activity task retrying events."""
+ event_timestamp: NotRequired[int]
+ r"""Unix timestamp in nanoseconds when the event was created."""
+ parent_workflow_exec_id: NotRequired[Nullable[str]]
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+ event_type: Literal["ACTIVITY_TASK_RETRYING"]
+ r"""Event type discriminator."""
+
+
+class ActivityTaskRetryingRequest(BaseModel):
+ r"""Emitted when an activity task fails and will be retried.
+
+ Contains information about the failed attempt and the error that occurred.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+
+ attributes: ActivityTaskRetryingAttributes
+ r"""Attributes for activity task retrying events."""
+
+ event_timestamp: Optional[int] = None
+ r"""Unix timestamp in nanoseconds when the event was created."""
+
+ parent_workflow_exec_id: OptionalNullable[str] = UNSET
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+
+ event_type: Annotated[
+ Annotated[
+ Optional[Literal["ACTIVITY_TASK_RETRYING"]],
+ AfterValidator(validate_const("ACTIVITY_TASK_RETRYING")),
+ ],
+ pydantic.Field(alias="event_type"),
+ ] = "ACTIVITY_TASK_RETRYING"
+ r"""Event type discriminator."""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(
+ ["event_timestamp", "parent_workflow_exec_id", "event_type"]
+ )
+ nullable_fields = set(["parent_workflow_exec_id"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
+
+
+try:
+ ActivityTaskRetryingRequest.model_rebuild()
+except NameError:
+ pass
diff --git a/src/mistralai/client/models/activitytaskretryingresponse.py b/src/mistralai/client/models/activitytaskretryingresponse.py
new file mode 100644
index 00000000..cf054a27
--- /dev/null
+++ b/src/mistralai/client/models/activitytaskretryingresponse.py
@@ -0,0 +1,112 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 3a4ff1e01fa1
+
+from __future__ import annotations
+from .activitytaskretryingattributes import (
+ ActivityTaskRetryingAttributes,
+ ActivityTaskRetryingAttributesTypedDict,
+)
+from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic import model_serializer
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, TypedDict
+
+
+class ActivityTaskRetryingResponseTypedDict(TypedDict):
+ r"""Emitted when an activity task fails and will be retried.
+
+ Contains information about the failed attempt and the error that occurred.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+ event_timestamp: int
+ r"""Unix timestamp in nanoseconds when the event was created."""
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+ parent_workflow_exec_id: Nullable[str]
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+ attributes: ActivityTaskRetryingAttributesTypedDict
+ r"""Attributes for activity task retrying events."""
+ event_type: Literal["ACTIVITY_TASK_RETRYING"]
+ r"""Event type discriminator."""
+
+
+class ActivityTaskRetryingResponse(BaseModel):
+ r"""Emitted when an activity task fails and will be retried.
+
+ Contains information about the failed attempt and the error that occurred.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+
+ event_timestamp: int
+ r"""Unix timestamp in nanoseconds when the event was created."""
+
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+
+ parent_workflow_exec_id: Nullable[str]
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+
+ attributes: ActivityTaskRetryingAttributes
+ r"""Attributes for activity task retrying events."""
+
+ event_type: Annotated[
+ Annotated[
+ Optional[Literal["ACTIVITY_TASK_RETRYING"]],
+ AfterValidator(validate_const("ACTIVITY_TASK_RETRYING")),
+ ],
+ pydantic.Field(alias="event_type"),
+ ] = "ACTIVITY_TASK_RETRYING"
+ r"""Event type discriminator."""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["event_type"])
+ nullable_fields = set(["parent_workflow_exec_id"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
+
+
+try:
+ ActivityTaskRetryingResponse.model_rebuild()
+except NameError:
+ pass
diff --git a/src/mistralai/client/models/activitytaskstartedattributesrequest.py b/src/mistralai/client/models/activitytaskstartedattributesrequest.py
new file mode 100644
index 00000000..dfa3fa04
--- /dev/null
+++ b/src/mistralai/client/models/activitytaskstartedattributesrequest.py
@@ -0,0 +1,37 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 51b3e5622e0b
+
+from __future__ import annotations
+from .jsonpayloadrequest import JSONPayloadRequest, JSONPayloadRequestTypedDict
+from mistralai.client.types import BaseModel
+from typing_extensions import TypedDict
+
+
+class ActivityTaskStartedAttributesRequestTypedDict(TypedDict):
+ r"""Attributes for activity task started events."""
+
+ task_id: str
+ r"""Unique identifier for the activity task within the workflow."""
+ activity_name: str
+ r"""The registered name of the activity being executed."""
+ input: JSONPayloadRequestTypedDict
+ r"""A payload containing arbitrary JSON data.
+
+ Used for complete state snapshots or final results.
+ """
+
+
+class ActivityTaskStartedAttributesRequest(BaseModel):
+ r"""Attributes for activity task started events."""
+
+ task_id: str
+ r"""Unique identifier for the activity task within the workflow."""
+
+ activity_name: str
+ r"""The registered name of the activity being executed."""
+
+ input: JSONPayloadRequest
+ r"""A payload containing arbitrary JSON data.
+
+ Used for complete state snapshots or final results.
+ """
diff --git a/src/mistralai/client/models/activitytaskstartedattributesresponse.py b/src/mistralai/client/models/activitytaskstartedattributesresponse.py
new file mode 100644
index 00000000..100626d4
--- /dev/null
+++ b/src/mistralai/client/models/activitytaskstartedattributesresponse.py
@@ -0,0 +1,37 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 3a365e2c2942
+
+from __future__ import annotations
+from .jsonpayloadresponse import JSONPayloadResponse, JSONPayloadResponseTypedDict
+from mistralai.client.types import BaseModel
+from typing_extensions import TypedDict
+
+
+class ActivityTaskStartedAttributesResponseTypedDict(TypedDict):
+ r"""Attributes for activity task started events."""
+
+ task_id: str
+ r"""Unique identifier for the activity task within the workflow."""
+ activity_name: str
+ r"""The registered name of the activity being executed."""
+ input: JSONPayloadResponseTypedDict
+ r"""A payload containing arbitrary JSON data.
+
+ Used for complete state snapshots or final results.
+ """
+
+
+class ActivityTaskStartedAttributesResponse(BaseModel):
+ r"""Attributes for activity task started events."""
+
+ task_id: str
+ r"""Unique identifier for the activity task within the workflow."""
+
+ activity_name: str
+ r"""The registered name of the activity being executed."""
+
+ input: JSONPayloadResponse
+ r"""A payload containing arbitrary JSON data.
+
+ Used for complete state snapshots or final results.
+ """
diff --git a/src/mistralai/client/models/activitytaskstartedrequest.py b/src/mistralai/client/models/activitytaskstartedrequest.py
new file mode 100644
index 00000000..503b3998
--- /dev/null
+++ b/src/mistralai/client/models/activitytaskstartedrequest.py
@@ -0,0 +1,122 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: aa5813a6903a
+
+from __future__ import annotations
+from .activitytaskstartedattributesrequest import (
+ ActivityTaskStartedAttributesRequest,
+ ActivityTaskStartedAttributesRequestTypedDict,
+)
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic import model_serializer
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class ActivityTaskStartedRequestTypedDict(TypedDict):
+ r"""Emitted when an activity task begins execution.
+
+ This is the first event for an activity, emitted on the first attempt only.
+ Subsequent retry attempts emit ACTIVITY_TASK_RETRYING instead.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+ attributes: ActivityTaskStartedAttributesRequestTypedDict
+ r"""Attributes for activity task started events."""
+ event_timestamp: NotRequired[int]
+ r"""Unix timestamp in nanoseconds when the event was created."""
+ parent_workflow_exec_id: NotRequired[Nullable[str]]
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+ event_type: Literal["ACTIVITY_TASK_STARTED"]
+ r"""Event type discriminator."""
+
+
+class ActivityTaskStartedRequest(BaseModel):
+ r"""Emitted when an activity task begins execution.
+
+ This is the first event for an activity, emitted on the first attempt only.
+ Subsequent retry attempts emit ACTIVITY_TASK_RETRYING instead.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+
+ attributes: ActivityTaskStartedAttributesRequest
+ r"""Attributes for activity task started events."""
+
+ event_timestamp: Optional[int] = None
+ r"""Unix timestamp in nanoseconds when the event was created."""
+
+ parent_workflow_exec_id: OptionalNullable[str] = UNSET
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+
+ event_type: Annotated[
+ Annotated[
+ Optional[Literal["ACTIVITY_TASK_STARTED"]],
+ AfterValidator(validate_const("ACTIVITY_TASK_STARTED")),
+ ],
+ pydantic.Field(alias="event_type"),
+ ] = "ACTIVITY_TASK_STARTED"
+ r"""Event type discriminator."""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(
+ ["event_timestamp", "parent_workflow_exec_id", "event_type"]
+ )
+ nullable_fields = set(["parent_workflow_exec_id"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
+
+
+try:
+ ActivityTaskStartedRequest.model_rebuild()
+except NameError:
+ pass
diff --git a/src/mistralai/client/models/activitytaskstartedresponse.py b/src/mistralai/client/models/activitytaskstartedresponse.py
new file mode 100644
index 00000000..75186e02
--- /dev/null
+++ b/src/mistralai/client/models/activitytaskstartedresponse.py
@@ -0,0 +1,114 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: d01ef9cb3955
+
+from __future__ import annotations
+from .activitytaskstartedattributesresponse import (
+ ActivityTaskStartedAttributesResponse,
+ ActivityTaskStartedAttributesResponseTypedDict,
+)
+from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic import model_serializer
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, TypedDict
+
+
+class ActivityTaskStartedResponseTypedDict(TypedDict):
+ r"""Emitted when an activity task begins execution.
+
+ This is the first event for an activity, emitted on the first attempt only.
+ Subsequent retry attempts emit ACTIVITY_TASK_RETRYING instead.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+ event_timestamp: int
+ r"""Unix timestamp in nanoseconds when the event was created."""
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+ parent_workflow_exec_id: Nullable[str]
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+ attributes: ActivityTaskStartedAttributesResponseTypedDict
+ r"""Attributes for activity task started events."""
+ event_type: Literal["ACTIVITY_TASK_STARTED"]
+ r"""Event type discriminator."""
+
+
+class ActivityTaskStartedResponse(BaseModel):
+ r"""Emitted when an activity task begins execution.
+
+ This is the first event for an activity, emitted on the first attempt only.
+ Subsequent retry attempts emit ACTIVITY_TASK_RETRYING instead.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+
+ event_timestamp: int
+ r"""Unix timestamp in nanoseconds when the event was created."""
+
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+
+ parent_workflow_exec_id: Nullable[str]
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+
+ attributes: ActivityTaskStartedAttributesResponse
+ r"""Attributes for activity task started events."""
+
+ event_type: Annotated[
+ Annotated[
+ Optional[Literal["ACTIVITY_TASK_STARTED"]],
+ AfterValidator(validate_const("ACTIVITY_TASK_STARTED")),
+ ],
+ pydantic.Field(alias="event_type"),
+ ] = "ACTIVITY_TASK_STARTED"
+ r"""Event type discriminator."""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["event_type"])
+ nullable_fields = set(["parent_workflow_exec_id"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
+
+
+try:
+ ActivityTaskStartedResponse.model_rebuild()
+except NameError:
+ pass
diff --git a/src/mistralai/client/models/archive_workflow_v1_workflows_workflow_identifier_archive_putop.py b/src/mistralai/client/models/archive_workflow_v1_workflows_workflow_identifier_archive_putop.py
new file mode 100644
index 00000000..8582aaf5
--- /dev/null
+++ b/src/mistralai/client/models/archive_workflow_v1_workflows_workflow_identifier_archive_putop.py
@@ -0,0 +1,17 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 0be575ead94b
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel
+from mistralai.client.utils import FieldMetadata, PathParamMetadata
+from typing_extensions import Annotated, TypedDict
+
+
+class ArchiveWorkflowV1WorkflowsWorkflowIdentifierArchivePutRequestTypedDict(TypedDict):
+ workflow_identifier: str
+
+
+class ArchiveWorkflowV1WorkflowsWorkflowIdentifierArchivePutRequest(BaseModel):
+ workflow_identifier: Annotated[
+ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
+ ]
diff --git a/src/mistralai/client/models/basefielddefinition.py b/src/mistralai/client/models/basefielddefinition.py
index b57ff2ca..acf9cf8d 100644
--- a/src/mistralai/client/models/basefielddefinition.py
+++ b/src/mistralai/client/models/basefielddefinition.py
@@ -15,7 +15,7 @@
from typing_extensions import NotRequired, TypedDict
-TypeEnum = Union[
+BaseFieldDefinitionType = Union[
Literal[
"ENUM",
"TEXT",
@@ -58,7 +58,7 @@
class BaseFieldDefinitionTypedDict(TypedDict):
name: str
label: str
- type: TypeEnum
+ type: BaseFieldDefinitionType
supported_operators: List[SupportedOperator]
group: NotRequired[Nullable[str]]
@@ -68,7 +68,7 @@ class BaseFieldDefinition(BaseModel):
label: str
- type: TypeEnum
+ type: BaseFieldDefinitionType
supported_operators: List[SupportedOperator]
diff --git a/src/mistralai/client/models/batchexecutionbody.py b/src/mistralai/client/models/batchexecutionbody.py
new file mode 100644
index 00000000..7d9895e5
--- /dev/null
+++ b/src/mistralai/client/models/batchexecutionbody.py
@@ -0,0 +1,17 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 6cfbee9c4ec7
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel
+from typing import List
+from typing_extensions import TypedDict
+
+
+class BatchExecutionBodyTypedDict(TypedDict):
+ execution_ids: List[str]
+ r"""List of execution IDs to process"""
+
+
+class BatchExecutionBody(BaseModel):
+ execution_ids: List[str]
+ r"""List of execution IDs to process"""
diff --git a/src/mistralai/client/models/batchexecutionresponse.py b/src/mistralai/client/models/batchexecutionresponse.py
new file mode 100644
index 00000000..a0c1f06e
--- /dev/null
+++ b/src/mistralai/client/models/batchexecutionresponse.py
@@ -0,0 +1,35 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 37e6d5d616fe
+
+from __future__ import annotations
+from .batchexecutionresult import BatchExecutionResult, BatchExecutionResultTypedDict
+from mistralai.client.types import BaseModel, UNSET_SENTINEL
+from pydantic import model_serializer
+from typing import Dict, Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+class BatchExecutionResponseTypedDict(TypedDict):
+ results: NotRequired[Dict[str, BatchExecutionResultTypedDict]]
+ r"""Mapping of execution_id to result with status and optional error message"""
+
+
+class BatchExecutionResponse(BaseModel):
+ results: Optional[Dict[str, BatchExecutionResult]] = None
+ r"""Mapping of execution_id to result with status and optional error message"""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["results"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+
+ if val != UNSET_SENTINEL:
+ if val is not None or k not in optional_fields:
+ m[k] = val
+
+ return m
diff --git a/src/mistralai/client/models/batchexecutionresult.py b/src/mistralai/client/models/batchexecutionresult.py
new file mode 100644
index 00000000..ebe41e01
--- /dev/null
+++ b/src/mistralai/client/models/batchexecutionresult.py
@@ -0,0 +1,53 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: ca6840204f22
+
+from __future__ import annotations
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing_extensions import NotRequired, TypedDict
+
+
+class BatchExecutionResultTypedDict(TypedDict):
+ status: str
+ r"""Status of the operation (success/failure)"""
+ error: NotRequired[Nullable[str]]
+ r"""Error message if operation failed"""
+
+
+class BatchExecutionResult(BaseModel):
+ status: str
+ r"""Status of the operation (success/failure)"""
+
+ error: OptionalNullable[str] = UNSET
+ r"""Error message if operation failed"""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["error"])
+ nullable_fields = set(["error"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
diff --git a/src/mistralai/client/models/cancel_workflow_execution_v1_workflows_executions_execution_id_cancel_postop.py b/src/mistralai/client/models/cancel_workflow_execution_v1_workflows_executions_execution_id_cancel_postop.py
new file mode 100644
index 00000000..8ba7176c
--- /dev/null
+++ b/src/mistralai/client/models/cancel_workflow_execution_v1_workflows_executions_execution_id_cancel_postop.py
@@ -0,0 +1,21 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: e26fc5a228af
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel
+from mistralai.client.utils import FieldMetadata, PathParamMetadata
+from typing_extensions import Annotated, TypedDict
+
+
+class CancelWorkflowExecutionV1WorkflowsExecutionsExecutionIDCancelPostRequestTypedDict(
+ TypedDict
+):
+ execution_id: str
+
+
+class CancelWorkflowExecutionV1WorkflowsExecutionsExecutionIDCancelPostRequest(
+ BaseModel
+):
+ execution_id: Annotated[
+ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
+ ]
diff --git a/src/mistralai/client/models/classifierfinetunedmodel.py b/src/mistralai/client/models/classifierfinetunedmodel.py
index 7c2bfc80..35d557aa 100644
--- a/src/mistralai/client/models/classifierfinetunedmodel.py
+++ b/src/mistralai/client/models/classifierfinetunedmodel.py
@@ -34,13 +34,13 @@ class ClassifierFineTunedModelTypedDict(TypedDict):
root_version: str
archived: bool
capabilities: FineTunedModelCapabilitiesTypedDict
- job: str
classifier_targets: List[ClassifierTargetResultTypedDict]
object: Literal["model"]
name: NotRequired[Nullable[str]]
description: NotRequired[Nullable[str]]
max_context_length: NotRequired[int]
aliases: NotRequired[List[str]]
+ job: NotRequired[Nullable[str]]
model_type: Literal["classifier"]
@@ -61,8 +61,6 @@ class ClassifierFineTunedModel(BaseModel):
capabilities: FineTunedModelCapabilities
- job: str
-
classifier_targets: List[ClassifierTargetResult]
object: Annotated[
@@ -78,6 +76,8 @@ class ClassifierFineTunedModel(BaseModel):
aliases: Optional[List[str]] = None
+ job: OptionalNullable[str] = UNSET
+
model_type: Annotated[
Annotated[Literal["classifier"], AfterValidator(validate_const("classifier"))],
pydantic.Field(alias="model_type"),
@@ -86,9 +86,9 @@ class ClassifierFineTunedModel(BaseModel):
@model_serializer(mode="wrap")
def serialize_model(self, handler):
optional_fields = set(
- ["object", "name", "description", "max_context_length", "aliases"]
+ ["object", "name", "description", "max_context_length", "aliases", "job"]
)
- nullable_fields = set(["name", "description"])
+ nullable_fields = set(["name", "description", "job"])
serialized = handler(self)
m = {}
diff --git a/src/mistralai/client/models/completionfinetunedmodel.py b/src/mistralai/client/models/completionfinetunedmodel.py
index e75b8d2f..9b420ed7 100644
--- a/src/mistralai/client/models/completionfinetunedmodel.py
+++ b/src/mistralai/client/models/completionfinetunedmodel.py
@@ -30,12 +30,12 @@ class CompletionFineTunedModelTypedDict(TypedDict):
root_version: str
archived: bool
capabilities: FineTunedModelCapabilitiesTypedDict
- job: str
object: Literal["model"]
name: NotRequired[Nullable[str]]
description: NotRequired[Nullable[str]]
max_context_length: NotRequired[int]
aliases: NotRequired[List[str]]
+ job: NotRequired[Nullable[str]]
model_type: Literal["completion"]
@@ -56,8 +56,6 @@ class CompletionFineTunedModel(BaseModel):
capabilities: FineTunedModelCapabilities
- job: str
-
object: Annotated[
Annotated[Optional[Literal["model"]], AfterValidator(validate_const("model"))],
pydantic.Field(alias="object"),
@@ -71,6 +69,8 @@ class CompletionFineTunedModel(BaseModel):
aliases: Optional[List[str]] = None
+ job: OptionalNullable[str] = UNSET
+
model_type: Annotated[
Annotated[Literal["completion"], AfterValidator(validate_const("completion"))],
pydantic.Field(alias="model_type"),
@@ -79,9 +79,9 @@ class CompletionFineTunedModel(BaseModel):
@model_serializer(mode="wrap")
def serialize_model(self, handler):
optional_fields = set(
- ["object", "name", "description", "max_context_length", "aliases"]
+ ["object", "name", "description", "max_context_length", "aliases", "job"]
)
- nullable_fields = set(["name", "description"])
+ nullable_fields = set(["name", "description", "job"])
serialized = handler(self)
m = {}
diff --git a/src/mistralai/client/models/connector_call_tool_v1op.py b/src/mistralai/client/models/connector_call_tool_v1op.py
index df5783d0..9c77123e 100644
--- a/src/mistralai/client/models/connector_call_tool_v1op.py
+++ b/src/mistralai/client/models/connector_call_tool_v1op.py
@@ -6,15 +6,28 @@
ConnectorCallToolRequest,
ConnectorCallToolRequestTypedDict,
)
-from mistralai.client.types import BaseModel
-from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata
-from typing_extensions import Annotated, TypedDict
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from mistralai.client.utils import (
+ FieldMetadata,
+ PathParamMetadata,
+ QueryParamMetadata,
+ RequestMetadata,
+)
+from pydantic import model_serializer
+from typing_extensions import Annotated, NotRequired, TypedDict
class ConnectorCallToolV1RequestTypedDict(TypedDict):
tool_name: str
connector_id_or_name: str
connector_call_tool_request: ConnectorCallToolRequestTypedDict
+ credentials_name: NotRequired[Nullable[str]]
class ConnectorCallToolV1Request(BaseModel):
@@ -30,3 +43,33 @@ class ConnectorCallToolV1Request(BaseModel):
ConnectorCallToolRequest,
FieldMetadata(request=RequestMetadata(media_type="application/json")),
]
+
+ credentials_name: Annotated[
+ OptionalNullable[str],
+ FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
+ ] = UNSET
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["credentials_name"])
+ nullable_fields = set(["credentials_name"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
diff --git a/src/mistralai/client/models/connector_list_tools_v1op.py b/src/mistralai/client/models/connector_list_tools_v1op.py
index eb5de321..030ffb2c 100644
--- a/src/mistralai/client/models/connector_list_tools_v1op.py
+++ b/src/mistralai/client/models/connector_list_tools_v1op.py
@@ -63,26 +63,15 @@ def serialize_model(self, handler):
return m
-ResponseConnectorListToolsV11TypedDict = TypeAliasType(
- "ResponseConnectorListToolsV11TypedDict",
- Union[MCPToolTypedDict, ConnectorToolTypedDict],
-)
-
-
-ResponseConnectorListToolsV11 = TypeAliasType(
- "ResponseConnectorListToolsV11", Union[MCPTool, ConnectorTool]
-)
-
-
-ResponseConnectorListToolsV12TypedDict = TypeAliasType(
- "ResponseConnectorListToolsV12TypedDict",
- Union[List[ResponseConnectorListToolsV11TypedDict], List[Dict[str, Any]]],
+ResponseConnectorListToolsV1TypedDict = TypeAliasType(
+ "ResponseConnectorListToolsV1TypedDict",
+ Union[List[ConnectorToolTypedDict], List[MCPToolTypedDict], List[Dict[str, Any]]],
)
r"""Successful Response"""
-ResponseConnectorListToolsV12 = TypeAliasType(
- "ResponseConnectorListToolsV12",
- Union[List[ResponseConnectorListToolsV11], List[Dict[str, Any]]],
+ResponseConnectorListToolsV1 = TypeAliasType(
+ "ResponseConnectorListToolsV1",
+ Union[List[ConnectorTool], List[MCPTool], List[Dict[str, Any]]],
)
r"""Successful Response"""
diff --git a/src/mistralai/client/models/customtaskcanceledattributes.py b/src/mistralai/client/models/customtaskcanceledattributes.py
new file mode 100644
index 00000000..cea2137e
--- /dev/null
+++ b/src/mistralai/client/models/customtaskcanceledattributes.py
@@ -0,0 +1,62 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: c20b8be67b8c
+
+from __future__ import annotations
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing_extensions import NotRequired, TypedDict
+
+
+class CustomTaskCanceledAttributesTypedDict(TypedDict):
+ r"""Attributes for custom task canceled events."""
+
+ custom_task_id: str
+ r"""Unique identifier for the custom task within the workflow."""
+ custom_task_type: str
+ r"""The type/category of the custom task (e.g., 'llm_call', 'api_request')."""
+ reason: NotRequired[Nullable[str]]
+ r"""Optional reason provided for the cancellation."""
+
+
+class CustomTaskCanceledAttributes(BaseModel):
+ r"""Attributes for custom task canceled events."""
+
+ custom_task_id: str
+ r"""Unique identifier for the custom task within the workflow."""
+
+ custom_task_type: str
+ r"""The type/category of the custom task (e.g., 'llm_call', 'api_request')."""
+
+ reason: OptionalNullable[str] = UNSET
+ r"""Optional reason provided for the cancellation."""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["reason"])
+ nullable_fields = set(["reason"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
diff --git a/src/mistralai/client/models/customtaskcanceledrequest.py b/src/mistralai/client/models/customtaskcanceledrequest.py
new file mode 100644
index 00000000..0cb82aea
--- /dev/null
+++ b/src/mistralai/client/models/customtaskcanceledrequest.py
@@ -0,0 +1,120 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 4b4f6b8c8ffd
+
+from __future__ import annotations
+from .customtaskcanceledattributes import (
+ CustomTaskCanceledAttributes,
+ CustomTaskCanceledAttributesTypedDict,
+)
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic import model_serializer
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class CustomTaskCanceledRequestTypedDict(TypedDict):
+ r"""Emitted when a custom task is canceled.
+
+ Indicates the task was explicitly stopped before completion.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+ attributes: CustomTaskCanceledAttributesTypedDict
+ r"""Attributes for custom task canceled events."""
+ event_timestamp: NotRequired[int]
+ r"""Unix timestamp in nanoseconds when the event was created."""
+ parent_workflow_exec_id: NotRequired[Nullable[str]]
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+ event_type: Literal["CUSTOM_TASK_CANCELED"]
+ r"""Event type discriminator."""
+
+
+class CustomTaskCanceledRequest(BaseModel):
+ r"""Emitted when a custom task is canceled.
+
+ Indicates the task was explicitly stopped before completion.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+
+ attributes: CustomTaskCanceledAttributes
+ r"""Attributes for custom task canceled events."""
+
+ event_timestamp: Optional[int] = None
+ r"""Unix timestamp in nanoseconds when the event was created."""
+
+ parent_workflow_exec_id: OptionalNullable[str] = UNSET
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+
+ event_type: Annotated[
+ Annotated[
+ Optional[Literal["CUSTOM_TASK_CANCELED"]],
+ AfterValidator(validate_const("CUSTOM_TASK_CANCELED")),
+ ],
+ pydantic.Field(alias="event_type"),
+ ] = "CUSTOM_TASK_CANCELED"
+ r"""Event type discriminator."""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(
+ ["event_timestamp", "parent_workflow_exec_id", "event_type"]
+ )
+ nullable_fields = set(["parent_workflow_exec_id"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
+
+
+try:
+ CustomTaskCanceledRequest.model_rebuild()
+except NameError:
+ pass
diff --git a/src/mistralai/client/models/customtaskcanceledresponse.py b/src/mistralai/client/models/customtaskcanceledresponse.py
new file mode 100644
index 00000000..7873b3ca
--- /dev/null
+++ b/src/mistralai/client/models/customtaskcanceledresponse.py
@@ -0,0 +1,112 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 0eeb9d6cf409
+
+from __future__ import annotations
+from .customtaskcanceledattributes import (
+ CustomTaskCanceledAttributes,
+ CustomTaskCanceledAttributesTypedDict,
+)
+from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic import model_serializer
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, TypedDict
+
+
+class CustomTaskCanceledResponseTypedDict(TypedDict):
+ r"""Emitted when a custom task is canceled.
+
+ Indicates the task was explicitly stopped before completion.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+ event_timestamp: int
+ r"""Unix timestamp in nanoseconds when the event was created."""
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+ parent_workflow_exec_id: Nullable[str]
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+ attributes: CustomTaskCanceledAttributesTypedDict
+ r"""Attributes for custom task canceled events."""
+ event_type: Literal["CUSTOM_TASK_CANCELED"]
+ r"""Event type discriminator."""
+
+
+class CustomTaskCanceledResponse(BaseModel):
+ r"""Emitted when a custom task is canceled.
+
+ Indicates the task was explicitly stopped before completion.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+
+ event_timestamp: int
+ r"""Unix timestamp in nanoseconds when the event was created."""
+
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+
+ parent_workflow_exec_id: Nullable[str]
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+
+ attributes: CustomTaskCanceledAttributes
+ r"""Attributes for custom task canceled events."""
+
+ event_type: Annotated[
+ Annotated[
+ Optional[Literal["CUSTOM_TASK_CANCELED"]],
+ AfterValidator(validate_const("CUSTOM_TASK_CANCELED")),
+ ],
+ pydantic.Field(alias="event_type"),
+ ] = "CUSTOM_TASK_CANCELED"
+ r"""Event type discriminator."""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["event_type"])
+ nullable_fields = set(["parent_workflow_exec_id"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
+
+
+try:
+ CustomTaskCanceledResponse.model_rebuild()
+except NameError:
+ pass
diff --git a/src/mistralai/client/models/customtaskcompletedattributesrequest.py b/src/mistralai/client/models/customtaskcompletedattributesrequest.py
new file mode 100644
index 00000000..5e344a32
--- /dev/null
+++ b/src/mistralai/client/models/customtaskcompletedattributesrequest.py
@@ -0,0 +1,37 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 9ec058aedb96
+
+from __future__ import annotations
+from .jsonpayloadrequest import JSONPayloadRequest, JSONPayloadRequestTypedDict
+from mistralai.client.types import BaseModel
+from typing_extensions import TypedDict
+
+
+class CustomTaskCompletedAttributesRequestTypedDict(TypedDict):
+ r"""Attributes for custom task completed events."""
+
+ custom_task_id: str
+ r"""Unique identifier for the custom task within the workflow."""
+ custom_task_type: str
+ r"""The type/category of the custom task (e.g., 'llm_call', 'api_request')."""
+ payload: JSONPayloadRequestTypedDict
+ r"""A payload containing arbitrary JSON data.
+
+ Used for complete state snapshots or final results.
+ """
+
+
+class CustomTaskCompletedAttributesRequest(BaseModel):
+ r"""Attributes for custom task completed events."""
+
+ custom_task_id: str
+ r"""Unique identifier for the custom task within the workflow."""
+
+ custom_task_type: str
+ r"""The type/category of the custom task (e.g., 'llm_call', 'api_request')."""
+
+ payload: JSONPayloadRequest
+ r"""A payload containing arbitrary JSON data.
+
+ Used for complete state snapshots or final results.
+ """
diff --git a/src/mistralai/client/models/customtaskcompletedattributesresponse.py b/src/mistralai/client/models/customtaskcompletedattributesresponse.py
new file mode 100644
index 00000000..8b28d105
--- /dev/null
+++ b/src/mistralai/client/models/customtaskcompletedattributesresponse.py
@@ -0,0 +1,37 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 0b9fb891f354
+
+from __future__ import annotations
+from .jsonpayloadresponse import JSONPayloadResponse, JSONPayloadResponseTypedDict
+from mistralai.client.types import BaseModel
+from typing_extensions import TypedDict
+
+
+class CustomTaskCompletedAttributesResponseTypedDict(TypedDict):
+ r"""Attributes for custom task completed events."""
+
+ custom_task_id: str
+ r"""Unique identifier for the custom task within the workflow."""
+ custom_task_type: str
+ r"""The type/category of the custom task (e.g., 'llm_call', 'api_request')."""
+ payload: JSONPayloadResponseTypedDict
+ r"""A payload containing arbitrary JSON data.
+
+ Used for complete state snapshots or final results.
+ """
+
+
+class CustomTaskCompletedAttributesResponse(BaseModel):
+ r"""Attributes for custom task completed events."""
+
+ custom_task_id: str
+ r"""Unique identifier for the custom task within the workflow."""
+
+ custom_task_type: str
+ r"""The type/category of the custom task (e.g., 'llm_call', 'api_request')."""
+
+ payload: JSONPayloadResponse
+ r"""A payload containing arbitrary JSON data.
+
+ Used for complete state snapshots or final results.
+ """
diff --git a/src/mistralai/client/models/customtaskcompletedrequest.py b/src/mistralai/client/models/customtaskcompletedrequest.py
new file mode 100644
index 00000000..eee10e8e
--- /dev/null
+++ b/src/mistralai/client/models/customtaskcompletedrequest.py
@@ -0,0 +1,120 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: ca7326e72707
+
+from __future__ import annotations
+from .customtaskcompletedattributesrequest import (
+ CustomTaskCompletedAttributesRequest,
+ CustomTaskCompletedAttributesRequestTypedDict,
+)
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic import model_serializer
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class CustomTaskCompletedRequestTypedDict(TypedDict):
+ r"""Emitted when a custom task completes successfully.
+
+ Contains the final result of the task execution.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+ attributes: CustomTaskCompletedAttributesRequestTypedDict
+ r"""Attributes for custom task completed events."""
+ event_timestamp: NotRequired[int]
+ r"""Unix timestamp in nanoseconds when the event was created."""
+ parent_workflow_exec_id: NotRequired[Nullable[str]]
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+ event_type: Literal["CUSTOM_TASK_COMPLETED"]
+ r"""Event type discriminator."""
+
+
+class CustomTaskCompletedRequest(BaseModel):
+ r"""Emitted when a custom task completes successfully.
+
+ Contains the final result of the task execution.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+
+ attributes: CustomTaskCompletedAttributesRequest
+ r"""Attributes for custom task completed events."""
+
+ event_timestamp: Optional[int] = None
+ r"""Unix timestamp in nanoseconds when the event was created."""
+
+ parent_workflow_exec_id: OptionalNullable[str] = UNSET
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+
+ event_type: Annotated[
+ Annotated[
+ Optional[Literal["CUSTOM_TASK_COMPLETED"]],
+ AfterValidator(validate_const("CUSTOM_TASK_COMPLETED")),
+ ],
+ pydantic.Field(alias="event_type"),
+ ] = "CUSTOM_TASK_COMPLETED"
+ r"""Event type discriminator."""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(
+ ["event_timestamp", "parent_workflow_exec_id", "event_type"]
+ )
+ nullable_fields = set(["parent_workflow_exec_id"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
+
+
+try:
+ CustomTaskCompletedRequest.model_rebuild()
+except NameError:
+ pass
diff --git a/src/mistralai/client/models/customtaskcompletedresponse.py b/src/mistralai/client/models/customtaskcompletedresponse.py
new file mode 100644
index 00000000..5d2a8686
--- /dev/null
+++ b/src/mistralai/client/models/customtaskcompletedresponse.py
@@ -0,0 +1,112 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 719b68571f4c
+
+from __future__ import annotations
+from .customtaskcompletedattributesresponse import (
+ CustomTaskCompletedAttributesResponse,
+ CustomTaskCompletedAttributesResponseTypedDict,
+)
+from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic import model_serializer
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, TypedDict
+
+
+class CustomTaskCompletedResponseTypedDict(TypedDict):
+ r"""Emitted when a custom task completes successfully.
+
+ Contains the final result of the task execution.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+ event_timestamp: int
+ r"""Unix timestamp in nanoseconds when the event was created."""
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+ parent_workflow_exec_id: Nullable[str]
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+ attributes: CustomTaskCompletedAttributesResponseTypedDict
+ r"""Attributes for custom task completed events."""
+ event_type: Literal["CUSTOM_TASK_COMPLETED"]
+ r"""Event type discriminator."""
+
+
+class CustomTaskCompletedResponse(BaseModel):
+ r"""Emitted when a custom task completes successfully.
+
+ Contains the final result of the task execution.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+
+ event_timestamp: int
+ r"""Unix timestamp in nanoseconds when the event was created."""
+
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+
+ parent_workflow_exec_id: Nullable[str]
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+
+ attributes: CustomTaskCompletedAttributesResponse
+ r"""Attributes for custom task completed events."""
+
+ event_type: Annotated[
+ Annotated[
+ Optional[Literal["CUSTOM_TASK_COMPLETED"]],
+ AfterValidator(validate_const("CUSTOM_TASK_COMPLETED")),
+ ],
+ pydantic.Field(alias="event_type"),
+ ] = "CUSTOM_TASK_COMPLETED"
+ r"""Event type discriminator."""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["event_type"])
+ nullable_fields = set(["parent_workflow_exec_id"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
+
+
+try:
+ CustomTaskCompletedResponse.model_rebuild()
+except NameError:
+ pass
diff --git a/src/mistralai/client/models/customtaskfailedattributes.py b/src/mistralai/client/models/customtaskfailedattributes.py
new file mode 100644
index 00000000..b66d7d3c
--- /dev/null
+++ b/src/mistralai/client/models/customtaskfailedattributes.py
@@ -0,0 +1,31 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: ff33698df363
+
+from __future__ import annotations
+from .failure import Failure, FailureTypedDict
+from mistralai.client.types import BaseModel
+from typing_extensions import TypedDict
+
+
+class CustomTaskFailedAttributesTypedDict(TypedDict):
+ r"""Attributes for custom task failed events."""
+
+ custom_task_id: str
+ r"""Unique identifier for the custom task within the workflow."""
+ custom_task_type: str
+ r"""The type/category of the custom task (e.g., 'llm_call', 'api_request')."""
+ failure: FailureTypedDict
+ r"""Represents an error or exception that occurred during execution."""
+
+
+class CustomTaskFailedAttributes(BaseModel):
+ r"""Attributes for custom task failed events."""
+
+ custom_task_id: str
+ r"""Unique identifier for the custom task within the workflow."""
+
+ custom_task_type: str
+ r"""The type/category of the custom task (e.g., 'llm_call', 'api_request')."""
+
+ failure: Failure
+ r"""Represents an error or exception that occurred during execution."""
diff --git a/src/mistralai/client/models/customtaskfailedrequest.py b/src/mistralai/client/models/customtaskfailedrequest.py
new file mode 100644
index 00000000..abf2d74f
--- /dev/null
+++ b/src/mistralai/client/models/customtaskfailedrequest.py
@@ -0,0 +1,120 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 844b7e2b33de
+
+from __future__ import annotations
+from .customtaskfailedattributes import (
+ CustomTaskFailedAttributes,
+ CustomTaskFailedAttributesTypedDict,
+)
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic import model_serializer
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class CustomTaskFailedRequestTypedDict(TypedDict):
+ r"""Emitted when a custom task fails.
+
+ Contains details about the failure for debugging and error handling.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+ attributes: CustomTaskFailedAttributesTypedDict
+ r"""Attributes for custom task failed events."""
+ event_timestamp: NotRequired[int]
+ r"""Unix timestamp in nanoseconds when the event was created."""
+ parent_workflow_exec_id: NotRequired[Nullable[str]]
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+ event_type: Literal["CUSTOM_TASK_FAILED"]
+ r"""Event type discriminator."""
+
+
+class CustomTaskFailedRequest(BaseModel):
+ r"""Emitted when a custom task fails.
+
+ Contains details about the failure for debugging and error handling.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+
+ attributes: CustomTaskFailedAttributes
+ r"""Attributes for custom task failed events."""
+
+ event_timestamp: Optional[int] = None
+ r"""Unix timestamp in nanoseconds when the event was created."""
+
+ parent_workflow_exec_id: OptionalNullable[str] = UNSET
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+
+ event_type: Annotated[
+ Annotated[
+ Optional[Literal["CUSTOM_TASK_FAILED"]],
+ AfterValidator(validate_const("CUSTOM_TASK_FAILED")),
+ ],
+ pydantic.Field(alias="event_type"),
+ ] = "CUSTOM_TASK_FAILED"
+ r"""Event type discriminator."""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(
+ ["event_timestamp", "parent_workflow_exec_id", "event_type"]
+ )
+ nullable_fields = set(["parent_workflow_exec_id"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
+
+
+try:
+ CustomTaskFailedRequest.model_rebuild()
+except NameError:
+ pass
diff --git a/src/mistralai/client/models/customtaskfailedresponse.py b/src/mistralai/client/models/customtaskfailedresponse.py
new file mode 100644
index 00000000..1f9835ec
--- /dev/null
+++ b/src/mistralai/client/models/customtaskfailedresponse.py
@@ -0,0 +1,112 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 1cde7920833f
+
+from __future__ import annotations
+from .customtaskfailedattributes import (
+ CustomTaskFailedAttributes,
+ CustomTaskFailedAttributesTypedDict,
+)
+from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic import model_serializer
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, TypedDict
+
+
+class CustomTaskFailedResponseTypedDict(TypedDict):
+ r"""Emitted when a custom task fails.
+
+ Contains details about the failure for debugging and error handling.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+ event_timestamp: int
+ r"""Unix timestamp in nanoseconds when the event was created."""
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+ parent_workflow_exec_id: Nullable[str]
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+ attributes: CustomTaskFailedAttributesTypedDict
+ r"""Attributes for custom task failed events."""
+ event_type: Literal["CUSTOM_TASK_FAILED"]
+ r"""Event type discriminator."""
+
+
+class CustomTaskFailedResponse(BaseModel):
+ r"""Emitted when a custom task fails.
+
+ Contains details about the failure for debugging and error handling.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+
+ event_timestamp: int
+ r"""Unix timestamp in nanoseconds when the event was created."""
+
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+
+ parent_workflow_exec_id: Nullable[str]
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+
+ attributes: CustomTaskFailedAttributes
+ r"""Attributes for custom task failed events."""
+
+ event_type: Annotated[
+ Annotated[
+ Optional[Literal["CUSTOM_TASK_FAILED"]],
+ AfterValidator(validate_const("CUSTOM_TASK_FAILED")),
+ ],
+ pydantic.Field(alias="event_type"),
+ ] = "CUSTOM_TASK_FAILED"
+ r"""Event type discriminator."""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["event_type"])
+ nullable_fields = set(["parent_workflow_exec_id"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
+
+
+try:
+ CustomTaskFailedResponse.model_rebuild()
+except NameError:
+ pass
diff --git a/src/mistralai/client/models/customtaskinprogressattributesrequest.py b/src/mistralai/client/models/customtaskinprogressattributesrequest.py
new file mode 100644
index 00000000..5737a4e1
--- /dev/null
+++ b/src/mistralai/client/models/customtaskinprogressattributesrequest.py
@@ -0,0 +1,55 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: d003c9954634
+
+from __future__ import annotations
+from .jsonpatchpayloadrequest import (
+ JSONPatchPayloadRequest,
+ JSONPatchPayloadRequestTypedDict,
+)
+from .jsonpayloadrequest import JSONPayloadRequest, JSONPayloadRequestTypedDict
+from mistralai.client.types import BaseModel
+from mistralai.client.utils import get_discriminator
+from pydantic import Discriminator, Tag
+from typing import Union
+from typing_extensions import Annotated, TypeAliasType, TypedDict
+
+
+CustomTaskInProgressAttributesRequestPayloadTypedDict = TypeAliasType(
+ "CustomTaskInProgressAttributesRequestPayloadTypedDict",
+ Union[JSONPayloadRequestTypedDict, JSONPatchPayloadRequestTypedDict],
+)
+r"""The current state or incremental update for the task."""
+
+
+CustomTaskInProgressAttributesRequestPayload = Annotated[
+ Union[
+ Annotated[JSONPayloadRequest, Tag("json")],
+ Annotated[JSONPatchPayloadRequest, Tag("json_patch")],
+ ],
+ Discriminator(lambda m: get_discriminator(m, "type", "type")),
+]
+r"""The current state or incremental update for the task."""
+
+
+class CustomTaskInProgressAttributesRequestTypedDict(TypedDict):
+ r"""Attributes for custom task in-progress events with streaming updates."""
+
+ custom_task_id: str
+ r"""Unique identifier for the custom task within the workflow."""
+ custom_task_type: str
+ r"""The type/category of the custom task (e.g., 'llm_call', 'api_request')."""
+ payload: CustomTaskInProgressAttributesRequestPayloadTypedDict
+ r"""The current state or incremental update for the task."""
+
+
+class CustomTaskInProgressAttributesRequest(BaseModel):
+ r"""Attributes for custom task in-progress events with streaming updates."""
+
+ custom_task_id: str
+ r"""Unique identifier for the custom task within the workflow."""
+
+ custom_task_type: str
+ r"""The type/category of the custom task (e.g., 'llm_call', 'api_request')."""
+
+ payload: CustomTaskInProgressAttributesRequestPayload
+ r"""The current state or incremental update for the task."""
diff --git a/src/mistralai/client/models/customtaskinprogressattributesresponse.py b/src/mistralai/client/models/customtaskinprogressattributesresponse.py
new file mode 100644
index 00000000..9b5fd20a
--- /dev/null
+++ b/src/mistralai/client/models/customtaskinprogressattributesresponse.py
@@ -0,0 +1,82 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 895147a1a6a8
+
+from __future__ import annotations
+from .jsonpatchpayloadresponse import (
+ JSONPatchPayloadResponse,
+ JSONPatchPayloadResponseTypedDict,
+)
+from .jsonpayloadresponse import JSONPayloadResponse, JSONPayloadResponseTypedDict
+from functools import partial
+from mistralai.client.types import BaseModel
+from mistralai.client.utils.unions import parse_open_union
+from pydantic import ConfigDict
+from pydantic.functional_validators import BeforeValidator
+from typing import Any, Literal, Union
+from typing_extensions import Annotated, TypeAliasType, TypedDict
+
+
# Plain-dict (TypedDict) form of the response payload union; note the
# validated model form below additionally admits an "unknown" fallback.
CustomTaskInProgressAttributesResponsePayloadTypedDict = TypeAliasType(
    "CustomTaskInProgressAttributesResponsePayloadTypedDict",
    Union[JSONPayloadResponseTypedDict, JSONPatchPayloadResponseTypedDict],
)
r"""The current state or incremental update for the task."""
+
+
class UnknownCustomTaskInProgressAttributesResponsePayload(BaseModel):
    r"""A CustomTaskInProgressAttributesResponsePayload variant the SDK doesn't recognize. Preserves the raw payload."""

    # Sentinel discriminator value marking an unrecognized variant.
    type: Literal["UNKNOWN"] = "UNKNOWN"
    # The payload exactly as received, so no data is lost for callers.
    raw: Any
    is_unknown: Literal[True] = True

    # Frozen: instances are an immutable record of what the server sent.
    model_config = ConfigDict(frozen=True)
+
+
# Maps the wire "type" discriminator to the concrete variant model.
_CUSTOM_TASK_IN_PROGRESS_ATTRIBUTES_RESPONSE_PAYLOAD_VARIANTS: dict[str, Any] = {
    "json": JSONPayloadResponse,
    "json_patch": JSONPatchPayloadResponse,
}


# Open union: parse_open_union dispatches on "type" via the variants map and,
# for unrecognized tags, wraps the raw data in the Unknown... fallback model
# instead of failing validation (forward compatibility with new server types).
CustomTaskInProgressAttributesResponsePayload = Annotated[
    Union[
        JSONPayloadResponse,
        JSONPatchPayloadResponse,
        UnknownCustomTaskInProgressAttributesResponsePayload,
    ],
    BeforeValidator(
        partial(
            parse_open_union,
            disc_key="type",
            variants=_CUSTOM_TASK_IN_PROGRESS_ATTRIBUTES_RESPONSE_PAYLOAD_VARIANTS,
            unknown_cls=UnknownCustomTaskInProgressAttributesResponsePayload,
            union_name="CustomTaskInProgressAttributesResponsePayload",
        )
    ),
]
r"""The current state or incremental update for the task."""
+
+
class CustomTaskInProgressAttributesResponseTypedDict(TypedDict):
    r"""Attributes for custom task in-progress events with streaming updates."""

    # All keys are required; mirrors CustomTaskInProgressAttributesResponse.
    custom_task_id: str
    r"""Unique identifier for the custom task within the workflow."""
    custom_task_type: str
    r"""The type/category of the custom task (e.g., 'llm_call', 'api_request')."""
    payload: CustomTaskInProgressAttributesResponsePayloadTypedDict
    r"""The current state or incremental update for the task."""
+
+
class CustomTaskInProgressAttributesResponse(BaseModel):
    r"""Attributes for custom task in-progress events with streaming updates."""

    custom_task_id: str
    r"""Unique identifier for the custom task within the workflow."""

    custom_task_type: str
    r"""The type/category of the custom task (e.g., 'llm_call', 'api_request')."""

    # Open union: unrecognized payload "type" tags are preserved in an
    # UnknownCustomTaskInProgressAttributesResponsePayload rather than raising.
    payload: CustomTaskInProgressAttributesResponsePayload
    r"""The current state or incremental update for the task."""
diff --git a/src/mistralai/client/models/customtaskinprogressrequest.py b/src/mistralai/client/models/customtaskinprogressrequest.py
new file mode 100644
index 00000000..c2c2faab
--- /dev/null
+++ b/src/mistralai/client/models/customtaskinprogressrequest.py
@@ -0,0 +1,122 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: d62648fe4f1e
+
+from __future__ import annotations
+from .customtaskinprogressattributesrequest import (
+ CustomTaskInProgressAttributesRequest,
+ CustomTaskInProgressAttributesRequestTypedDict,
+)
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic import model_serializer
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
class CustomTaskInProgressRequestTypedDict(TypedDict):
    r"""Emitted during custom task execution to report progress.

    This event supports streaming updates via JSON or JSON Patch payloads,
    enabling real-time progress tracking for long-running tasks.
    """

    event_id: str
    r"""Unique identifier for this event instance."""
    root_workflow_exec_id: str
    r"""Execution ID of the root workflow that initiated this execution chain."""
    workflow_exec_id: str
    r"""Execution ID of the workflow that emitted this event."""
    workflow_run_id: str
    r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
    workflow_name: str
    r"""The registered name of the workflow that emitted this event."""
    attributes: CustomTaskInProgressAttributesRequestTypedDict
    r"""Attributes for custom task in-progress events with streaming updates."""
    # Optional keys (NotRequired) may be omitted by callers entirely.
    event_timestamp: NotRequired[int]
    r"""Unix timestamp in nanoseconds when the event was created."""
    parent_workflow_exec_id: NotRequired[Nullable[str]]
    r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
    event_type: Literal["CUSTOM_TASK_IN_PROGRESS"]
    r"""Event type discriminator."""
+
+
class CustomTaskInProgressRequest(BaseModel):
    r"""Emitted during custom task execution to report progress.

    This event supports streaming updates via JSON or JSON Patch payloads,
    enabling real-time progress tracking for long-running tasks.
    """

    event_id: str
    r"""Unique identifier for this event instance."""

    root_workflow_exec_id: str
    r"""Execution ID of the root workflow that initiated this execution chain."""

    workflow_exec_id: str
    r"""Execution ID of the workflow that emitted this event."""

    workflow_run_id: str
    r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""

    workflow_name: str
    r"""The registered name of the workflow that emitted this event."""

    attributes: CustomTaskInProgressAttributesRequest
    r"""Attributes for custom task in-progress events with streaming updates."""

    event_timestamp: Optional[int] = None
    r"""Unix timestamp in nanoseconds when the event was created."""

    # UNSET (not None) distinguishes "caller never set it" from an explicit null.
    parent_workflow_exec_id: OptionalNullable[str] = UNSET
    r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""

    # Constant discriminator: defaults to the literal value and validate_const
    # rejects anything else supplied at construction time.
    event_type: Annotated[
        Annotated[
            Optional[Literal["CUSTOM_TASK_IN_PROGRESS"]],
            AfterValidator(validate_const("CUSTOM_TASK_IN_PROGRESS")),
        ],
        pydantic.Field(alias="event_type"),
    ] = "CUSTOM_TASK_IN_PROGRESS"
    r"""Event type discriminator."""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Post-process pydantic's serialization so that:
        #  - UNSET-sentinel values are dropped entirely,
        #  - optional fields serialized as None are omitted, UNLESS they are
        #    nullable fields the caller explicitly set (explicit null is kept).
        optional_fields = set(
            ["event_timestamp", "parent_workflow_exec_id", "event_type"]
        )
        nullable_fields = set(["parent_workflow_exec_id"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            # Serialized output may be keyed by alias or by field name.
            k = f.alias or n
            val = serialized.get(k, serialized.get(n))
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m
+
+
# Resolve forward references deferred by `from __future__ import annotations`;
# NameError is tolerated when referenced types are not importable yet.
try:
    CustomTaskInProgressRequest.model_rebuild()
except NameError:
    pass
diff --git a/src/mistralai/client/models/customtaskinprogressresponse.py b/src/mistralai/client/models/customtaskinprogressresponse.py
new file mode 100644
index 00000000..33c126f7
--- /dev/null
+++ b/src/mistralai/client/models/customtaskinprogressresponse.py
@@ -0,0 +1,114 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 9d012ecb7626
+
+from __future__ import annotations
+from .customtaskinprogressattributesresponse import (
+ CustomTaskInProgressAttributesResponse,
+ CustomTaskInProgressAttributesResponseTypedDict,
+)
+from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic import model_serializer
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, TypedDict
+
+
class CustomTaskInProgressResponseTypedDict(TypedDict):
    r"""Emitted during custom task execution to report progress.

    This event supports streaming updates via JSON or JSON Patch payloads,
    enabling real-time progress tracking for long-running tasks.
    """

    # Response form: unlike the request TypedDict, timestamp and parent id
    # are required keys here (parent id is nullable but must be present).
    event_id: str
    r"""Unique identifier for this event instance."""
    event_timestamp: int
    r"""Unix timestamp in nanoseconds when the event was created."""
    root_workflow_exec_id: str
    r"""Execution ID of the root workflow that initiated this execution chain."""
    parent_workflow_exec_id: Nullable[str]
    r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
    workflow_exec_id: str
    r"""Execution ID of the workflow that emitted this event."""
    workflow_run_id: str
    r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
    workflow_name: str
    r"""The registered name of the workflow that emitted this event."""
    attributes: CustomTaskInProgressAttributesResponseTypedDict
    r"""Attributes for custom task in-progress events with streaming updates."""
    event_type: Literal["CUSTOM_TASK_IN_PROGRESS"]
    r"""Event type discriminator."""
+
+
class CustomTaskInProgressResponse(BaseModel):
    r"""Emitted during custom task execution to report progress.

    This event supports streaming updates via JSON or JSON Patch payloads,
    enabling real-time progress tracking for long-running tasks.
    """

    event_id: str
    r"""Unique identifier for this event instance."""

    event_timestamp: int
    r"""Unix timestamp in nanoseconds when the event was created."""

    root_workflow_exec_id: str
    r"""Execution ID of the root workflow that initiated this execution chain."""

    # Required but nullable: the server always sends the key, possibly null.
    parent_workflow_exec_id: Nullable[str]
    r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""

    workflow_exec_id: str
    r"""Execution ID of the workflow that emitted this event."""

    workflow_run_id: str
    r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""

    workflow_name: str
    r"""The registered name of the workflow that emitted this event."""

    attributes: CustomTaskInProgressAttributesResponse
    r"""Attributes for custom task in-progress events with streaming updates."""

    # Constant discriminator; validate_const rejects any other value.
    event_type: Annotated[
        Annotated[
            Optional[Literal["CUSTOM_TASK_IN_PROGRESS"]],
            AfterValidator(validate_const("CUSTOM_TASK_IN_PROGRESS")),
        ],
        pydantic.Field(alias="event_type"),
    ] = "CUSTOM_TASK_IN_PROGRESS"
    r"""Event type discriminator."""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Drop UNSET sentinels and omit optional fields serialized as None,
        # while preserving an explicitly-set null for nullable fields.
        optional_fields = set(["event_type"])
        nullable_fields = set(["parent_workflow_exec_id"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            # Serialized output may be keyed by alias or by field name.
            k = f.alias or n
            val = serialized.get(k, serialized.get(n))
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m
+
+
# Resolve forward references deferred by `from __future__ import annotations`;
# NameError is tolerated when referenced types are not importable yet.
try:
    CustomTaskInProgressResponse.model_rebuild()
except NameError:
    pass
diff --git a/src/mistralai/client/models/customtaskstartedattributesrequest.py b/src/mistralai/client/models/customtaskstartedattributesrequest.py
new file mode 100644
index 00000000..db1ecbb6
--- /dev/null
+++ b/src/mistralai/client/models/customtaskstartedattributesrequest.py
@@ -0,0 +1,55 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 23ea4ebe9e0b
+
+from __future__ import annotations
+from .jsonpayloadrequest import JSONPayloadRequest, JSONPayloadRequestTypedDict
+from mistralai.client.types import BaseModel, UNSET_SENTINEL
+from pydantic import model_serializer
+from typing import Optional
+from typing_extensions import NotRequired, TypedDict
+
+
class CustomTaskStartedAttributesRequestTypedDict(TypedDict):
    r"""Attributes for custom task started events."""

    custom_task_id: str
    r"""Unique identifier for the custom task within the workflow."""
    custom_task_type: str
    r"""The type/category of the custom task (e.g., 'llm_call', 'api_request')."""
    # Optional: an initial payload may be omitted entirely.
    payload: NotRequired[JSONPayloadRequestTypedDict]
    r"""A payload containing arbitrary JSON data.

    Used for complete state snapshots or final results.
    """
+
+
class CustomTaskStartedAttributesRequest(BaseModel):
    r"""Attributes for custom task started events."""

    custom_task_id: str
    r"""Unique identifier for the custom task within the workflow."""

    custom_task_type: str
    r"""The type/category of the custom task (e.g., 'llm_call', 'api_request')."""

    payload: Optional[JSONPayloadRequest] = None
    r"""A payload containing arbitrary JSON data.

    Used for complete state snapshots or final results.
    """

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Omit optional fields that serialize as None (and any UNSET
        # sentinels) so they are absent from the request body.
        optional_fields = set(["payload"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            # Serialized output may be keyed by alias or by field name.
            k = f.alias or n
            val = serialized.get(k, serialized.get(n))

            if val != UNSET_SENTINEL:
                if val is not None or k not in optional_fields:
                    m[k] = val

        return m
diff --git a/src/mistralai/client/models/customtaskstartedattributesresponse.py b/src/mistralai/client/models/customtaskstartedattributesresponse.py
new file mode 100644
index 00000000..71bed58f
--- /dev/null
+++ b/src/mistralai/client/models/customtaskstartedattributesresponse.py
@@ -0,0 +1,55 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 6b8946c77018
+
+from __future__ import annotations
+from .jsonpayloadresponse import JSONPayloadResponse, JSONPayloadResponseTypedDict
+from mistralai.client.types import BaseModel, UNSET_SENTINEL
+from pydantic import model_serializer
+from typing import Optional
+from typing_extensions import NotRequired, TypedDict
+
+
class CustomTaskStartedAttributesResponseTypedDict(TypedDict):
    r"""Attributes for custom task started events."""

    custom_task_id: str
    r"""Unique identifier for the custom task within the workflow."""
    custom_task_type: str
    r"""The type/category of the custom task (e.g., 'llm_call', 'api_request')."""
    # Optional: the server may omit the payload key entirely.
    payload: NotRequired[JSONPayloadResponseTypedDict]
    r"""A payload containing arbitrary JSON data.

    Used for complete state snapshots or final results.
    """
+
+
class CustomTaskStartedAttributesResponse(BaseModel):
    r"""Attributes for custom task started events."""

    custom_task_id: str
    r"""Unique identifier for the custom task within the workflow."""

    custom_task_type: str
    r"""The type/category of the custom task (e.g., 'llm_call', 'api_request')."""

    payload: Optional[JSONPayloadResponse] = None
    r"""A payload containing arbitrary JSON data.

    Used for complete state snapshots or final results.
    """

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Omit optional fields that serialize as None (and any UNSET
        # sentinels) so they are absent from the serialized output.
        optional_fields = set(["payload"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            # Serialized output may be keyed by alias or by field name.
            k = f.alias or n
            val = serialized.get(k, serialized.get(n))

            if val != UNSET_SENTINEL:
                if val is not None or k not in optional_fields:
                    m[k] = val

        return m
diff --git a/src/mistralai/client/models/customtaskstartedrequest.py b/src/mistralai/client/models/customtaskstartedrequest.py
new file mode 100644
index 00000000..c4bb6c2a
--- /dev/null
+++ b/src/mistralai/client/models/customtaskstartedrequest.py
@@ -0,0 +1,122 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 39792cc12bde
+
+from __future__ import annotations
+from .customtaskstartedattributesrequest import (
+ CustomTaskStartedAttributesRequest,
+ CustomTaskStartedAttributesRequestTypedDict,
+)
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic import model_serializer
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
class CustomTaskStartedRequestTypedDict(TypedDict):
    r"""Emitted when a custom task begins execution.

    Custom tasks represent user-defined units of work within a workflow,
    such as LLM calls, API requests, or data processing steps.
    """

    event_id: str
    r"""Unique identifier for this event instance."""
    root_workflow_exec_id: str
    r"""Execution ID of the root workflow that initiated this execution chain."""
    workflow_exec_id: str
    r"""Execution ID of the workflow that emitted this event."""
    workflow_run_id: str
    r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
    workflow_name: str
    r"""The registered name of the workflow that emitted this event."""
    attributes: CustomTaskStartedAttributesRequestTypedDict
    r"""Attributes for custom task started events."""
    # Optional keys (NotRequired) may be omitted by callers entirely.
    event_timestamp: NotRequired[int]
    r"""Unix timestamp in nanoseconds when the event was created."""
    parent_workflow_exec_id: NotRequired[Nullable[str]]
    r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
    event_type: Literal["CUSTOM_TASK_STARTED"]
    r"""Event type discriminator."""
+
+
class CustomTaskStartedRequest(BaseModel):
    r"""Emitted when a custom task begins execution.

    Custom tasks represent user-defined units of work within a workflow,
    such as LLM calls, API requests, or data processing steps.
    """

    event_id: str
    r"""Unique identifier for this event instance."""

    root_workflow_exec_id: str
    r"""Execution ID of the root workflow that initiated this execution chain."""

    workflow_exec_id: str
    r"""Execution ID of the workflow that emitted this event."""

    workflow_run_id: str
    r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""

    workflow_name: str
    r"""The registered name of the workflow that emitted this event."""

    attributes: CustomTaskStartedAttributesRequest
    r"""Attributes for custom task started events."""

    event_timestamp: Optional[int] = None
    r"""Unix timestamp in nanoseconds when the event was created."""

    # UNSET (not None) distinguishes "caller never set it" from an explicit null.
    parent_workflow_exec_id: OptionalNullable[str] = UNSET
    r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""

    # Constant discriminator; validate_const rejects any other value.
    event_type: Annotated[
        Annotated[
            Optional[Literal["CUSTOM_TASK_STARTED"]],
            AfterValidator(validate_const("CUSTOM_TASK_STARTED")),
        ],
        pydantic.Field(alias="event_type"),
    ] = "CUSTOM_TASK_STARTED"
    r"""Event type discriminator."""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Drop UNSET sentinels and omit optional fields serialized as None,
        # while preserving an explicitly-set null for nullable fields.
        optional_fields = set(
            ["event_timestamp", "parent_workflow_exec_id", "event_type"]
        )
        nullable_fields = set(["parent_workflow_exec_id"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            # Serialized output may be keyed by alias or by field name.
            k = f.alias or n
            val = serialized.get(k, serialized.get(n))
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m
+
+
# Resolve forward references deferred by `from __future__ import annotations`;
# NameError is tolerated when referenced types are not importable yet.
try:
    CustomTaskStartedRequest.model_rebuild()
except NameError:
    pass
diff --git a/src/mistralai/client/models/customtaskstartedresponse.py b/src/mistralai/client/models/customtaskstartedresponse.py
new file mode 100644
index 00000000..59d11fa8
--- /dev/null
+++ b/src/mistralai/client/models/customtaskstartedresponse.py
@@ -0,0 +1,114 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 02b330a5292e
+
+from __future__ import annotations
+from .customtaskstartedattributesresponse import (
+ CustomTaskStartedAttributesResponse,
+ CustomTaskStartedAttributesResponseTypedDict,
+)
+from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic import model_serializer
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, TypedDict
+
+
class CustomTaskStartedResponseTypedDict(TypedDict):
    r"""Emitted when a custom task begins execution.

    Custom tasks represent user-defined units of work within a workflow,
    such as LLM calls, API requests, or data processing steps.
    """

    # Response form: timestamp and parent id are required keys here
    # (parent id is nullable but must be present).
    event_id: str
    r"""Unique identifier for this event instance."""
    event_timestamp: int
    r"""Unix timestamp in nanoseconds when the event was created."""
    root_workflow_exec_id: str
    r"""Execution ID of the root workflow that initiated this execution chain."""
    parent_workflow_exec_id: Nullable[str]
    r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
    workflow_exec_id: str
    r"""Execution ID of the workflow that emitted this event."""
    workflow_run_id: str
    r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
    workflow_name: str
    r"""The registered name of the workflow that emitted this event."""
    attributes: CustomTaskStartedAttributesResponseTypedDict
    r"""Attributes for custom task started events."""
    event_type: Literal["CUSTOM_TASK_STARTED"]
    r"""Event type discriminator."""
+
+
class CustomTaskStartedResponse(BaseModel):
    r"""Emitted when a custom task begins execution.

    Custom tasks represent user-defined units of work within a workflow,
    such as LLM calls, API requests, or data processing steps.
    """

    event_id: str
    r"""Unique identifier for this event instance."""

    event_timestamp: int
    r"""Unix timestamp in nanoseconds when the event was created."""

    root_workflow_exec_id: str
    r"""Execution ID of the root workflow that initiated this execution chain."""

    # Required but nullable: the server always sends the key, possibly null.
    parent_workflow_exec_id: Nullable[str]
    r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""

    workflow_exec_id: str
    r"""Execution ID of the workflow that emitted this event."""

    workflow_run_id: str
    r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""

    workflow_name: str
    r"""The registered name of the workflow that emitted this event."""

    attributes: CustomTaskStartedAttributesResponse
    r"""Attributes for custom task started events."""

    # Constant discriminator; validate_const rejects any other value.
    event_type: Annotated[
        Annotated[
            Optional[Literal["CUSTOM_TASK_STARTED"]],
            AfterValidator(validate_const("CUSTOM_TASK_STARTED")),
        ],
        pydantic.Field(alias="event_type"),
    ] = "CUSTOM_TASK_STARTED"
    r"""Event type discriminator."""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Drop UNSET sentinels and omit optional fields serialized as None,
        # while preserving an explicitly-set null for nullable fields.
        optional_fields = set(["event_type"])
        nullable_fields = set(["parent_workflow_exec_id"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            # Serialized output may be keyed by alias or by field name.
            k = f.alias or n
            val = serialized.get(k, serialized.get(n))
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m
+
+
# Resolve forward references deferred by `from __future__ import annotations`;
# NameError is tolerated when referenced types are not importable yet.
try:
    CustomTaskStartedResponse.model_rebuild()
except NameError:
    pass
diff --git a/src/mistralai/client/models/customtasktimedoutattributes.py b/src/mistralai/client/models/customtasktimedoutattributes.py
new file mode 100644
index 00000000..47517ab1
--- /dev/null
+++ b/src/mistralai/client/models/customtasktimedoutattributes.py
@@ -0,0 +1,62 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 9cc865098add
+
+from __future__ import annotations
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing_extensions import NotRequired, TypedDict
+
+
class CustomTaskTimedOutAttributesTypedDict(TypedDict):
    r"""Attributes for custom task timed out events."""

    custom_task_id: str
    r"""Unique identifier for the custom task within the workflow."""
    custom_task_type: str
    r"""The type/category of the custom task (e.g., 'llm_call', 'api_request')."""
    # Optional and nullable: the key may be absent or explicitly null.
    timeout_type: NotRequired[Nullable[str]]
    r"""The type of timeout that occurred."""
+
+
class CustomTaskTimedOutAttributes(BaseModel):
    r"""Attributes for custom task timed out events."""

    custom_task_id: str
    r"""Unique identifier for the custom task within the workflow."""

    custom_task_type: str
    r"""The type/category of the custom task (e.g., 'llm_call', 'api_request')."""

    # UNSET (not None) distinguishes "never set" from an explicit null.
    timeout_type: OptionalNullable[str] = UNSET
    r"""The type of timeout that occurred."""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Drop UNSET sentinels and omit optional fields serialized as None,
        # while preserving an explicitly-set null for nullable fields.
        optional_fields = set(["timeout_type"])
        nullable_fields = set(["timeout_type"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            # Serialized output may be keyed by alias or by field name.
            k = f.alias or n
            val = serialized.get(k, serialized.get(n))
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m
diff --git a/src/mistralai/client/models/customtasktimedoutrequest.py b/src/mistralai/client/models/customtasktimedoutrequest.py
new file mode 100644
index 00000000..def540f4
--- /dev/null
+++ b/src/mistralai/client/models/customtasktimedoutrequest.py
@@ -0,0 +1,120 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 75499a6e7c0e
+
+from __future__ import annotations
+from .customtasktimedoutattributes import (
+ CustomTaskTimedOutAttributes,
+ CustomTaskTimedOutAttributesTypedDict,
+)
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic import model_serializer
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
class CustomTaskTimedOutRequestTypedDict(TypedDict):
    r"""Emitted when a custom task exceeds its timeout.

    Indicates the task did not complete within its configured time limit.
    """

    event_id: str
    r"""Unique identifier for this event instance."""
    root_workflow_exec_id: str
    r"""Execution ID of the root workflow that initiated this execution chain."""
    workflow_exec_id: str
    r"""Execution ID of the workflow that emitted this event."""
    workflow_run_id: str
    r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
    workflow_name: str
    r"""The registered name of the workflow that emitted this event."""
    attributes: CustomTaskTimedOutAttributesTypedDict
    r"""Attributes for custom task timed out events."""
    # Optional keys (NotRequired) may be omitted by callers entirely.
    event_timestamp: NotRequired[int]
    r"""Unix timestamp in nanoseconds when the event was created."""
    parent_workflow_exec_id: NotRequired[Nullable[str]]
    r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
    event_type: Literal["CUSTOM_TASK_TIMED_OUT"]
    r"""Event type discriminator."""
+
+
class CustomTaskTimedOutRequest(BaseModel):
    r"""Emitted when a custom task exceeds its timeout.

    Indicates the task did not complete within its configured time limit.
    """

    event_id: str
    r"""Unique identifier for this event instance."""

    root_workflow_exec_id: str
    r"""Execution ID of the root workflow that initiated this execution chain."""

    workflow_exec_id: str
    r"""Execution ID of the workflow that emitted this event."""

    workflow_run_id: str
    r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""

    workflow_name: str
    r"""The registered name of the workflow that emitted this event."""

    attributes: CustomTaskTimedOutAttributes
    r"""Attributes for custom task timed out events."""

    event_timestamp: Optional[int] = None
    r"""Unix timestamp in nanoseconds when the event was created."""

    # UNSET (not None) distinguishes "caller never set it" from an explicit null.
    parent_workflow_exec_id: OptionalNullable[str] = UNSET
    r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""

    # Constant discriminator; validate_const rejects any other value.
    event_type: Annotated[
        Annotated[
            Optional[Literal["CUSTOM_TASK_TIMED_OUT"]],
            AfterValidator(validate_const("CUSTOM_TASK_TIMED_OUT")),
        ],
        pydantic.Field(alias="event_type"),
    ] = "CUSTOM_TASK_TIMED_OUT"
    r"""Event type discriminator."""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Drop UNSET sentinels and omit optional fields serialized as None,
        # while preserving an explicitly-set null for nullable fields.
        optional_fields = set(
            ["event_timestamp", "parent_workflow_exec_id", "event_type"]
        )
        nullable_fields = set(["parent_workflow_exec_id"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            # Serialized output may be keyed by alias or by field name.
            k = f.alias or n
            val = serialized.get(k, serialized.get(n))
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m
+
+
# Resolve forward references deferred by `from __future__ import annotations`;
# NameError is tolerated when referenced types are not importable yet.
try:
    CustomTaskTimedOutRequest.model_rebuild()
except NameError:
    pass
diff --git a/src/mistralai/client/models/customtasktimedoutresponse.py b/src/mistralai/client/models/customtasktimedoutresponse.py
new file mode 100644
index 00000000..7f274a53
--- /dev/null
+++ b/src/mistralai/client/models/customtasktimedoutresponse.py
@@ -0,0 +1,112 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 2dbbc78b85d1
+
+from __future__ import annotations
+from .customtasktimedoutattributes import (
+ CustomTaskTimedOutAttributes,
+ CustomTaskTimedOutAttributesTypedDict,
+)
+from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic import model_serializer
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, TypedDict
+
+
# Plain-dict mirror of CustomTaskTimedOutResponse for use at call sites.
class CustomTaskTimedOutResponseTypedDict(TypedDict):
    r"""Emitted when a custom task exceeds its timeout.

    Indicates the task did not complete within its configured time limit.
    """

    event_id: str
    r"""Unique identifier for this event instance."""
    event_timestamp: int
    r"""Unix timestamp in nanoseconds when the event was created."""
    root_workflow_exec_id: str
    r"""Execution ID of the root workflow that initiated this execution chain."""
    parent_workflow_exec_id: Nullable[str]
    r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
    workflow_exec_id: str
    r"""Execution ID of the workflow that emitted this event."""
    workflow_run_id: str
    r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
    workflow_name: str
    r"""The registered name of the workflow that emitted this event."""
    attributes: CustomTaskTimedOutAttributesTypedDict
    r"""Attributes for custom task timed out events."""
    event_type: Literal["CUSTOM_TASK_TIMED_OUT"]
    r"""Event type discriminator."""


class CustomTaskTimedOutResponse(BaseModel):
    r"""Emitted when a custom task exceeds its timeout.

    Indicates the task did not complete within its configured time limit.
    """

    event_id: str
    r"""Unique identifier for this event instance."""

    event_timestamp: int
    r"""Unix timestamp in nanoseconds when the event was created."""

    root_workflow_exec_id: str
    r"""Execution ID of the root workflow that initiated this execution chain."""

    parent_workflow_exec_id: Nullable[str]
    r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""

    workflow_exec_id: str
    r"""Execution ID of the workflow that emitted this event."""

    workflow_run_id: str
    r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""

    workflow_name: str
    r"""The registered name of the workflow that emitted this event."""

    attributes: CustomTaskTimedOutAttributes
    r"""Attributes for custom task timed out events."""

    # Constant discriminator: validate_const rejects any other value on input.
    event_type: Annotated[
        Annotated[
            Optional[Literal["CUSTOM_TASK_TIMED_OUT"]],
            AfterValidator(validate_const("CUSTOM_TASK_TIMED_OUT")),
        ],
        pydantic.Field(alias="event_type"),
    ] = "CUSTOM_TASK_TIMED_OUT"
    r"""Event type discriminator."""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Serialize, dropping optional fields that are None, but keeping an
        explicit null for nullable fields the caller set deliberately.
        """
        # Fields that may be omitted from the payload.
        optional_fields = set(["event_type"])
        # Fields where an explicitly-set None must be emitted as null.
        nullable_fields = set(["parent_workflow_exec_id"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            # Serialize under the declared alias when one exists.
            k = f.alias or n
            val = serialized.get(k, serialized.get(n))
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            # The UNSET sentinel marks "never provided" and is never emitted.
            if val != UNSET_SENTINEL:
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m


# Resolve forward references once all models in this module are defined;
# NameError means a referenced symbol is unavailable and rebuild is deferred.
try:
    CustomTaskTimedOutResponse.model_rebuild()
except NameError:
    pass
diff --git a/src/mistralai/client/models/deletebatchjobresponse.py b/src/mistralai/client/models/deletebatchjobresponse.py
new file mode 100644
index 00000000..958f0c4f
--- /dev/null
+++ b/src/mistralai/client/models/deletebatchjobresponse.py
@@ -0,0 +1,50 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 79a43aab6cf9
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel, UNSET_SENTINEL
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic import model_serializer
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
# Plain-dict mirror of DeleteBatchJobResponse for use at call sites.
class DeleteBatchJobResponseTypedDict(TypedDict):
    id: str
    object: Literal["batch"]
    deleted: NotRequired[bool]


class DeleteBatchJobResponse(BaseModel):
    id: str

    # Constant object tag: validate_const rejects any value other than "batch".
    object: Annotated[
        Annotated[Optional[Literal["batch"]], AfterValidator(validate_const("batch"))],
        pydantic.Field(alias="object"),
    ] = "batch"

    deleted: Optional[bool] = True

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Serialize, omitting optional fields whose value is None or unset."""
        optional_fields = set(["object", "deleted"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            # Serialize under the declared alias when one exists.
            k = f.alias or n
            val = serialized.get(k, serialized.get(n))

            # The UNSET sentinel marks "never provided" and is never emitted.
            if val != UNSET_SENTINEL:
                if val is not None or k not in optional_fields:
                    m[k] = val

        return m


# Resolve forward references once all models in this module are defined;
# NameError means a referenced symbol is unavailable and rebuild is deferred.
try:
    DeleteBatchJobResponse.model_rebuild()
except NameError:
    pass
diff --git a/src/mistralai/client/models/deploymentdetailresponse.py b/src/mistralai/client/models/deploymentdetailresponse.py
new file mode 100644
index 00000000..82f021f1
--- /dev/null
+++ b/src/mistralai/client/models/deploymentdetailresponse.py
@@ -0,0 +1,47 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 7f4a17a1c7ca
+
+from __future__ import annotations
+from .deploymentworkerresponse import (
+ DeploymentWorkerResponse,
+ DeploymentWorkerResponseTypedDict,
+)
+from datetime import datetime
+from mistralai.client.types import BaseModel
+from typing import List
+from typing_extensions import TypedDict
+
+
# Plain-dict mirror of DeploymentDetailResponse for use at call sites.
class DeploymentDetailResponseTypedDict(TypedDict):
    id: str
    r"""Unique identifier of the deployment"""
    name: str
    r"""Deployment name"""
    is_active: bool
    r"""Whether at least one worker is currently live"""
    created_at: datetime
    r"""When the deployment was first registered"""
    updated_at: datetime
    r"""When the deployment was last updated"""
    workers: List[DeploymentWorkerResponseTypedDict]
    r"""Workers registered for the deployment"""


# Detail view of a deployment: the summary fields plus its worker list.
class DeploymentDetailResponse(BaseModel):
    id: str
    r"""Unique identifier of the deployment"""

    name: str
    r"""Deployment name"""

    is_active: bool
    r"""Whether at least one worker is currently live"""

    created_at: datetime
    r"""When the deployment was first registered"""

    updated_at: datetime
    r"""When the deployment was last updated"""

    workers: List[DeploymentWorkerResponse]
    r"""Workers registered for the deployment"""
diff --git a/src/mistralai/client/models/deploymentlistresponse.py b/src/mistralai/client/models/deploymentlistresponse.py
new file mode 100644
index 00000000..7926cde7
--- /dev/null
+++ b/src/mistralai/client/models/deploymentlistresponse.py
@@ -0,0 +1,18 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 1f0b404ba621
+
+from __future__ import annotations
+from .deploymentresponse import DeploymentResponse, DeploymentResponseTypedDict
+from mistralai.client.types import BaseModel
+from typing import List
+from typing_extensions import TypedDict
+
+
# Plain-dict mirror of DeploymentListResponse for use at call sites.
class DeploymentListResponseTypedDict(TypedDict):
    deployments: List[DeploymentResponseTypedDict]
    r"""List of deployments"""


# Envelope for the deployment-list endpoint.
class DeploymentListResponse(BaseModel):
    deployments: List[DeploymentResponse]
    r"""List of deployments"""
diff --git a/src/mistralai/client/models/deploymentresponse.py b/src/mistralai/client/models/deploymentresponse.py
new file mode 100644
index 00000000..7f8ed3a3
--- /dev/null
+++ b/src/mistralai/client/models/deploymentresponse.py
@@ -0,0 +1,37 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: ea15ddd64402
+
+from __future__ import annotations
+from datetime import datetime
+from mistralai.client.types import BaseModel
+from typing_extensions import TypedDict
+
+
# Plain-dict mirror of DeploymentResponse for use at call sites.
class DeploymentResponseTypedDict(TypedDict):
    id: str
    r"""Unique identifier of the deployment"""
    name: str
    r"""Deployment name"""
    is_active: bool
    r"""Whether at least one worker is currently live"""
    created_at: datetime
    r"""When the deployment was first registered"""
    updated_at: datetime
    r"""When the deployment was last updated"""


# Summary view of a deployment (no worker details).
class DeploymentResponse(BaseModel):
    id: str
    r"""Unique identifier of the deployment"""

    name: str
    r"""Deployment name"""

    is_active: bool
    r"""Whether at least one worker is currently live"""

    created_at: datetime
    r"""When the deployment was first registered"""

    updated_at: datetime
    r"""When the deployment was last updated"""
diff --git a/src/mistralai/client/models/deploymentworkerresponse.py b/src/mistralai/client/models/deploymentworkerresponse.py
new file mode 100644
index 00000000..0adcb4d6
--- /dev/null
+++ b/src/mistralai/client/models/deploymentworkerresponse.py
@@ -0,0 +1,27 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: b11a9947bd19
+
+from __future__ import annotations
+from datetime import datetime
+from mistralai.client.types import BaseModel
+from typing_extensions import TypedDict
+
+
# Plain-dict mirror of DeploymentWorkerResponse for use at call sites.
class DeploymentWorkerResponseTypedDict(TypedDict):
    name: str
    r"""Worker name"""
    created_at: datetime
    r"""When the worker first registered"""
    updated_at: datetime
    r"""When the worker last registered"""


# A single worker registered under a deployment.
class DeploymentWorkerResponse(BaseModel):
    name: str
    r"""Worker name"""

    created_at: datetime
    r"""When the worker first registered"""

    updated_at: datetime
    r"""When the worker last registered"""
diff --git a/src/mistralai/client/models/encodedpayloadoptions.py b/src/mistralai/client/models/encodedpayloadoptions.py
new file mode 100644
index 00000000..5c369046
--- /dev/null
+++ b/src/mistralai/client/models/encodedpayloadoptions.py
@@ -0,0 +1,12 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 97955ebc2eb9
+
+from __future__ import annotations
+from typing import Literal
+
+
# Closed set of payload-encoding markers; unknown strings are rejected at
# validation time (plain Literal, no UnrecognizedStr fallback).
EncodedPayloadOptions = Literal[
    "offloaded",
    "encrypted",
    "encrypted-partial",
]
diff --git a/src/mistralai/client/models/eventprogressstatus.py b/src/mistralai/client/models/eventprogressstatus.py
new file mode 100644
index 00000000..6fde1c39
--- /dev/null
+++ b/src/mistralai/client/models/eventprogressstatus.py
@@ -0,0 +1,16 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 48623263df72
+
+from __future__ import annotations
+from mistralai.client.types import UnrecognizedStr
+from typing import Literal, Union
+
+
# Known progress states; the UnrecognizedStr arm lets forward-incompatible
# server values pass validation instead of failing.
EventProgressStatus = Union[
    Literal[
        "RUNNING",
        "COMPLETED",
        "FAILED",
    ],
    UnrecognizedStr,
]
diff --git a/src/mistralai/client/models/eventsource.py b/src/mistralai/client/models/eventsource.py
new file mode 100644
index 00000000..d0f4d5e3
--- /dev/null
+++ b/src/mistralai/client/models/eventsource.py
@@ -0,0 +1,11 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 8b926028b7b2
+
+from __future__ import annotations
+from typing import Literal
+
+
# Closed set: whether an event was replayed from storage or received live.
EventSource = Literal[
    "DATABASE",
    "LIVE",
]
diff --git a/src/mistralai/client/models/eventtype.py b/src/mistralai/client/models/eventtype.py
new file mode 100644
index 00000000..a85321b2
--- /dev/null
+++ b/src/mistralai/client/models/eventtype.py
@@ -0,0 +1,15 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: f70686df1fa5
+
+from __future__ import annotations
+from mistralai.client.types import UnrecognizedStr
+from typing import Literal, Union
+
+
# Known event categories; the UnrecognizedStr arm tolerates values added by
# newer servers without breaking validation.
EventType = Union[
    Literal[
        "EVENT",
        "EVENT_PROGRESS",
    ],
    UnrecognizedStr,
]
diff --git a/src/mistralai/client/models/execute_workflow_registration_v1_workflows_registrations_workflow_registration_id_execute_postop.py b/src/mistralai/client/models/execute_workflow_registration_v1_workflows_registrations_workflow_registration_id_execute_postop.py
new file mode 100644
index 00000000..2e79ef25
--- /dev/null
+++ b/src/mistralai/client/models/execute_workflow_registration_v1_workflows_registrations_workflow_registration_id_execute_postop.py
@@ -0,0 +1,54 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 3e2249825144
+
+from __future__ import annotations
+from .workflowexecutionrequest import (
+ WorkflowExecutionRequest,
+ WorkflowExecutionRequestTypedDict,
+)
+from .workflowexecutionresponse import (
+ WorkflowExecutionResponse,
+ WorkflowExecutionResponseTypedDict,
+)
+from .workflowexecutionsyncresponse import (
+ WorkflowExecutionSyncResponse,
+ WorkflowExecutionSyncResponseTypedDict,
+)
+from mistralai.client.types import BaseModel
+from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata
+from typing import Union
+from typing_extensions import Annotated, TypeAliasType, TypedDict
+
+
# Plain-dict mirror of the request model for use at call sites.
class ExecuteWorkflowRegistrationV1WorkflowsRegistrationsWorkflowRegistrationIDExecutePostRequestTypedDict(
    TypedDict
):
    workflow_registration_id: str
    workflow_execution_request: WorkflowExecutionRequestTypedDict


# Request wrapper: path parameter plus the JSON request body.
class ExecuteWorkflowRegistrationV1WorkflowsRegistrationsWorkflowRegistrationIDExecutePostRequest(
    BaseModel
):
    # Bound into the URL path (simple style, no explode).
    workflow_registration_id: Annotated[
        str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
    ]

    # Sent as the application/json request body.
    workflow_execution_request: Annotated[
        WorkflowExecutionRequest,
        FieldMetadata(request=RequestMetadata(media_type="application/json")),
    ]


# The endpoint returns either a synchronous result or an async execution handle.
ResponseExecuteWorkflowRegistrationV1WorkflowsRegistrationsWorkflowRegistrationIDExecutePostTypedDict = TypeAliasType(
    "ResponseExecuteWorkflowRegistrationV1WorkflowsRegistrationsWorkflowRegistrationIDExecutePostTypedDict",
    Union[WorkflowExecutionSyncResponseTypedDict, WorkflowExecutionResponseTypedDict],
)
r"""Successful Response"""


ResponseExecuteWorkflowRegistrationV1WorkflowsRegistrationsWorkflowRegistrationIDExecutePost = TypeAliasType(
    "ResponseExecuteWorkflowRegistrationV1WorkflowsRegistrationsWorkflowRegistrationIDExecutePost",
    Union[WorkflowExecutionSyncResponse, WorkflowExecutionResponse],
)
r"""Successful Response"""
diff --git a/src/mistralai/client/models/execute_workflow_v1_workflows_workflow_identifier_execute_postop.py b/src/mistralai/client/models/execute_workflow_v1_workflows_workflow_identifier_execute_postop.py
new file mode 100644
index 00000000..bd9496a2
--- /dev/null
+++ b/src/mistralai/client/models/execute_workflow_v1_workflows_workflow_identifier_execute_postop.py
@@ -0,0 +1,56 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 5da876c66fc5
+
+from __future__ import annotations
+from .workflowexecutionrequest import (
+ WorkflowExecutionRequest,
+ WorkflowExecutionRequestTypedDict,
+)
+from .workflowexecutionresponse import (
+ WorkflowExecutionResponse,
+ WorkflowExecutionResponseTypedDict,
+)
+from .workflowexecutionsyncresponse import (
+ WorkflowExecutionSyncResponse,
+ WorkflowExecutionSyncResponseTypedDict,
+)
+from mistralai.client.types import BaseModel
+from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata
+from typing import Union
+from typing_extensions import Annotated, TypeAliasType, TypedDict
+
+
# Plain-dict mirror of the request model for use at call sites.
class ExecuteWorkflowV1WorkflowsWorkflowIdentifierExecutePostRequestTypedDict(
    TypedDict
):
    workflow_identifier: str
    workflow_execution_request: WorkflowExecutionRequestTypedDict


# Request wrapper: path parameter plus the JSON request body.
class ExecuteWorkflowV1WorkflowsWorkflowIdentifierExecutePostRequest(BaseModel):
    # Bound into the URL path (simple style, no explode).
    workflow_identifier: Annotated[
        str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
    ]

    # Sent as the application/json request body.
    workflow_execution_request: Annotated[
        WorkflowExecutionRequest,
        FieldMetadata(request=RequestMetadata(media_type="application/json")),
    ]


# The endpoint returns either a synchronous result or an async execution handle.
ResponseExecuteWorkflowV1WorkflowsWorkflowIdentifierExecutePostTypedDict = (
    TypeAliasType(
        "ResponseExecuteWorkflowV1WorkflowsWorkflowIdentifierExecutePostTypedDict",
        Union[
            WorkflowExecutionSyncResponseTypedDict, WorkflowExecutionResponseTypedDict
        ],
    )
)
r"""Successful Response"""


ResponseExecuteWorkflowV1WorkflowsWorkflowIdentifierExecutePost = TypeAliasType(
    "ResponseExecuteWorkflowV1WorkflowsWorkflowIdentifierExecutePost",
    Union[WorkflowExecutionSyncResponse, WorkflowExecutionResponse],
)
r"""Successful Response"""
diff --git a/src/mistralai/client/models/failure.py b/src/mistralai/client/models/failure.py
new file mode 100644
index 00000000..85ca77a5
--- /dev/null
+++ b/src/mistralai/client/models/failure.py
@@ -0,0 +1,20 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 596e38493eaa
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel
+from typing_extensions import TypedDict
+
+
+class FailureTypedDict(TypedDict):
+ r"""Represents an error or exception that occurred during execution."""
+
+ message: str
+ r"""A human-readable description of the failure."""
+
+
+class Failure(BaseModel):
+ r"""Represents an error or exception that occurred during execution."""
+
+ message: str
+ r"""A human-readable description of the failure."""
diff --git a/src/mistralai/client/models/ftmodelcard.py b/src/mistralai/client/models/ftmodelcard.py
index bb7c52c8..922667b0 100644
--- a/src/mistralai/client/models/ftmodelcard.py
+++ b/src/mistralai/client/models/ftmodelcard.py
@@ -27,7 +27,6 @@ class FTModelCardTypedDict(TypedDict):
r"""This is populated by Harmattan, but some fields have a name
that we don't want to expose in the API.
"""
- job: str
root: str
object: NotRequired[str]
created: NotRequired[int]
@@ -40,6 +39,7 @@ class FTModelCardTypedDict(TypedDict):
deprecation_replacement_model: NotRequired[Nullable[str]]
default_model_temperature: NotRequired[Nullable[float]]
type: Literal["fine-tuned"]
+ job: NotRequired[Nullable[str]]
archived: NotRequired[bool]
@@ -53,8 +53,6 @@ class FTModelCard(BaseModel):
that we don't want to expose in the API.
"""
- job: str
-
root: str
object: Optional[str] = "model"
@@ -82,6 +80,8 @@ class FTModelCard(BaseModel):
pydantic.Field(alias="type"),
] = "fine-tuned"
+ job: OptionalNullable[str] = UNSET
+
archived: Optional[bool] = False
@model_serializer(mode="wrap")
@@ -98,6 +98,7 @@ def serialize_model(self, handler):
"deprecation",
"deprecation_replacement_model",
"default_model_temperature",
+ "job",
"archived",
]
)
@@ -108,6 +109,7 @@ def serialize_model(self, handler):
"deprecation",
"deprecation_replacement_model",
"default_model_temperature",
+ "job",
]
)
serialized = handler(self)
diff --git a/src/mistralai/client/models/get_deployment_v1_workflows_deployments_name_getop.py b/src/mistralai/client/models/get_deployment_v1_workflows_deployments_name_getop.py
new file mode 100644
index 00000000..1f699bed
--- /dev/null
+++ b/src/mistralai/client/models/get_deployment_v1_workflows_deployments_name_getop.py
@@ -0,0 +1,17 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 3fae92e2573a
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel
+from mistralai.client.utils import FieldMetadata, PathParamMetadata
+from typing_extensions import Annotated, TypedDict
+
+
# Plain-dict mirror of the request model for use at call sites.
class GetDeploymentV1WorkflowsDeploymentsNameGetRequestTypedDict(TypedDict):
    name: str


# Request wrapper carrying the single path parameter.
class GetDeploymentV1WorkflowsDeploymentsNameGetRequest(BaseModel):
    # Bound into the URL path (simple style, no explode).
    name: Annotated[
        str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
    ]
diff --git a/src/mistralai/client/models/get_run_history_v1_workflows_runs_run_id_history_getop.py b/src/mistralai/client/models/get_run_history_v1_workflows_runs_run_id_history_getop.py
new file mode 100644
index 00000000..d974f981
--- /dev/null
+++ b/src/mistralai/client/models/get_run_history_v1_workflows_runs_run_id_history_getop.py
@@ -0,0 +1,17 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 9d566ab77998
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel
+from mistralai.client.utils import FieldMetadata, PathParamMetadata
+from typing_extensions import Annotated, TypedDict
+
+
# Plain-dict mirror of the request model for use at call sites.
class GetRunHistoryV1WorkflowsRunsRunIDHistoryGetRequestTypedDict(TypedDict):
    run_id: str


# Request wrapper carrying the single path parameter.
class GetRunHistoryV1WorkflowsRunsRunIDHistoryGetRequest(BaseModel):
    # Bound into the URL path (simple style, no explode).
    run_id: Annotated[
        str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
    ]
diff --git a/src/mistralai/client/models/get_run_v1_workflows_runs_run_id_getop.py b/src/mistralai/client/models/get_run_v1_workflows_runs_run_id_getop.py
new file mode 100644
index 00000000..d9b6758f
--- /dev/null
+++ b/src/mistralai/client/models/get_run_v1_workflows_runs_run_id_getop.py
@@ -0,0 +1,17 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 60463c59ff01
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel
+from mistralai.client.utils import FieldMetadata, PathParamMetadata
+from typing_extensions import Annotated, TypedDict
+
+
# Plain-dict mirror of the request model for use at call sites.
class GetRunV1WorkflowsRunsRunIDGetRequestTypedDict(TypedDict):
    run_id: str


# Request wrapper carrying the single path parameter.
class GetRunV1WorkflowsRunsRunIDGetRequest(BaseModel):
    # Bound into the URL path (simple style, no explode).
    run_id: Annotated[
        str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
    ]
diff --git a/src/mistralai/client/models/get_stream_events_v1_workflows_events_stream_getop.py b/src/mistralai/client/models/get_stream_events_v1_workflows_events_stream_getop.py
new file mode 100644
index 00000000..442a7a94
--- /dev/null
+++ b/src/mistralai/client/models/get_stream_events_v1_workflows_events_stream_getop.py
@@ -0,0 +1,182 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 8dd6ce0e8d66
+
+from __future__ import annotations
+from .streameventssepayload import StreamEventSsePayload, StreamEventSsePayloadTypedDict
+from .workfloweventtype import WorkflowEventType
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from mistralai.client.utils import FieldMetadata, HeaderMetadata, QueryParamMetadata
+import pydantic
+from pydantic import model_serializer
+from typing import Any, Dict, List, Literal, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
# Event scope filter; "*" matches both activity- and workflow-level events.
Scope = Literal[
    "activity",
    "workflow",
    "*",
]


# Plain-dict mirror of the request model for use at call sites.
class GetStreamEventsV1WorkflowsEventsStreamGetRequestTypedDict(TypedDict):
    scope: NotRequired[Scope]
    activity_name: NotRequired[str]
    activity_id: NotRequired[str]
    workflow_name: NotRequired[str]
    workflow_exec_id: NotRequired[str]
    root_workflow_exec_id: NotRequired[str]
    parent_workflow_exec_id: NotRequired[str]
    stream: NotRequired[str]
    start_seq: NotRequired[int]
    metadata_filters: NotRequired[Nullable[Dict[str, Any]]]
    workflow_event_types: NotRequired[Nullable[List[WorkflowEventType]]]
    last_event_id: NotRequired[Nullable[str]]


# Query/header parameters for the SSE event-stream endpoint. String filters
# default to the "*" wildcard, i.e. no filtering.
class GetStreamEventsV1WorkflowsEventsStreamGetRequest(BaseModel):
    scope: Annotated[
        Optional[Scope],
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ] = "*"

    activity_name: Annotated[
        Optional[str],
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ] = "*"

    activity_id: Annotated[
        Optional[str],
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ] = "*"

    workflow_name: Annotated[
        Optional[str],
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ] = "*"

    workflow_exec_id: Annotated[
        Optional[str],
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ] = "*"

    root_workflow_exec_id: Annotated[
        Optional[str],
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ] = "*"

    parent_workflow_exec_id: Annotated[
        Optional[str],
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ] = "*"

    stream: Annotated[
        Optional[str],
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ] = "*"

    # Sequence number to resume the stream from; 0 starts at the beginning.
    start_seq: Annotated[
        Optional[int],
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ] = 0

    metadata_filters: Annotated[
        OptionalNullable[Dict[str, Any]],
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ] = UNSET

    workflow_event_types: Annotated[
        OptionalNullable[List[WorkflowEventType]],
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ] = UNSET

    # Sent as the "last-event-id" HTTP header for SSE reconnection.
    last_event_id: Annotated[
        OptionalNullable[str],
        pydantic.Field(alias="last-event-id"),
        FieldMetadata(header=HeaderMetadata(style="simple", explode=False)),
    ] = UNSET

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Serialize, dropping optional fields that are None/unset, but
        keeping an explicit null for nullable fields the caller set.
        Note the sets are keyed by serialized name (aliases included).
        """
        optional_fields = set(
            [
                "scope",
                "activity_name",
                "activity_id",
                "workflow_name",
                "workflow_exec_id",
                "root_workflow_exec_id",
                "parent_workflow_exec_id",
                "stream",
                "start_seq",
                "metadata_filters",
                "workflow_event_types",
                "last-event-id",
            ]
        )
        nullable_fields = set(
            ["metadata_filters", "workflow_event_types", "last-event-id"]
        )
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            # Serialize under the declared alias when one exists.
            k = f.alias or n
            val = serialized.get(k, serialized.get(n))
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            # The UNSET sentinel marks "never provided" and is never emitted.
            if val != UNSET_SENTINEL:
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m


# Plain-dict mirror of the SSE response body for use at call sites.
class GetStreamEventsV1WorkflowsEventsStreamGetResponseBodyTypedDict(TypedDict):
    r"""Stream of Server-Sent Events (SSE)"""

    event: NotRequired[str]
    data: NotRequired[StreamEventSsePayloadTypedDict]
    id: NotRequired[str]
    retry: NotRequired[int]


# One SSE frame: event name, payload, event id, and optional retry hint.
class GetStreamEventsV1WorkflowsEventsStreamGetResponseBody(BaseModel):
    r"""Stream of Server-Sent Events (SSE)"""

    event: Optional[str] = None

    data: Optional[StreamEventSsePayload] = None

    id: Optional[str] = None

    retry: Optional[int] = None

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Serialize, omitting optional fields whose value is None or unset."""
        optional_fields = set(["event", "data", "id", "retry"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            # Serialize under the declared alias when one exists.
            k = f.alias or n
            val = serialized.get(k, serialized.get(n))

            # The UNSET sentinel marks "never provided" and is never emitted.
            if val != UNSET_SENTINEL:
                if val is not None or k not in optional_fields:
                    m[k] = val

        return m
diff --git a/src/mistralai/client/models/get_workflow_events_v1_workflows_events_list_getop.py b/src/mistralai/client/models/get_workflow_events_v1_workflows_events_list_getop.py
new file mode 100644
index 00000000..186c5548
--- /dev/null
+++ b/src/mistralai/client/models/get_workflow_events_v1_workflows_events_list_getop.py
@@ -0,0 +1,95 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 9d6d093835d6
+
+from __future__ import annotations
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from mistralai.client.utils import FieldMetadata, QueryParamMetadata
+from pydantic import model_serializer
+from typing import Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
# Plain-dict mirror of the request model for use at call sites.
class GetWorkflowEventsV1WorkflowsEventsListGetRequestTypedDict(TypedDict):
    root_workflow_exec_id: NotRequired[Nullable[str]]
    r"""Execution ID of the root workflow that initiated this execution chain."""
    workflow_exec_id: NotRequired[Nullable[str]]
    r"""Execution ID of the workflow that emitted this event."""
    workflow_run_id: NotRequired[Nullable[str]]
    r"""Run ID of the workflow that emitted this event."""
    limit: NotRequired[int]
    r"""Maximum number of events to return."""
    cursor: NotRequired[Nullable[str]]
    r"""Cursor for pagination."""


# Query parameters for the paginated workflow-event listing endpoint.
class GetWorkflowEventsV1WorkflowsEventsListGetRequest(BaseModel):
    root_workflow_exec_id: Annotated[
        OptionalNullable[str],
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ] = UNSET
    r"""Execution ID of the root workflow that initiated this execution chain."""

    workflow_exec_id: Annotated[
        OptionalNullable[str],
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ] = UNSET
    r"""Execution ID of the workflow that emitted this event."""

    workflow_run_id: Annotated[
        OptionalNullable[str],
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ] = UNSET
    r"""Run ID of the workflow that emitted this event."""

    limit: Annotated[
        Optional[int],
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ] = 100
    r"""Maximum number of events to return."""

    cursor: Annotated[
        OptionalNullable[str],
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ] = UNSET
    r"""Cursor for pagination."""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Serialize, dropping optional fields that are None/unset, but
        keeping an explicit null for nullable fields the caller set.
        """
        optional_fields = set(
            [
                "root_workflow_exec_id",
                "workflow_exec_id",
                "workflow_run_id",
                "limit",
                "cursor",
            ]
        )
        nullable_fields = set(
            ["root_workflow_exec_id", "workflow_exec_id", "workflow_run_id", "cursor"]
        )
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            # Serialize under the declared alias when one exists.
            k = f.alias or n
            val = serialized.get(k, serialized.get(n))
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            # The UNSET sentinel marks "never provided" and is never emitted.
            if val != UNSET_SENTINEL:
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m
diff --git a/src/mistralai/client/models/get_workflow_execution_history_v1_workflows_executions_execution_id_history_getop.py b/src/mistralai/client/models/get_workflow_execution_history_v1_workflows_executions_execution_id_history_getop.py
new file mode 100644
index 00000000..66c6fb06
--- /dev/null
+++ b/src/mistralai/client/models/get_workflow_execution_history_v1_workflows_executions_execution_id_history_getop.py
@@ -0,0 +1,21 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 8d636c8cad1e
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel
+from mistralai.client.utils import FieldMetadata, PathParamMetadata
+from typing_extensions import Annotated, TypedDict
+
+
# Plain-dict mirror of the request model for use at call sites.
class GetWorkflowExecutionHistoryV1WorkflowsExecutionsExecutionIDHistoryGetRequestTypedDict(
    TypedDict
):
    execution_id: str


# Request wrapper carrying the single path parameter.
class GetWorkflowExecutionHistoryV1WorkflowsExecutionsExecutionIDHistoryGetRequest(
    BaseModel
):
    # Bound into the URL path (simple style, no explode).
    execution_id: Annotated[
        str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
    ]
diff --git a/src/mistralai/client/models/get_workflow_execution_trace_eventsop.py b/src/mistralai/client/models/get_workflow_execution_trace_eventsop.py
new file mode 100644
index 00000000..a60fb536
--- /dev/null
+++ b/src/mistralai/client/models/get_workflow_execution_trace_eventsop.py
@@ -0,0 +1,47 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 75438195bf19
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel, UNSET_SENTINEL
+from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata
+from pydantic import model_serializer
+from typing import Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
# Dict form of the trace-events request: required path parameter plus two
# optional boolean query flags.
class GetWorkflowExecutionTraceEventsRequestTypedDict(TypedDict):
    execution_id: str
    merge_same_id_events: NotRequired[bool]
    include_internal_events: NotRequired[bool]
+
+
class GetWorkflowExecutionTraceEventsRequest(BaseModel):
    # Path parameter identifying the workflow execution.
    execution_id: Annotated[
        str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
    ]

    # Optional query flags; both default to False.
    merge_same_id_events: Annotated[
        Optional[bool],
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ] = False

    include_internal_events: Annotated[
        Optional[bool],
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ] = False

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Wrap pydantic's serialization: drop any field still at the UNSET
        # sentinel, and drop optional fields that serialized to None.
        optional_fields = set(["merge_same_id_events", "include_internal_events"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n  # prefer the wire alias when one is declared
            val = serialized.get(k, serialized.get(n))

            if val != UNSET_SENTINEL:
                if val is not None or k not in optional_fields:
                    m[k] = val

        return m
diff --git a/src/mistralai/client/models/get_workflow_execution_trace_otelop.py b/src/mistralai/client/models/get_workflow_execution_trace_otelop.py
new file mode 100644
index 00000000..0abdd75f
--- /dev/null
+++ b/src/mistralai/client/models/get_workflow_execution_trace_otelop.py
@@ -0,0 +1,17 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: f24457d6ea21
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel
+from mistralai.client.utils import FieldMetadata, PathParamMetadata
+from typing_extensions import Annotated, TypedDict
+
+
# Request for the OTEL-format trace endpoint: a single path parameter.
class GetWorkflowExecutionTraceOtelRequestTypedDict(TypedDict):
    execution_id: str


class GetWorkflowExecutionTraceOtelRequest(BaseModel):
    # Path parameter; serialized with style="simple", explode=False.
    execution_id: Annotated[
        str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
    ]
diff --git a/src/mistralai/client/models/get_workflow_execution_trace_summaryop.py b/src/mistralai/client/models/get_workflow_execution_trace_summaryop.py
new file mode 100644
index 00000000..54b080e0
--- /dev/null
+++ b/src/mistralai/client/models/get_workflow_execution_trace_summaryop.py
@@ -0,0 +1,17 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: e44e62e3c444
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel
+from mistralai.client.utils import FieldMetadata, PathParamMetadata
+from typing_extensions import Annotated, TypedDict
+
+
# Request for the trace-summary endpoint: a single path parameter.
class GetWorkflowExecutionTraceSummaryRequestTypedDict(TypedDict):
    execution_id: str


class GetWorkflowExecutionTraceSummaryRequest(BaseModel):
    # Path parameter; serialized with style="simple", explode=False.
    execution_id: Annotated[
        str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
    ]
diff --git a/src/mistralai/client/models/get_workflow_execution_v1_workflows_executions_execution_id_getop.py b/src/mistralai/client/models/get_workflow_execution_v1_workflows_executions_execution_id_getop.py
new file mode 100644
index 00000000..c9926054
--- /dev/null
+++ b/src/mistralai/client/models/get_workflow_execution_v1_workflows_executions_execution_id_getop.py
@@ -0,0 +1,19 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 25bc5d3fec8d
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel
+from mistralai.client.utils import FieldMetadata, PathParamMetadata
+from typing_extensions import Annotated, TypedDict
+
+
# Request for fetching a single workflow execution: a single path parameter.
class GetWorkflowExecutionV1WorkflowsExecutionsExecutionIDGetRequestTypedDict(
    TypedDict
):
    execution_id: str


class GetWorkflowExecutionV1WorkflowsExecutionsExecutionIDGetRequest(BaseModel):
    # Path parameter; serialized with style="simple", explode=False.
    execution_id: Annotated[
        str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
    ]
diff --git a/src/mistralai/client/models/get_workflow_metrics_v1_workflows_workflow_name_metrics_getop.py b/src/mistralai/client/models/get_workflow_metrics_v1_workflows_workflow_name_metrics_getop.py
new file mode 100644
index 00000000..1d9c8989
--- /dev/null
+++ b/src/mistralai/client/models/get_workflow_metrics_v1_workflows_workflow_name_metrics_getop.py
@@ -0,0 +1,66 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 449550c7f76a
+
+from __future__ import annotations
+from datetime import datetime
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata
+from pydantic import model_serializer
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
# Dict form of the workflow-metrics request: path parameter plus an optional,
# nullable time-range filter.
class GetWorkflowMetricsV1WorkflowsWorkflowNameMetricsGetRequestTypedDict(TypedDict):
    workflow_name: str
    start_time: NotRequired[Nullable[datetime]]
    r"""Filter workflows started after this time (ISO 8601)"""
    end_time: NotRequired[Nullable[datetime]]
    r"""Filter workflows started before this time (ISO 8601)"""
+
+
class GetWorkflowMetricsV1WorkflowsWorkflowNameMetricsGetRequest(BaseModel):
    # Path parameter; serialized with style="simple", explode=False.
    workflow_name: Annotated[
        str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
    ]

    # Optional nullable query parameters; UNSET means "not provided at all",
    # which is distinct from an explicit None (explicit null).
    start_time: Annotated[
        OptionalNullable[datetime],
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ] = UNSET
    r"""Filter workflows started after this time (ISO 8601)"""

    end_time: Annotated[
        OptionalNullable[datetime],
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ] = UNSET
    r"""Filter workflows started before this time (ISO 8601)"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Wrap pydantic's serialization so that:
        #   - fields still at the UNSET sentinel are dropped entirely;
        #   - optional fields serialized as None are dropped, UNLESS the field
        #     is nullable and was explicitly set by the caller (explicit null).
        optional_fields = set(["start_time", "end_time"])
        nullable_fields = set(["start_time", "end_time"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n  # prefer the wire alias when one is declared
            val = serialized.get(k, serialized.get(n))
            # Truthy (non-empty set) when the field is nullable and the caller
            # assigned it explicitly rather than relying on the default.
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m
diff --git a/src/mistralai/client/models/get_workflow_registration_v1_workflows_registrations_workflow_registration_id_getop.py b/src/mistralai/client/models/get_workflow_registration_v1_workflows_registrations_workflow_registration_id_getop.py
new file mode 100644
index 00000000..fe7d639d
--- /dev/null
+++ b/src/mistralai/client/models/get_workflow_registration_v1_workflows_registrations_workflow_registration_id_getop.py
@@ -0,0 +1,55 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: a6b7d0f559ef
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel, UNSET_SENTINEL
+from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata
+from pydantic import model_serializer
+from typing import Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
# Dict form of the get-registration request: path parameter plus two optional
# boolean query flags.
class GetWorkflowRegistrationV1WorkflowsRegistrationsWorkflowRegistrationIDGetRequestTypedDict(
    TypedDict
):
    workflow_registration_id: str
    with_workflow: NotRequired[bool]
    r"""Whether to include the workflow definition"""
    include_shared: NotRequired[bool]
    r"""Whether to include shared workflow versions"""
+
+
class GetWorkflowRegistrationV1WorkflowsRegistrationsWorkflowRegistrationIDGetRequest(
    BaseModel
):
    # Path parameter; serialized with style="simple", explode=False.
    workflow_registration_id: Annotated[
        str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
    ]

    # Optional query flags (note the differing defaults: False vs. True).
    with_workflow: Annotated[
        Optional[bool],
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ] = False
    r"""Whether to include the workflow definition"""

    include_shared: Annotated[
        Optional[bool],
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ] = True
    r"""Whether to include shared workflow versions"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Wrap pydantic's serialization: drop any field still at the UNSET
        # sentinel, and drop optional fields that serialized to None.
        optional_fields = set(["with_workflow", "include_shared"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n  # prefer the wire alias when one is declared
            val = serialized.get(k, serialized.get(n))

            if val != UNSET_SENTINEL:
                if val is not None or k not in optional_fields:
                    m[k] = val

        return m
diff --git a/src/mistralai/client/models/get_workflow_registrations_v1_workflows_registrations_getop.py b/src/mistralai/client/models/get_workflow_registrations_v1_workflows_registrations_getop.py
new file mode 100644
index 00000000..478ee3fb
--- /dev/null
+++ b/src/mistralai/client/models/get_workflow_registrations_v1_workflows_registrations_getop.py
@@ -0,0 +1,147 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 822f256b2372
+
+from __future__ import annotations
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from mistralai.client.utils import FieldMetadata, QueryParamMetadata
+from pydantic import model_serializer
+from typing import Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
# Dict form of the list-registrations request. Nullable fields accept an
# explicit null; plain NotRequired fields fall back to server defaults.
class GetWorkflowRegistrationsV1WorkflowsRegistrationsGetRequestTypedDict(TypedDict):
    workflow_id: NotRequired[Nullable[str]]
    r"""The workflow ID to filter by"""
    task_queue: NotRequired[Nullable[str]]
    r"""The task queue to filter by"""
    active_only: NotRequired[bool]
    r"""Whether to only return active workflow versions"""
    include_shared: NotRequired[bool]
    r"""Whether to include shared workflow versions"""
    workflow_search: NotRequired[Nullable[str]]
    r"""The workflow name to filter by"""
    archived: NotRequired[Nullable[bool]]
    r"""Filter by archived state. False=exclude archived, True=only archived, None=include all"""
    with_workflow: NotRequired[bool]
    r"""Whether to include the workflow definition"""
    available_in_chat_assistant: NotRequired[Nullable[bool]]
    r"""Whether to only return workflows compatible with chat assistant"""
    limit: NotRequired[int]
    r"""The maximum number of workflow versions to return"""
    cursor: NotRequired[Nullable[str]]
    r"""The cursor for pagination"""
+
+
class GetWorkflowRegistrationsV1WorkflowsRegistrationsGetRequest(BaseModel):
    # All fields are query parameters (style="form", explode=True). UNSET
    # means "not provided", which is distinct from an explicit None.
    workflow_id: Annotated[
        OptionalNullable[str],
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ] = UNSET
    r"""The workflow ID to filter by"""

    task_queue: Annotated[
        OptionalNullable[str],
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ] = UNSET
    r"""The task queue to filter by"""

    active_only: Annotated[
        Optional[bool],
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ] = False
    r"""Whether to only return active workflow versions"""

    include_shared: Annotated[
        Optional[bool],
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ] = True
    r"""Whether to include shared workflow versions"""

    workflow_search: Annotated[
        OptionalNullable[str],
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ] = UNSET
    r"""The workflow name to filter by"""

    archived: Annotated[
        OptionalNullable[bool],
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ] = UNSET
    r"""Filter by archived state. False=exclude archived, True=only archived, None=include all"""

    with_workflow: Annotated[
        Optional[bool],
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ] = False
    r"""Whether to include the workflow definition"""

    available_in_chat_assistant: Annotated[
        OptionalNullable[bool],
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ] = UNSET
    r"""Whether to only return workflows compatible with chat assistant"""

    limit: Annotated[
        Optional[int],
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ] = 50
    r"""The maximum number of workflow versions to return"""

    cursor: Annotated[
        OptionalNullable[str],
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ] = UNSET
    r"""The cursor for pagination"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Wrap pydantic's serialization so that:
        #   - fields still at the UNSET sentinel are dropped entirely;
        #   - optional fields serialized as None are dropped, UNLESS the field
        #     is nullable and was explicitly set by the caller (explicit null).
        optional_fields = set(
            [
                "workflow_id",
                "task_queue",
                "active_only",
                "include_shared",
                "workflow_search",
                "archived",
                "with_workflow",
                "available_in_chat_assistant",
                "limit",
                "cursor",
            ]
        )
        nullable_fields = set(
            [
                "workflow_id",
                "task_queue",
                "workflow_search",
                "archived",
                "available_in_chat_assistant",
                "cursor",
            ]
        )
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n  # prefer the wire alias when one is declared
            val = serialized.get(k, serialized.get(n))
            # Truthy (non-empty set) when the field is nullable and the caller
            # assigned it explicitly rather than relying on the default.
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m
diff --git a/src/mistralai/client/models/get_workflow_v1_workflows_workflow_identifier_getop.py b/src/mistralai/client/models/get_workflow_v1_workflows_workflow_identifier_getop.py
new file mode 100644
index 00000000..20d0b6dd
--- /dev/null
+++ b/src/mistralai/client/models/get_workflow_v1_workflows_workflow_identifier_getop.py
@@ -0,0 +1,17 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 097af37374fd
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel
+from mistralai.client.utils import FieldMetadata, PathParamMetadata
+from typing_extensions import Annotated, TypedDict
+
+
# Request for fetching a workflow by identifier: a single path parameter.
class GetWorkflowV1WorkflowsWorkflowIdentifierGetRequestTypedDict(TypedDict):
    workflow_identifier: str


class GetWorkflowV1WorkflowsWorkflowIdentifierGetRequest(BaseModel):
    # Path parameter; serialized with style="simple", explode=False.
    workflow_identifier: Annotated[
        str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
    ]
diff --git a/src/mistralai/client/models/get_workflows_v1_workflows_getop.py b/src/mistralai/client/models/get_workflows_v1_workflows_getop.py
new file mode 100644
index 00000000..6cbad5e4
--- /dev/null
+++ b/src/mistralai/client/models/get_workflows_v1_workflows_getop.py
@@ -0,0 +1,116 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: a128585aee76
+
+from __future__ import annotations
+from .workflowlistresponse import WorkflowListResponse, WorkflowListResponseTypedDict
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from mistralai.client.utils import FieldMetadata, QueryParamMetadata
+from pydantic import model_serializer
+from typing import Awaitable, Callable, Optional, Union
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
# Dict form of the list-workflows request. Nullable fields accept an explicit
# null; plain NotRequired fields fall back to the model defaults below.
class GetWorkflowsV1WorkflowsGetRequestTypedDict(TypedDict):
    active_only: NotRequired[bool]
    r"""Whether to only return active workflows"""
    include_shared: NotRequired[bool]
    r"""Whether to include shared workflows"""
    available_in_chat_assistant: NotRequired[Nullable[bool]]
    r"""Whether to only return workflows compatible with chat assistant"""
    archived: NotRequired[Nullable[bool]]
    r"""Filter by archived state. False=exclude archived, True=only archived, None=include all"""
    cursor: NotRequired[Nullable[str]]
    r"""The cursor for pagination"""
    limit: NotRequired[int]
    r"""The maximum number of workflows to return"""
+
+
class GetWorkflowsV1WorkflowsGetRequest(BaseModel):
    # All fields are query parameters (style="form", explode=True). UNSET
    # means "not provided", which is distinct from an explicit None.
    active_only: Annotated[
        Optional[bool],
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ] = False
    r"""Whether to only return active workflows"""

    include_shared: Annotated[
        Optional[bool],
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ] = True
    r"""Whether to include shared workflows"""

    available_in_chat_assistant: Annotated[
        OptionalNullable[bool],
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ] = UNSET
    r"""Whether to only return workflows compatible with chat assistant"""

    archived: Annotated[
        OptionalNullable[bool],
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ] = UNSET
    r"""Filter by archived state. False=exclude archived, True=only archived, None=include all"""

    cursor: Annotated[
        OptionalNullable[str],
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ] = UNSET
    r"""The cursor for pagination"""

    limit: Annotated[
        Optional[int],
        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
    ] = 50
    r"""The maximum number of workflows to return"""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Wrap pydantic's serialization so that:
        #   - fields still at the UNSET sentinel are dropped entirely;
        #   - optional fields serialized as None are dropped, UNLESS the field
        #     is nullable and was explicitly set by the caller (explicit null).
        optional_fields = set(
            [
                "active_only",
                "include_shared",
                "available_in_chat_assistant",
                "archived",
                "cursor",
                "limit",
            ]
        )
        nullable_fields = set(["available_in_chat_assistant", "archived", "cursor"])
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n  # prefer the wire alias when one is declared
            val = serialized.get(k, serialized.get(n))
            # Truthy (non-empty set) when the field is nullable and the caller
            # assigned it explicitly rather than relying on the default.
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            if val != UNSET_SENTINEL:
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m
+
+
class GetWorkflowsV1WorkflowsGetResponseTypedDict(TypedDict):
    result: WorkflowListResponseTypedDict


class GetWorkflowsV1WorkflowsGetResponse(BaseModel):
    # NOTE(review): generated pagination hook — a zero-argument callable
    # (sync or async variant) that presumably fetches the next page, returning
    # None when exhausted; intentionally absent from the TypedDict since it is
    # not plain data. The name shadows the builtin `next` but is part of the
    # generated interface. Confirm semantics against the SDK's pagination docs.
    next: Union[
        Callable[[], Optional[GetWorkflowsV1WorkflowsGetResponse]],
        Callable[[], Awaitable[Optional[GetWorkflowsV1WorkflowsGetResponse]]],
    ]

    result: WorkflowListResponse
diff --git a/src/mistralai/client/models/jobs_api_routes_batch_delete_batch_jobop.py b/src/mistralai/client/models/jobs_api_routes_batch_delete_batch_jobop.py
new file mode 100644
index 00000000..9fc4a7e7
--- /dev/null
+++ b/src/mistralai/client/models/jobs_api_routes_batch_delete_batch_jobop.py
@@ -0,0 +1,17 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 8c43af108342
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel
+from mistralai.client.utils import FieldMetadata, PathParamMetadata
+from typing_extensions import Annotated, TypedDict
+
+
# Request for the delete-batch-job endpoint: a single path parameter.
class JobsAPIRoutesBatchDeleteBatchJobRequestTypedDict(TypedDict):
    job_id: str


class JobsAPIRoutesBatchDeleteBatchJobRequest(BaseModel):
    # Path parameter; serialized with style="simple", explode=False.
    job_id: Annotated[
        str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
    ]
diff --git a/src/mistralai/client/models/jsonpatchadd.py b/src/mistralai/client/models/jsonpatchadd.py
new file mode 100644
index 00000000..d4242f11
--- /dev/null
+++ b/src/mistralai/client/models/jsonpatchadd.py
@@ -0,0 +1,39 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: fb2a1e58a6a5
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic.functional_validators import AfterValidator
+from typing import Any, Literal
+from typing_extensions import Annotated, TypedDict
+
+
class JSONPatchAddTypedDict(TypedDict):
    path: str
    r"""A JSON Pointer (RFC 6901) identifying the target location within the document. Can be a string path (e.g., '/foo/bar'), '/', '', or an empty list [] for root-level operations."""
    value: Any
    r"""The value to use for the operation"""
    op: Literal["add"]
    r"""Add operation"""


# JSON Patch (RFC 6902) "add" operation; `op` is a constant discriminator
# enforced by validate_const.
class JSONPatchAdd(BaseModel):
    path: str
    r"""A JSON Pointer (RFC 6901) identifying the target location within the document. Can be a string path (e.g., '/foo/bar'), '/', '', or an empty list [] for root-level operations."""

    value: Any
    r"""The value to use for the operation"""

    op: Annotated[
        Annotated[Literal["add"], AfterValidator(validate_const("add"))],
        pydantic.Field(alias="op"),
    ] = "add"
    r"""Add operation"""


# Resolve forward references eagerly; skip silently if a referenced name is
# not yet defined at import time.
try:
    JSONPatchAdd.model_rebuild()
except NameError:
    pass
diff --git a/src/mistralai/client/models/jsonpatchappend.py b/src/mistralai/client/models/jsonpatchappend.py
new file mode 100644
index 00000000..7181ddac
--- /dev/null
+++ b/src/mistralai/client/models/jsonpatchappend.py
@@ -0,0 +1,39 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 61801f21f4b0
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic.functional_validators import AfterValidator
+from typing import Literal
+from typing_extensions import Annotated, TypedDict
+
+
class JSONPatchAppendTypedDict(TypedDict):
    path: str
    r"""A JSON Pointer (RFC 6901) identifying the target location within the document. Can be a string path (e.g., '/foo/bar'), '/', '', or an empty list [] for root-level operations."""
    value: str
    r"""The value to use for the operation. A string to append to the existing value"""
    op: Literal["append"]
    r"""'append' is an extension for efficient string concatenation in streaming scenarios."""


# Non-standard extension to JSON Patch (RFC 6902): "append" concatenates a
# string onto an existing string value. `op` is a constant discriminator.
class JSONPatchAppend(BaseModel):
    path: str
    r"""A JSON Pointer (RFC 6901) identifying the target location within the document. Can be a string path (e.g., '/foo/bar'), '/', '', or an empty list [] for root-level operations."""

    value: str
    r"""The value to use for the operation. A string to append to the existing value"""

    op: Annotated[
        Annotated[Literal["append"], AfterValidator(validate_const("append"))],
        pydantic.Field(alias="op"),
    ] = "append"
    r"""'append' is an extension for efficient string concatenation in streaming scenarios."""


# Resolve forward references eagerly; skip silently if a referenced name is
# not yet defined at import time.
try:
    JSONPatchAppend.model_rebuild()
except NameError:
    pass
diff --git a/src/mistralai/client/models/jsonpatchpayloadrequest.py b/src/mistralai/client/models/jsonpatchpayloadrequest.py
new file mode 100644
index 00000000..96a6d689
--- /dev/null
+++ b/src/mistralai/client/models/jsonpatchpayloadrequest.py
@@ -0,0 +1,66 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 3f10ecfda228
+
+from __future__ import annotations
+from .jsonpatchadd import JSONPatchAdd, JSONPatchAddTypedDict
+from .jsonpatchappend import JSONPatchAppend, JSONPatchAppendTypedDict
+from .jsonpatchremove import JSONPatchRemove, JSONPatchRemoveTypedDict
+from .jsonpatchreplace import JSONPatchReplace, JSONPatchReplaceTypedDict
+from mistralai.client.types import BaseModel
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic import Field
+from pydantic.functional_validators import AfterValidator
+from typing import List, Literal, Union
+from typing_extensions import Annotated, TypeAliasType, TypedDict
+
+
# Closed union of the four patch-operation dict shapes.
JSONPatchPayloadRequestValueTypedDict = TypeAliasType(
    "JSONPatchPayloadRequestValueTypedDict",
    Union[
        JSONPatchAppendTypedDict,
        JSONPatchAddTypedDict,
        JSONPatchReplaceTypedDict,
        JSONPatchRemoveTypedDict,
    ],
)


# Model-side union, discriminated on the "op" field.
JSONPatchPayloadRequestValue = Annotated[
    Union[JSONPatchAdd, JSONPatchAppend, JSONPatchRemove, JSONPatchReplace],
    Field(discriminator="op"),
]


class JSONPatchPayloadRequestTypedDict(TypedDict):
    r"""A payload containing a list of JSON Patch operations.

    Used for streaming incremental updates to workflow state.
    """

    value: List[JSONPatchPayloadRequestValueTypedDict]
    r"""The list of JSON Patch operations to apply in order."""
    type: Literal["json_patch"]
    r"""Discriminator indicating this is a JSON Patch payload."""


class JSONPatchPayloadRequest(BaseModel):
    r"""A payload containing a list of JSON Patch operations.

    Used for streaming incremental updates to workflow state.
    """

    value: List[JSONPatchPayloadRequestValue]
    r"""The list of JSON Patch operations to apply in order."""

    # Constant discriminator enforced by validate_const.
    type: Annotated[
        Annotated[Literal["json_patch"], AfterValidator(validate_const("json_patch"))],
        pydantic.Field(alias="type"),
    ] = "json_patch"
    r"""Discriminator indicating this is a JSON Patch payload."""


# Resolve forward references eagerly; skip silently if a referenced name is
# not yet defined at import time.
try:
    JSONPatchPayloadRequest.model_rebuild()
except NameError:
    pass
diff --git a/src/mistralai/client/models/jsonpatchpayloadresponse.py b/src/mistralai/client/models/jsonpatchpayloadresponse.py
new file mode 100644
index 00000000..345e6d14
--- /dev/null
+++ b/src/mistralai/client/models/jsonpatchpayloadresponse.py
@@ -0,0 +1,100 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 1b39f46f529f
+
+from __future__ import annotations
+from .jsonpatchadd import JSONPatchAdd, JSONPatchAddTypedDict
+from .jsonpatchappend import JSONPatchAppend, JSONPatchAppendTypedDict
+from .jsonpatchremove import JSONPatchRemove, JSONPatchRemoveTypedDict
+from .jsonpatchreplace import JSONPatchReplace, JSONPatchReplaceTypedDict
+from functools import partial
+from mistralai.client.types import BaseModel
+from mistralai.client.utils import validate_const
+from mistralai.client.utils.unions import parse_open_union
+import pydantic
+from pydantic import ConfigDict
+from pydantic.functional_validators import AfterValidator, BeforeValidator
+from typing import Any, List, Literal, Union
+from typing_extensions import Annotated, TypeAliasType, TypedDict
+
+
# Closed union of the four patch-operation dict shapes (response side).
JSONPatchPayloadResponseValueTypedDict = TypeAliasType(
    "JSONPatchPayloadResponseValueTypedDict",
    Union[
        JSONPatchAppendTypedDict,
        JSONPatchAddTypedDict,
        JSONPatchReplaceTypedDict,
        JSONPatchRemoveTypedDict,
    ],
)


class UnknownJSONPatchPayloadResponseValue(BaseModel):
    r"""A JSONPatchPayloadResponseValue variant the SDK doesn't recognize. Preserves the raw payload."""

    # Sentinel discriminator plus the untouched payload; frozen so the
    # fallback value is immutable.
    op: Literal["UNKNOWN"] = "UNKNOWN"
    raw: Any
    is_unknown: Literal[True] = True

    model_config = ConfigDict(frozen=True)


# Maps the "op" discriminator to its concrete model for open-union parsing.
_JSON_PATCH_PAYLOAD_RESPONSE_VALUE_VARIANTS: dict[str, Any] = {
    "add": JSONPatchAdd,
    "append": JSONPatchAppend,
    "remove": JSONPatchRemove,
    "replace": JSONPatchReplace,
}


# Open union: unrecognized "op" values fall back to
# UnknownJSONPatchPayloadResponseValue instead of failing validation, so the
# SDK tolerates server-side additions of new operation kinds.
JSONPatchPayloadResponseValue = Annotated[
    Union[
        JSONPatchAdd,
        JSONPatchAppend,
        JSONPatchRemove,
        JSONPatchReplace,
        UnknownJSONPatchPayloadResponseValue,
    ],
    BeforeValidator(
        partial(
            parse_open_union,
            disc_key="op",
            variants=_JSON_PATCH_PAYLOAD_RESPONSE_VALUE_VARIANTS,
            unknown_cls=UnknownJSONPatchPayloadResponseValue,
            union_name="JSONPatchPayloadResponseValue",
        )
    ),
]


class JSONPatchPayloadResponseTypedDict(TypedDict):
    r"""A payload containing a list of JSON Patch operations.

    Used for streaming incremental updates to workflow state.
    """

    value: List[JSONPatchPayloadResponseValueTypedDict]
    r"""The list of JSON Patch operations to apply in order."""
    type: Literal["json_patch"]
    r"""Discriminator indicating this is a JSON Patch payload."""


class JSONPatchPayloadResponse(BaseModel):
    r"""A payload containing a list of JSON Patch operations.

    Used for streaming incremental updates to workflow state.
    """

    value: List[JSONPatchPayloadResponseValue]
    r"""The list of JSON Patch operations to apply in order."""

    # Constant discriminator enforced by validate_const.
    type: Annotated[
        Annotated[Literal["json_patch"], AfterValidator(validate_const("json_patch"))],
        pydantic.Field(alias="type"),
    ] = "json_patch"
    r"""Discriminator indicating this is a JSON Patch payload."""


# Resolve forward references eagerly; skip silently if a referenced name is
# not yet defined at import time.
try:
    JSONPatchPayloadResponse.model_rebuild()
except NameError:
    pass
diff --git a/src/mistralai/client/models/jsonpatchremove.py b/src/mistralai/client/models/jsonpatchremove.py
new file mode 100644
index 00000000..5bcedf87
--- /dev/null
+++ b/src/mistralai/client/models/jsonpatchremove.py
@@ -0,0 +1,39 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: e472e5b752ec
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic.functional_validators import AfterValidator
+from typing import Any, Literal
+from typing_extensions import Annotated, TypedDict
+
+
class JSONPatchRemoveTypedDict(TypedDict):
    path: str
    r"""A JSON Pointer (RFC 6901) identifying the target location within the document. Can be a string path (e.g., '/foo/bar'), '/', '', or an empty list [] for root-level operations."""
    value: Any
    r"""The value to use for the operation"""
    op: Literal["remove"]
    r"""Remove operation"""


# JSON Patch (RFC 6902) "remove" operation; `op` is a constant discriminator.
# NOTE(review): RFC 6902's "remove" carries no value member; this model
# requires one per the generating API spec — confirm against the service.
class JSONPatchRemove(BaseModel):
    path: str
    r"""A JSON Pointer (RFC 6901) identifying the target location within the document. Can be a string path (e.g., '/foo/bar'), '/', '', or an empty list [] for root-level operations."""

    value: Any
    r"""The value to use for the operation"""

    op: Annotated[
        Annotated[Literal["remove"], AfterValidator(validate_const("remove"))],
        pydantic.Field(alias="op"),
    ] = "remove"
    r"""Remove operation"""


# Resolve forward references eagerly; skip silently if a referenced name is
# not yet defined at import time.
try:
    JSONPatchRemove.model_rebuild()
except NameError:
    pass
diff --git a/src/mistralai/client/models/jsonpatchreplace.py b/src/mistralai/client/models/jsonpatchreplace.py
new file mode 100644
index 00000000..3dd7aee6
--- /dev/null
+++ b/src/mistralai/client/models/jsonpatchreplace.py
@@ -0,0 +1,39 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 816f9df2f3c9
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic.functional_validators import AfterValidator
+from typing import Any, Literal
+from typing_extensions import Annotated, TypedDict
+
+
class JSONPatchReplaceTypedDict(TypedDict):
    path: str
    r"""A JSON Pointer (RFC 6901) identifying the target location within the document. Can be a string path (e.g., '/foo/bar'), '/', '', or an empty list [] for root-level operations."""
    value: Any
    r"""The value to use for the operation"""
    op: Literal["replace"]
    r"""Replace operation"""


# JSON Patch (RFC 6902) "replace" operation; `op` is a constant discriminator
# enforced by validate_const.
class JSONPatchReplace(BaseModel):
    path: str
    r"""A JSON Pointer (RFC 6901) identifying the target location within the document. Can be a string path (e.g., '/foo/bar'), '/', '', or an empty list [] for root-level operations."""

    value: Any
    r"""The value to use for the operation"""

    op: Annotated[
        Annotated[Literal["replace"], AfterValidator(validate_const("replace"))],
        pydantic.Field(alias="op"),
    ] = "replace"
    r"""Replace operation"""


# Resolve forward references eagerly; skip silently if a referenced name is
# not yet defined at import time.
try:
    JSONPatchReplace.model_rebuild()
except NameError:
    pass
diff --git a/src/mistralai/client/models/jsonpayloadrequest.py b/src/mistralai/client/models/jsonpayloadrequest.py
new file mode 100644
index 00000000..252b8dac
--- /dev/null
+++ b/src/mistralai/client/models/jsonpayloadrequest.py
@@ -0,0 +1,61 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 0d49a02162ea
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel, UNSET_SENTINEL
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic import model_serializer
+from pydantic.functional_validators import AfterValidator
+from typing import Any, Literal, Optional
+from typing_extensions import Annotated, TypedDict
+
+
+class JSONPayloadRequestTypedDict(TypedDict):
+ r"""A payload containing arbitrary JSON data.
+
+ Used for complete state snapshots or final results.
+ """
+
+ value: Any
+ r"""The JSON-serializable payload value."""
+ type: Literal["json"]
+ r"""Discriminator indicating this is a raw JSON payload."""
+
+
+class JSONPayloadRequest(BaseModel):
+ r"""A payload containing arbitrary JSON data.
+
+ Used for complete state snapshots or final results.
+ """
+
+ value: Any
+ r"""The JSON-serializable payload value."""
+
+ type: Annotated[
+ Annotated[Optional[Literal["json"]], AfterValidator(validate_const("json"))],
+ pydantic.Field(alias="type"),
+ ] = "json"
+ r"""Discriminator indicating this is a raw JSON payload."""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["type"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+
+ if val != UNSET_SENTINEL:
+ if val is not None or k not in optional_fields:
+ m[k] = val
+
+ return m
+
+
+try:
+ JSONPayloadRequest.model_rebuild()
+except NameError:
+ pass
diff --git a/src/mistralai/client/models/jsonpayloadresponse.py b/src/mistralai/client/models/jsonpayloadresponse.py
new file mode 100644
index 00000000..038ea329
--- /dev/null
+++ b/src/mistralai/client/models/jsonpayloadresponse.py
@@ -0,0 +1,61 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: ba3265f85453
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel, UNSET_SENTINEL
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic import model_serializer
+from pydantic.functional_validators import AfterValidator
+from typing import Any, Literal, Optional
+from typing_extensions import Annotated, TypedDict
+
+
+class JSONPayloadResponseTypedDict(TypedDict):
+ r"""A payload containing arbitrary JSON data.
+
+ Used for complete state snapshots or final results.
+ """
+
+ value: Any
+ r"""The JSON-serializable payload value."""
+ type: Literal["json"]
+ r"""Discriminator indicating this is a raw JSON payload."""
+
+
+class JSONPayloadResponse(BaseModel):
+ r"""A payload containing arbitrary JSON data.
+
+ Used for complete state snapshots or final results.
+ """
+
+ value: Any
+ r"""The JSON-serializable payload value."""
+
+ type: Annotated[
+ Annotated[Optional[Literal["json"]], AfterValidator(validate_const("json"))],
+ pydantic.Field(alias="type"),
+ ] = "json"
+ r"""Discriminator indicating this is a raw JSON payload."""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["type"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+
+ if val != UNSET_SENTINEL:
+ if val is not None or k not in optional_fields:
+ m[k] = val
+
+ return m
+
+
+try:
+ JSONPayloadResponse.model_rebuild()
+except NameError:
+ pass
diff --git a/src/mistralai/client/models/libraries_list_v1op.py b/src/mistralai/client/models/libraries_list_v1op.py
new file mode 100644
index 00000000..088a41b2
--- /dev/null
+++ b/src/mistralai/client/models/libraries_list_v1op.py
@@ -0,0 +1,42 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 2d9b1b4deeb0
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel, UNSET_SENTINEL
+from mistralai.client.utils import FieldMetadata, QueryParamMetadata
+from pydantic import model_serializer
+from typing import Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class LibrariesListV1RequestTypedDict(TypedDict):
+ page_size: NotRequired[int]
+ page: NotRequired[int]
+
+
+class LibrariesListV1Request(BaseModel):
+ page_size: Annotated[
+ Optional[int],
+ FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
+ ] = 100
+
+ page: Annotated[
+ Optional[int],
+ FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
+ ] = 0
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["page_size", "page"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+
+ if val != UNSET_SENTINEL:
+ if val is not None or k not in optional_fields:
+ m[k] = val
+
+ return m
diff --git a/src/mistralai/client/models/list_deployments_v1_workflows_deployments_getop.py b/src/mistralai/client/models/list_deployments_v1_workflows_deployments_getop.py
new file mode 100644
index 00000000..066b8db1
--- /dev/null
+++ b/src/mistralai/client/models/list_deployments_v1_workflows_deployments_getop.py
@@ -0,0 +1,57 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 0c6586ffcab0
+
+from __future__ import annotations
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from mistralai.client.utils import FieldMetadata, QueryParamMetadata
+from pydantic import model_serializer
+from typing import Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class ListDeploymentsV1WorkflowsDeploymentsGetRequestTypedDict(TypedDict):
+ active_only: NotRequired[bool]
+ workflow_name: NotRequired[Nullable[str]]
+
+
+class ListDeploymentsV1WorkflowsDeploymentsGetRequest(BaseModel):
+ active_only: Annotated[
+ Optional[bool],
+ FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
+ ] = True
+
+ workflow_name: Annotated[
+ OptionalNullable[str],
+ FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
+ ] = UNSET
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["active_only", "workflow_name"])
+ nullable_fields = set(["workflow_name"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
diff --git a/src/mistralai/client/models/list_runs_v1_workflows_runs_getop.py b/src/mistralai/client/models/list_runs_v1_workflows_runs_getop.py
new file mode 100644
index 00000000..e61140cf
--- /dev/null
+++ b/src/mistralai/client/models/list_runs_v1_workflows_runs_getop.py
@@ -0,0 +1,121 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 2f1b225158c3
+
+from __future__ import annotations
+from .workflowexecutionlistresponse import (
+ WorkflowExecutionListResponse,
+ WorkflowExecutionListResponseTypedDict,
+)
+from .workflowexecutionstatus import WorkflowExecutionStatus
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from mistralai.client.utils import FieldMetadata, QueryParamMetadata
+from pydantic import model_serializer
+from typing import Awaitable, Callable, List, Optional, Union
+from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
+
+
+ListRunsV1WorkflowsRunsGetStatusTypedDict = TypeAliasType(
+ "ListRunsV1WorkflowsRunsGetStatusTypedDict",
+ Union[WorkflowExecutionStatus, List[WorkflowExecutionStatus]],
+)
+r"""Filter by workflow status"""
+
+
+ListRunsV1WorkflowsRunsGetStatus = TypeAliasType(
+ "ListRunsV1WorkflowsRunsGetStatus",
+ Union[WorkflowExecutionStatus, List[WorkflowExecutionStatus]],
+)
+r"""Filter by workflow status"""
+
+
+class ListRunsV1WorkflowsRunsGetRequestTypedDict(TypedDict):
+ workflow_identifier: NotRequired[Nullable[str]]
+ r"""Filter by workflow name or id"""
+ search: NotRequired[Nullable[str]]
+ r"""Search by workflow name, display name or id"""
+ status: NotRequired[Nullable[ListRunsV1WorkflowsRunsGetStatusTypedDict]]
+ r"""Filter by workflow status"""
+ page_size: NotRequired[int]
+ r"""Number of items per page"""
+ next_page_token: NotRequired[Nullable[str]]
+ r"""Token for the next page of results"""
+
+
+class ListRunsV1WorkflowsRunsGetRequest(BaseModel):
+ workflow_identifier: Annotated[
+ OptionalNullable[str],
+ FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
+ ] = UNSET
+ r"""Filter by workflow name or id"""
+
+ search: Annotated[
+ OptionalNullable[str],
+ FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
+ ] = UNSET
+ r"""Search by workflow name, display name or id"""
+
+ status: Annotated[
+ OptionalNullable[ListRunsV1WorkflowsRunsGetStatus],
+ FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
+ ] = UNSET
+ r"""Filter by workflow status"""
+
+ page_size: Annotated[
+ Optional[int],
+ FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
+ ] = 50
+ r"""Number of items per page"""
+
+ next_page_token: Annotated[
+ OptionalNullable[str],
+ FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
+ ] = UNSET
+ r"""Token for the next page of results"""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(
+ ["workflow_identifier", "search", "status", "page_size", "next_page_token"]
+ )
+ nullable_fields = set(
+ ["workflow_identifier", "search", "status", "next_page_token"]
+ )
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
+
+
+class ListRunsV1WorkflowsRunsGetResponseTypedDict(TypedDict):
+ result: WorkflowExecutionListResponseTypedDict
+
+
+class ListRunsV1WorkflowsRunsGetResponse(BaseModel):
+ next: Union[
+ Callable[[], Optional[ListRunsV1WorkflowsRunsGetResponse]],
+ Callable[[], Awaitable[Optional[ListRunsV1WorkflowsRunsGetResponse]]],
+ ]
+
+ result: WorkflowExecutionListResponse
diff --git a/src/mistralai/client/models/list_voices_v1_audio_voices_getop.py b/src/mistralai/client/models/list_voices_v1_audio_voices_getop.py
index 16ae81c7..30161b33 100644
--- a/src/mistralai/client/models/list_voices_v1_audio_voices_getop.py
+++ b/src/mistralai/client/models/list_voices_v1_audio_voices_getop.py
@@ -5,15 +5,25 @@
from mistralai.client.types import BaseModel, UNSET_SENTINEL
from mistralai.client.utils import FieldMetadata, QueryParamMetadata
from pydantic import model_serializer
-from typing import Optional
+from typing import Literal, Optional
from typing_extensions import Annotated, NotRequired, TypedDict
+ListVoicesV1AudioVoicesGetType = Literal[
+ "all",
+ "custom",
+ "preset",
+]
+r"""Filter the voices between customs and presets"""
+
+
class ListVoicesV1AudioVoicesGetRequestTypedDict(TypedDict):
limit: NotRequired[int]
r"""Maximum number of voices to return"""
offset: NotRequired[int]
r"""Offset for pagination"""
+ type: NotRequired[ListVoicesV1AudioVoicesGetType]
+ r"""Filter the voices between customs and presets"""
class ListVoicesV1AudioVoicesGetRequest(BaseModel):
@@ -29,9 +39,15 @@ class ListVoicesV1AudioVoicesGetRequest(BaseModel):
] = 0
r"""Offset for pagination"""
+ type: Annotated[
+ Optional[ListVoicesV1AudioVoicesGetType],
+ FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
+ ] = "all"
+ r"""Filter the voices between customs and presets"""
+
@model_serializer(mode="wrap")
def serialize_model(self, handler):
- optional_fields = set(["limit", "offset"])
+ optional_fields = set(["limit", "offset", "type"])
serialized = handler(self)
m = {}
diff --git a/src/mistralai/client/models/listlibrariesresponse.py b/src/mistralai/client/models/listlibrariesresponse.py
index 337fe105..be4125f2 100644
--- a/src/mistralai/client/models/listlibrariesresponse.py
+++ b/src/mistralai/client/models/listlibrariesresponse.py
@@ -3,14 +3,18 @@
from __future__ import annotations
from .library import Library, LibraryTypedDict
+from .paginationinfo import PaginationInfo, PaginationInfoTypedDict
from mistralai.client.types import BaseModel
from typing import List
from typing_extensions import TypedDict
class ListLibrariesResponseTypedDict(TypedDict):
+ pagination: PaginationInfoTypedDict
data: List[LibraryTypedDict]
class ListLibrariesResponse(BaseModel):
+ pagination: PaginationInfo
+
data: List[Library]
diff --git a/src/mistralai/client/models/listworkfloweventresponse.py b/src/mistralai/client/models/listworkfloweventresponse.py
new file mode 100644
index 00000000..be99ccea
--- /dev/null
+++ b/src/mistralai/client/models/listworkfloweventresponse.py
@@ -0,0 +1,170 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 20a423148117
+
+from __future__ import annotations
+from .activitytaskcompletedresponse import (
+ ActivityTaskCompletedResponse,
+ ActivityTaskCompletedResponseTypedDict,
+)
+from .activitytaskfailedresponse import (
+ ActivityTaskFailedResponse,
+ ActivityTaskFailedResponseTypedDict,
+)
+from .activitytaskretryingresponse import (
+ ActivityTaskRetryingResponse,
+ ActivityTaskRetryingResponseTypedDict,
+)
+from .activitytaskstartedresponse import (
+ ActivityTaskStartedResponse,
+ ActivityTaskStartedResponseTypedDict,
+)
+from .customtaskcanceledresponse import (
+ CustomTaskCanceledResponse,
+ CustomTaskCanceledResponseTypedDict,
+)
+from .customtaskcompletedresponse import (
+ CustomTaskCompletedResponse,
+ CustomTaskCompletedResponseTypedDict,
+)
+from .customtaskfailedresponse import (
+ CustomTaskFailedResponse,
+ CustomTaskFailedResponseTypedDict,
+)
+from .customtaskinprogressresponse import (
+ CustomTaskInProgressResponse,
+ CustomTaskInProgressResponseTypedDict,
+)
+from .customtaskstartedresponse import (
+ CustomTaskStartedResponse,
+ CustomTaskStartedResponseTypedDict,
+)
+from .customtasktimedoutresponse import (
+ CustomTaskTimedOutResponse,
+ CustomTaskTimedOutResponseTypedDict,
+)
+from .workflowexecutioncanceledresponse import (
+ WorkflowExecutionCanceledResponse,
+ WorkflowExecutionCanceledResponseTypedDict,
+)
+from .workflowexecutioncompletedresponse import (
+ WorkflowExecutionCompletedResponse,
+ WorkflowExecutionCompletedResponseTypedDict,
+)
+from .workflowexecutioncontinuedasnewresponse import (
+ WorkflowExecutionContinuedAsNewResponse,
+ WorkflowExecutionContinuedAsNewResponseTypedDict,
+)
+from .workflowexecutionfailedresponse import (
+ WorkflowExecutionFailedResponse,
+ WorkflowExecutionFailedResponseTypedDict,
+)
+from .workflowexecutionstartedresponse import (
+ WorkflowExecutionStartedResponse,
+ WorkflowExecutionStartedResponseTypedDict,
+)
+from .workflowtaskfailedresponse import (
+ WorkflowTaskFailedResponse,
+ WorkflowTaskFailedResponseTypedDict,
+)
+from .workflowtasktimedoutresponse import (
+ WorkflowTaskTimedOutResponse,
+ WorkflowTaskTimedOutResponseTypedDict,
+)
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing import List, Union
+from typing_extensions import NotRequired, TypeAliasType, TypedDict
+
+
+ListWorkflowEventResponseEventTypedDict = TypeAliasType(
+ "ListWorkflowEventResponseEventTypedDict",
+ Union[
+ WorkflowExecutionStartedResponseTypedDict,
+ WorkflowExecutionCompletedResponseTypedDict,
+ WorkflowExecutionFailedResponseTypedDict,
+ WorkflowExecutionCanceledResponseTypedDict,
+ WorkflowExecutionContinuedAsNewResponseTypedDict,
+ WorkflowTaskTimedOutResponseTypedDict,
+ WorkflowTaskFailedResponseTypedDict,
+ CustomTaskStartedResponseTypedDict,
+ CustomTaskInProgressResponseTypedDict,
+ CustomTaskCompletedResponseTypedDict,
+ CustomTaskFailedResponseTypedDict,
+ CustomTaskTimedOutResponseTypedDict,
+ CustomTaskCanceledResponseTypedDict,
+ ActivityTaskStartedResponseTypedDict,
+ ActivityTaskCompletedResponseTypedDict,
+ ActivityTaskRetryingResponseTypedDict,
+ ActivityTaskFailedResponseTypedDict,
+ ],
+)
+
+
+ListWorkflowEventResponseEvent = TypeAliasType(
+ "ListWorkflowEventResponseEvent",
+ Union[
+ WorkflowExecutionStartedResponse,
+ WorkflowExecutionCompletedResponse,
+ WorkflowExecutionFailedResponse,
+ WorkflowExecutionCanceledResponse,
+ WorkflowExecutionContinuedAsNewResponse,
+ WorkflowTaskTimedOutResponse,
+ WorkflowTaskFailedResponse,
+ CustomTaskStartedResponse,
+ CustomTaskInProgressResponse,
+ CustomTaskCompletedResponse,
+ CustomTaskFailedResponse,
+ CustomTaskTimedOutResponse,
+ CustomTaskCanceledResponse,
+ ActivityTaskStartedResponse,
+ ActivityTaskCompletedResponse,
+ ActivityTaskRetryingResponse,
+ ActivityTaskFailedResponse,
+ ],
+)
+
+
+class ListWorkflowEventResponseTypedDict(TypedDict):
+ events: List[ListWorkflowEventResponseEventTypedDict]
+ r"""List of workflow events."""
+ next_cursor: NotRequired[Nullable[str]]
+ r"""Cursor for pagination."""
+
+
+class ListWorkflowEventResponse(BaseModel):
+ events: List[ListWorkflowEventResponseEvent]
+ r"""List of workflow events."""
+
+ next_cursor: OptionalNullable[str] = UNSET
+ r"""Cursor for pagination."""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["next_cursor"])
+ nullable_fields = set(["next_cursor"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
diff --git a/src/mistralai/client/models/networkencodedinput.py b/src/mistralai/client/models/networkencodedinput.py
new file mode 100644
index 00000000..70d92414
--- /dev/null
+++ b/src/mistralai/client/models/networkencodedinput.py
@@ -0,0 +1,45 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 6dc5321dbe77
+
+from __future__ import annotations
+from .encodedpayloadoptions import EncodedPayloadOptions
+from mistralai.client.types import BaseModel, UNSET_SENTINEL
+from pydantic import model_serializer
+from typing import List, Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+class NetworkEncodedInputTypedDict(TypedDict):
+ b64payload: str
+ r"""The encoded payload"""
+ encoding_options: NotRequired[List[EncodedPayloadOptions]]
+ r"""The encoding of the payload"""
+ empty: NotRequired[bool]
+ r"""Whether the payload is empty"""
+
+
+class NetworkEncodedInput(BaseModel):
+ b64payload: str
+ r"""The encoded payload"""
+
+ encoding_options: Optional[List[EncodedPayloadOptions]] = None
+ r"""The encoding of the payload"""
+
+ empty: Optional[bool] = False
+ r"""Whether the payload is empty"""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["encoding_options", "empty"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+
+ if val != UNSET_SENTINEL:
+ if val is not None or k not in optional_fields:
+ m[k] = val
+
+ return m
diff --git a/src/mistralai/client/models/query_workflow_execution_v1_workflows_executions_execution_id_queries_postop.py b/src/mistralai/client/models/query_workflow_execution_v1_workflows_executions_execution_id_queries_postop.py
new file mode 100644
index 00000000..5598c4b6
--- /dev/null
+++ b/src/mistralai/client/models/query_workflow_execution_v1_workflows_executions_execution_id_queries_postop.py
@@ -0,0 +1,28 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 73826dbd3f5b
+
+from __future__ import annotations
+from .queryinvocationbody import QueryInvocationBody, QueryInvocationBodyTypedDict
+from mistralai.client.types import BaseModel
+from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata
+from typing_extensions import Annotated, TypedDict
+
+
+class QueryWorkflowExecutionV1WorkflowsExecutionsExecutionIDQueriesPostRequestTypedDict(
+ TypedDict
+):
+ execution_id: str
+ query_invocation_body: QueryInvocationBodyTypedDict
+
+
+class QueryWorkflowExecutionV1WorkflowsExecutionsExecutionIDQueriesPostRequest(
+ BaseModel
+):
+ execution_id: Annotated[
+ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
+ ]
+
+ query_invocation_body: Annotated[
+ QueryInvocationBody,
+ FieldMetadata(request=RequestMetadata(media_type="application/json")),
+ ]
diff --git a/src/mistralai/client/models/querydefinition.py b/src/mistralai/client/models/querydefinition.py
new file mode 100644
index 00000000..bc9b4f20
--- /dev/null
+++ b/src/mistralai/client/models/querydefinition.py
@@ -0,0 +1,64 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 9648273c8b7d
+
+from __future__ import annotations
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing import Any, Dict
+from typing_extensions import NotRequired, TypedDict
+
+
+class QueryDefinitionTypedDict(TypedDict):
+ name: str
+ r"""Name of the query"""
+ input_schema: Dict[str, Any]
+ r"""Input JSON schema of the query's model"""
+ description: NotRequired[Nullable[str]]
+ r"""Description of the query"""
+ output_schema: NotRequired[Nullable[Dict[str, Any]]]
+ r"""Output JSON schema of the query's model"""
+
+
+class QueryDefinition(BaseModel):
+ name: str
+ r"""Name of the query"""
+
+ input_schema: Dict[str, Any]
+ r"""Input JSON schema of the query's model"""
+
+ description: OptionalNullable[str] = UNSET
+ r"""Description of the query"""
+
+ output_schema: OptionalNullable[Dict[str, Any]] = UNSET
+ r"""Output JSON schema of the query's model"""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["description", "output_schema"])
+ nullable_fields = set(["description", "output_schema"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
diff --git a/src/mistralai/client/models/queryinvocationbody.py b/src/mistralai/client/models/queryinvocationbody.py
new file mode 100644
index 00000000..550e1d7b
--- /dev/null
+++ b/src/mistralai/client/models/queryinvocationbody.py
@@ -0,0 +1,68 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: d78662a1a9bf
+
+from __future__ import annotations
+from .networkencodedinput import NetworkEncodedInput, NetworkEncodedInputTypedDict
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing import Any, Dict, Union
+from typing_extensions import NotRequired, TypeAliasType, TypedDict
+
+
+QueryInvocationBodyInputTypedDict = TypeAliasType(
+ "QueryInvocationBodyInputTypedDict",
+ Union[NetworkEncodedInputTypedDict, Dict[str, Any]],
+)
+r"""Input data for the query, matching its schema"""
+
+
+QueryInvocationBodyInput = TypeAliasType(
+ "QueryInvocationBodyInput", Union[NetworkEncodedInput, Dict[str, Any]]
+)
+r"""Input data for the query, matching its schema"""
+
+
+class QueryInvocationBodyTypedDict(TypedDict):
+ name: str
+ r"""The name of the query to request"""
+ input: NotRequired[Nullable[QueryInvocationBodyInputTypedDict]]
+ r"""Input data for the query, matching its schema"""
+
+
+class QueryInvocationBody(BaseModel):
+ name: str
+ r"""The name of the query to request"""
+
+ input: OptionalNullable[QueryInvocationBodyInput] = UNSET
+ r"""Input data for the query, matching its schema"""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["input"])
+ nullable_fields = set(["input"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
diff --git a/src/mistralai/client/models/queryworkflowresponse.py b/src/mistralai/client/models/queryworkflowresponse.py
new file mode 100644
index 00000000..aed982d5
--- /dev/null
+++ b/src/mistralai/client/models/queryworkflowresponse.py
@@ -0,0 +1,20 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 30281fcedce6
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel
+from typing import Any
+from typing_extensions import TypedDict
+
+
+class QueryWorkflowResponseTypedDict(TypedDict):
+ query_name: str
+ result: Any
+ r"""The result of the Query workflow call"""
+
+
+class QueryWorkflowResponse(BaseModel):
+ query_name: str
+
+ result: Any
+ r"""The result of the Query workflow call"""
diff --git a/src/mistralai/client/models/reset_workflow_v1_workflows_executions_execution_id_reset_postop.py b/src/mistralai/client/models/reset_workflow_v1_workflows_executions_execution_id_reset_postop.py
new file mode 100644
index 00000000..eda20e53
--- /dev/null
+++ b/src/mistralai/client/models/reset_workflow_v1_workflows_executions_execution_id_reset_postop.py
@@ -0,0 +1,24 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 3e9d229cd8ba
+
+from __future__ import annotations
+from .resetinvocationbody import ResetInvocationBody, ResetInvocationBodyTypedDict
+from mistralai.client.types import BaseModel
+from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata
+from typing_extensions import Annotated, TypedDict
+
+
+class ResetWorkflowV1WorkflowsExecutionsExecutionIDResetPostRequestTypedDict(TypedDict):
+ execution_id: str
+ reset_invocation_body: ResetInvocationBodyTypedDict
+
+
+class ResetWorkflowV1WorkflowsExecutionsExecutionIDResetPostRequest(BaseModel):
+ execution_id: Annotated[
+ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
+ ]
+
+ reset_invocation_body: Annotated[
+ ResetInvocationBody,
+ FieldMetadata(request=RequestMetadata(media_type="application/json")),
+ ]
diff --git a/src/mistralai/client/models/resetinvocationbody.py b/src/mistralai/client/models/resetinvocationbody.py
new file mode 100644
index 00000000..26c3389f
--- /dev/null
+++ b/src/mistralai/client/models/resetinvocationbody.py
@@ -0,0 +1,64 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 3c0a985a5a77
+
+from __future__ import annotations
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing import Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+class ResetInvocationBodyTypedDict(TypedDict):
+ event_id: int
+ r"""The event ID to reset the workflow execution to"""
+ reason: NotRequired[Nullable[str]]
+ r"""Reason for resetting the workflow execution"""
+ exclude_signals: NotRequired[bool]
+ r"""Whether to exclude signals that happened after the reset point"""
+ exclude_updates: NotRequired[bool]
+ r"""Whether to exclude updates that happened after the reset point"""
+
+
+class ResetInvocationBody(BaseModel):
+ event_id: int
+ r"""The event ID to reset the workflow execution to"""
+
+ reason: OptionalNullable[str] = UNSET
+ r"""Reason for resetting the workflow execution"""
+
+ exclude_signals: Optional[bool] = False
+ r"""Whether to exclude signals that happened after the reset point"""
+
+ exclude_updates: Optional[bool] = False
+ r"""Whether to exclude updates that happened after the reset point"""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["reason", "exclude_signals", "exclude_updates"])
+ nullable_fields = set(["reason"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
diff --git a/src/mistralai/client/models/scalarmetric.py b/src/mistralai/client/models/scalarmetric.py
new file mode 100644
index 00000000..b9c70a77
--- /dev/null
+++ b/src/mistralai/client/models/scalarmetric.py
@@ -0,0 +1,27 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: ae8eb1017da6
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel
+from typing import Union
+from typing_extensions import TypeAliasType, TypedDict
+
+
+ScalarMetricValueTypedDict = TypeAliasType(
+ "ScalarMetricValueTypedDict", Union[int, float]
+)
+
+
+ScalarMetricValue = TypeAliasType("ScalarMetricValue", Union[int, float])
+
+
+class ScalarMetricTypedDict(TypedDict):
+ r"""Scalar metric with a single value."""
+
+ value: ScalarMetricValueTypedDict
+
+
+class ScalarMetric(BaseModel):
+ r"""Scalar metric with a single value."""
+
+ value: ScalarMetricValue
diff --git a/src/mistralai/client/models/schedulecalendar.py b/src/mistralai/client/models/schedulecalendar.py
new file mode 100644
index 00000000..2cb179eb
--- /dev/null
+++ b/src/mistralai/client/models/schedulecalendar.py
@@ -0,0 +1,80 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 76d72d187023
+
+from __future__ import annotations
+from .schedulerange import ScheduleRange, ScheduleRangeTypedDict
+from mistralai.client.types import (
+    BaseModel,
+    Nullable,
+    OptionalNullable,
+    UNSET,
+    UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing import List, Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+class ScheduleCalendarTypedDict(TypedDict):  # dict-style counterpart of ScheduleCalendar
+    second: NotRequired[List[ScheduleRangeTypedDict]]
+    minute: NotRequired[List[ScheduleRangeTypedDict]]
+    hour: NotRequired[List[ScheduleRangeTypedDict]]
+    day_of_month: NotRequired[List[ScheduleRangeTypedDict]]
+    month: NotRequired[List[ScheduleRangeTypedDict]]
+    year: NotRequired[List[ScheduleRangeTypedDict]]
+    day_of_week: NotRequired[List[ScheduleRangeTypedDict]]
+    comment: NotRequired[Nullable[str]]
+
+
+class ScheduleCalendar(BaseModel):  # cron-like calendar spec; each field is a list of ranges
+    second: Optional[List[ScheduleRange]] = None
+
+    minute: Optional[List[ScheduleRange]] = None
+
+    hour: Optional[List[ScheduleRange]] = None
+
+    day_of_month: Optional[List[ScheduleRange]] = None
+
+    month: Optional[List[ScheduleRange]] = None
+
+    year: Optional[List[ScheduleRange]] = None
+
+    day_of_week: Optional[List[ScheduleRange]] = None
+
+    comment: OptionalNullable[str] = UNSET  # free-text note; explicit None is preserved on dump
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):  # custom dump: omit unset/None optionals, keep explicit nulls
+        optional_fields = set(
+            [
+                "second",
+                "minute",
+                "hour",
+                "day_of_month",
+                "month",
+                "year",
+                "day_of_week",
+                "comment",
+            ]
+        )
+        nullable_fields = set(["comment"])  # an explicit None is meaningful here
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n  # wire key: alias takes precedence over attribute name
+            val = serialized.get(k, serialized.get(n))
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:  # drop fields never assigned
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
+
+        return m
diff --git a/src/mistralai/client/models/scheduledefinition.py b/src/mistralai/client/models/scheduledefinition.py
new file mode 100644
index 00000000..dc622c3a
--- /dev/null
+++ b/src/mistralai/client/models/scheduledefinition.py
@@ -0,0 +1,142 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 6ea58a356f77
+
+from __future__ import annotations
+from .schedulecalendar import ScheduleCalendar, ScheduleCalendarTypedDict
+from .scheduleinterval import ScheduleInterval, ScheduleIntervalTypedDict
+from .schedulepolicy import SchedulePolicy, SchedulePolicyTypedDict
+from datetime import datetime
+from mistralai.client.types import (
+    BaseModel,
+    Nullable,
+    OptionalNullable,
+    UNSET,
+    UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing import Any, List, Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+class ScheduleDefinitionTypedDict(TypedDict):  # dict-style counterpart of ScheduleDefinition
+    r"""Specification of the times scheduled actions may occur.
+
+    The times are the union of :py:attr:`calendars`, :py:attr:`intervals`, and
+    :py:attr:`cron_expressions` excluding anything in :py:attr:`skip`.
+
+    Used for input where schedule_id is optional (can be provided or auto-generated).
+    """
+
+    input: Any
+    r"""Input to provide to the workflow when starting it."""
+    calendars: NotRequired[List[ScheduleCalendarTypedDict]]
+    r"""Calendar-based specification of times."""
+    intervals: NotRequired[List[ScheduleIntervalTypedDict]]
+    r"""Interval-based specification of times."""
+    cron_expressions: NotRequired[List[str]]
+    r"""Cron-based specification of times."""
+    skip: NotRequired[List[ScheduleCalendarTypedDict]]
+    r"""Set of calendar times to skip."""
+    start_at: NotRequired[Nullable[datetime]]
+    r"""Time after which the first action may be run."""
+    end_at: NotRequired[Nullable[datetime]]
+    r"""Time after which no more actions will be run."""
+    jitter: NotRequired[Nullable[str]]
+    r"""Jitter to apply to each action.
+
+    An action's scheduled time will be incremented by a random value between 0
+    and this value if present (but not past the next schedule).
+
+    """
+    time_zone_name: NotRequired[Nullable[str]]
+    r"""IANA time zone name, for example ``US/Central``."""
+    policy: NotRequired[SchedulePolicyTypedDict]
+    schedule_id: NotRequired[Nullable[str]]
+    r"""Unique identifier for the schedule."""
+
+
+class ScheduleDefinition(BaseModel):
+    r"""Specification of the times scheduled actions may occur.
+
+    The times are the union of :py:attr:`calendars`, :py:attr:`intervals`, and
+    :py:attr:`cron_expressions` excluding anything in :py:attr:`skip`.
+
+    Used for input where schedule_id is optional (can be provided or auto-generated).
+    """
+
+    input: Any
+    r"""Input to provide to the workflow when starting it."""
+
+    calendars: Optional[List[ScheduleCalendar]] = None
+    r"""Calendar-based specification of times."""
+
+    intervals: Optional[List[ScheduleInterval]] = None
+    r"""Interval-based specification of times."""
+
+    cron_expressions: Optional[List[str]] = None
+    r"""Cron-based specification of times."""
+
+    skip: Optional[List[ScheduleCalendar]] = None
+    r"""Set of calendar times to skip."""
+
+    start_at: OptionalNullable[datetime] = UNSET
+    r"""Time after which the first action may be run."""
+
+    end_at: OptionalNullable[datetime] = UNSET
+    r"""Time after which no more actions will be run."""
+
+    jitter: OptionalNullable[str] = UNSET
+    r"""Jitter to apply to each action.
+
+    An action's scheduled time will be incremented by a random value between 0
+    and this value if present (but not past the next schedule).
+
+    """
+
+    time_zone_name: OptionalNullable[str] = UNSET
+    r"""IANA time zone name, for example ``US/Central``."""
+
+    policy: Optional[SchedulePolicy] = None  # overlap/catchup behavior; see SchedulePolicy
+
+    schedule_id: OptionalNullable[str] = UNSET
+    r"""Unique identifier for the schedule."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):  # custom dump: omit unset/None optionals, keep explicit nulls
+        optional_fields = set(
+            [
+                "calendars",
+                "intervals",
+                "cron_expressions",
+                "skip",
+                "start_at",
+                "end_at",
+                "jitter",
+                "time_zone_name",
+                "policy",
+                "schedule_id",
+            ]
+        )
+        nullable_fields = set(
+            ["start_at", "end_at", "jitter", "time_zone_name", "schedule_id"]
+        )  # an explicit None is meaningful for these
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n  # wire key: alias takes precedence over attribute name
+            val = serialized.get(k, serialized.get(n))
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:  # drop fields never assigned
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
+
+        return m
diff --git a/src/mistralai/client/models/scheduledefinitionoutput.py b/src/mistralai/client/models/scheduledefinitionoutput.py
new file mode 100644
index 00000000..f40470fa
--- /dev/null
+++ b/src/mistralai/client/models/scheduledefinitionoutput.py
@@ -0,0 +1,133 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 69dc15b9a0d6
+
+from __future__ import annotations
+from .schedulecalendar import ScheduleCalendar, ScheduleCalendarTypedDict
+from .scheduleinterval import ScheduleInterval, ScheduleIntervalTypedDict
+from .schedulepolicy import SchedulePolicy, SchedulePolicyTypedDict
+from datetime import datetime
+from mistralai.client.types import (
+    BaseModel,
+    Nullable,
+    OptionalNullable,
+    UNSET,
+    UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing import Any, List, Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+class ScheduleDefinitionOutputTypedDict(TypedDict):  # dict-style counterpart of ScheduleDefinitionOutput
+    r"""Output representation of a schedule with required schedule_id.
+
+    Used when returning schedules from the API where schedule_id is always present.
+    """
+
+    input: Any
+    r"""Input to provide to the workflow when starting it."""
+    schedule_id: str
+    r"""Unique identifier for the schedule."""
+    calendars: NotRequired[List[ScheduleCalendarTypedDict]]
+    r"""Calendar-based specification of times."""
+    intervals: NotRequired[List[ScheduleIntervalTypedDict]]
+    r"""Interval-based specification of times."""
+    cron_expressions: NotRequired[List[str]]
+    r"""Cron-based specification of times."""
+    skip: NotRequired[List[ScheduleCalendarTypedDict]]
+    r"""Set of calendar times to skip."""
+    start_at: NotRequired[Nullable[datetime]]
+    r"""Time after which the first action may be run."""
+    end_at: NotRequired[Nullable[datetime]]
+    r"""Time after which no more actions will be run."""
+    jitter: NotRequired[Nullable[str]]
+    r"""Jitter to apply to each action.
+
+    An action's scheduled time will be incremented by a random value between 0
+    and this value if present (but not past the next schedule).
+
+    """
+    time_zone_name: NotRequired[Nullable[str]]
+    r"""IANA time zone name, for example ``US/Central``."""
+    policy: NotRequired[SchedulePolicyTypedDict]
+
+
+class ScheduleDefinitionOutput(BaseModel):
+    r"""Output representation of a schedule with required schedule_id.
+
+    Used when returning schedules from the API where schedule_id is always present.
+    """
+
+    input: Any
+    r"""Input to provide to the workflow when starting it."""
+
+    schedule_id: str
+    r"""Unique identifier for the schedule."""
+
+    calendars: Optional[List[ScheduleCalendar]] = None
+    r"""Calendar-based specification of times."""
+
+    intervals: Optional[List[ScheduleInterval]] = None
+    r"""Interval-based specification of times."""
+
+    cron_expressions: Optional[List[str]] = None
+    r"""Cron-based specification of times."""
+
+    skip: Optional[List[ScheduleCalendar]] = None
+    r"""Set of calendar times to skip."""
+
+    start_at: OptionalNullable[datetime] = UNSET
+    r"""Time after which the first action may be run."""
+
+    end_at: OptionalNullable[datetime] = UNSET
+    r"""Time after which no more actions will be run."""
+
+    jitter: OptionalNullable[str] = UNSET
+    r"""Jitter to apply to each action.
+
+    An action's scheduled time will be incremented by a random value between 0
+    and this value if present (but not past the next schedule).
+
+    """
+
+    time_zone_name: OptionalNullable[str] = UNSET
+    r"""IANA time zone name, for example ``US/Central``."""
+
+    policy: Optional[SchedulePolicy] = None  # overlap/catchup behavior; see SchedulePolicy
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):  # custom dump: omit unset/None optionals, keep explicit nulls
+        optional_fields = set(
+            [
+                "calendars",
+                "intervals",
+                "cron_expressions",
+                "skip",
+                "start_at",
+                "end_at",
+                "jitter",
+                "time_zone_name",
+                "policy",
+            ]
+        )
+        nullable_fields = set(["start_at", "end_at", "jitter", "time_zone_name"])  # explicit None survives dump
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n  # wire key: alias takes precedence over attribute name
+            val = serialized.get(k, serialized.get(n))
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:  # drop fields never assigned
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
+
+        return m
diff --git a/src/mistralai/client/models/scheduleinterval.py b/src/mistralai/client/models/scheduleinterval.py
new file mode 100644
index 00000000..c01cf852
--- /dev/null
+++ b/src/mistralai/client/models/scheduleinterval.py
@@ -0,0 +1,49 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 1d89c2043566
+
+from __future__ import annotations
+from mistralai.client.types import (
+    BaseModel,
+    Nullable,
+    OptionalNullable,
+    UNSET,
+    UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing_extensions import NotRequired, TypedDict
+
+
+class ScheduleIntervalTypedDict(TypedDict):  # dict-style counterpart of ScheduleInterval
+    every: str
+    offset: NotRequired[Nullable[str]]
+
+
+class ScheduleInterval(BaseModel):
+    every: str  # interval period; duration encoded as a string — format not shown here, confirm against API
+
+    offset: OptionalNullable[str] = UNSET  # phase offset within the period; explicit None preserved on dump
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):  # custom dump: omit unset/None optionals, keep explicit nulls
+        optional_fields = set(["offset"])
+        nullable_fields = set(["offset"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n  # wire key: alias takes precedence over attribute name
+            val = serialized.get(k, serialized.get(n))
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:  # drop fields never assigned
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
+
+        return m
diff --git a/src/mistralai/client/models/scheduleoverlappolicy.py b/src/mistralai/client/models/scheduleoverlappolicy.py
new file mode 100644
index 00000000..13db3947
--- /dev/null
+++ b/src/mistralai/client/models/scheduleoverlappolicy.py
@@ -0,0 +1,22 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: a729c26f9c43
+
+from __future__ import annotations
+from mistralai.client.types import UnrecognizedInt
+from typing import Literal, Union
+
+
+ScheduleOverlapPolicy = Union[
+    Literal[  # NOTE(review): numeric policy codes 1-6; mapping to server-side semantics not visible here — confirm
+        1,
+        2,
+        3,
+        4,
+        5,
+        6,
+    ],
+    UnrecognizedInt,  # open-enum escape hatch: out-of-range ints are carried through rather than rejected
+]
+r"""Controls what happens when a workflow would be started by a schedule but
+one is already running.
+"""
diff --git a/src/mistralai/client/models/schedulepolicy.py b/src/mistralai/client/models/schedulepolicy.py
new file mode 100644
index 00000000..9a507656
--- /dev/null
+++ b/src/mistralai/client/models/schedulepolicy.py
@@ -0,0 +1,49 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: f326afe63958
+
+from __future__ import annotations
+from .scheduleoverlappolicy import ScheduleOverlapPolicy
+from mistralai.client.types import BaseModel, UNSET_SENTINEL
+from pydantic import model_serializer
+from typing import Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+class SchedulePolicyTypedDict(TypedDict):  # dict-style counterpart of SchedulePolicy
+    catchup_window_seconds: NotRequired[int]
+    r"""After a Temporal server is unavailable, amount of time in seconds in the past to execute missed actions."""
+    overlap: NotRequired[ScheduleOverlapPolicy]
+    r"""Controls what happens when a workflow would be started by a schedule but
+    one is already running.
+    """
+    pause_on_failure: NotRequired[bool]
+    r"""Whether to pause the schedule after a workflow failure."""
+
+
+class SchedulePolicy(BaseModel):
+    catchup_window_seconds: Optional[int] = 31536000  # default = 365 days (365 * 24 * 3600)
+    r"""After a Temporal server is unavailable, amount of time in seconds in the past to execute missed actions."""
+
+    overlap: Optional[ScheduleOverlapPolicy] = None
+    r"""Controls what happens when a workflow would be started by a schedule but
+    one is already running.
+    """
+
+    pause_on_failure: Optional[bool] = False
+    r"""Whether to pause the schedule after a workflow failure."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):  # custom dump: omit optionals left as None
+        optional_fields = set(["catchup_window_seconds", "overlap", "pause_on_failure"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n  # wire key: alias takes precedence over attribute name
+            val = serialized.get(k, serialized.get(n))
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
diff --git a/src/mistralai/client/models/schedulerange.py b/src/mistralai/client/models/schedulerange.py
new file mode 100644
index 00000000..e30eed16
--- /dev/null
+++ b/src/mistralai/client/models/schedulerange.py
@@ -0,0 +1,38 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: f9d442a062b5
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel, UNSET_SENTINEL
+from pydantic import model_serializer
+from typing import Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+class ScheduleRangeTypedDict(TypedDict):  # dict-style counterpart of ScheduleRange
+    start: int
+    end: NotRequired[int]
+    step: NotRequired[int]
+
+
+class ScheduleRange(BaseModel):  # inclusive range used by ScheduleCalendar fields
+    start: int
+
+    end: Optional[int] = 0  # 0 presumably means "single value" / server default — confirm against API
+
+    step: Optional[int] = 0  # 0 presumably means default step — confirm against API
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):  # custom dump: omit optionals left as None
+        optional_fields = set(["end", "step"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n  # wire key: alias takes precedence over attribute name
+            val = serialized.get(k, serialized.get(n))
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
diff --git a/src/mistralai/client/models/signal_workflow_execution_v1_workflows_executions_execution_id_signals_postop.py b/src/mistralai/client/models/signal_workflow_execution_v1_workflows_executions_execution_id_signals_postop.py
new file mode 100644
index 00000000..c61ed019
--- /dev/null
+++ b/src/mistralai/client/models/signal_workflow_execution_v1_workflows_executions_execution_id_signals_postop.py
@@ -0,0 +1,28 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 16c54f54e60f
+
+from __future__ import annotations
+from .signalinvocationbody import SignalInvocationBody, SignalInvocationBodyTypedDict
+from mistralai.client.types import BaseModel
+from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata
+from typing_extensions import Annotated, TypedDict
+
+
+class SignalWorkflowExecutionV1WorkflowsExecutionsExecutionIDSignalsPostRequestTypedDict(
+    TypedDict
+):
+    execution_id: str
+    signal_invocation_body: SignalInvocationBodyTypedDict
+
+
+class SignalWorkflowExecutionV1WorkflowsExecutionsExecutionIDSignalsPostRequest(
+    BaseModel
+):
+    execution_id: Annotated[
+        str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))  # URL path segment
+    ]
+
+    signal_invocation_body: Annotated[
+        SignalInvocationBody,
+        FieldMetadata(request=RequestMetadata(media_type="application/json")),  # JSON request body
+    ]
diff --git a/src/mistralai/client/models/signaldefinition.py b/src/mistralai/client/models/signaldefinition.py
new file mode 100644
index 00000000..434e1230
--- /dev/null
+++ b/src/mistralai/client/models/signaldefinition.py
@@ -0,0 +1,59 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: f1c1ac98a427
+
+from __future__ import annotations
+from mistralai.client.types import (
+    BaseModel,
+    Nullable,
+    OptionalNullable,
+    UNSET,
+    UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing import Any, Dict
+from typing_extensions import NotRequired, TypedDict
+
+
+class SignalDefinitionTypedDict(TypedDict):  # dict-style counterpart of SignalDefinition
+    name: str
+    r"""Name of the signal"""
+    input_schema: Dict[str, Any]
+    r"""Input JSON schema of the signal's model"""
+    description: NotRequired[Nullable[str]]
+    r"""Description of the signal"""
+
+
+class SignalDefinition(BaseModel):
+    name: str
+    r"""Name of the signal"""
+
+    input_schema: Dict[str, Any]
+    r"""Input JSON schema of the signal's model"""
+
+    description: OptionalNullable[str] = UNSET
+    r"""Description of the signal"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):  # custom dump: omit unset/None optionals, keep explicit nulls
+        optional_fields = set(["description"])
+        nullable_fields = set(["description"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n  # wire key: alias takes precedence over attribute name
+            val = serialized.get(k, serialized.get(n))
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:  # drop fields never assigned
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
+
+        return m
diff --git a/src/mistralai/client/models/signalinvocationbody.py b/src/mistralai/client/models/signalinvocationbody.py
new file mode 100644
index 00000000..3b7a1ff1
--- /dev/null
+++ b/src/mistralai/client/models/signalinvocationbody.py
@@ -0,0 +1,122 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 0fd96a7c058b
+
+from __future__ import annotations
+from .encodedpayloadoptions import EncodedPayloadOptions
+from mistralai.client.types import (
+    BaseModel,
+    Nullable,
+    OptionalNullable,
+    UNSET,
+    UNSET_SENTINEL,
+)
+import pydantic
+from pydantic import ConfigDict, model_serializer
+from typing import Any, Dict, List, Optional, Union
+from typing_extensions import NotRequired, TypeAliasType, TypedDict
+
+
+class SignalInvocationBodyNetworkEncodedInputTypedDict(TypedDict):  # dict-style counterpart of the model below
+    b64payload: str
+    r"""The encoded payload"""
+    encoding_options: NotRequired[List[EncodedPayloadOptions]]
+    r"""The encoding of the payload"""
+    empty: NotRequired[bool]
+    r"""Whether the payload is empty"""
+
+
+class SignalInvocationBodyNetworkEncodedInput(BaseModel):
+    model_config = ConfigDict(
+        populate_by_name=True, arbitrary_types_allowed=True, extra="allow"  # extra="allow" keeps unknown keys
+    )
+    __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False)  # holds the extra (unmodeled) properties
+
+    b64payload: str
+    r"""The encoded payload"""
+
+    encoding_options: Optional[List[EncodedPayloadOptions]] = None
+    r"""The encoding of the payload"""
+
+    empty: Optional[bool] = False
+    r"""Whether the payload is empty"""
+
+    @property
+    def additional_properties(self):
+        return self.__pydantic_extra__
+
+    @additional_properties.setter
+    def additional_properties(self, value):
+        self.__pydantic_extra__ = value  # pyright: ignore[reportIncompatibleVariableOverride]
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):  # custom dump: known fields first, then pass through extras
+        optional_fields = set(["encoding_options", "empty"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n  # wire key: alias takes precedence over attribute name
+            val = serialized.get(k, serialized.get(n))
+            serialized.pop(k, serialized.pop(n, None))  # consume known keys so only extras remain below
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+        for k, v in serialized.items():  # append additional (extra="allow") properties verbatim
+            m[k] = v
+
+        return m
+
+
+SignalInvocationBodyInputTypedDict = TypeAliasType(
+    "SignalInvocationBodyInputTypedDict",
+    Union[SignalInvocationBodyNetworkEncodedInputTypedDict, Dict[str, Any]],
+)
+r"""Input data for the signal, matching its schema"""
+
+
+SignalInvocationBodyInput = TypeAliasType(
+    "SignalInvocationBodyInput",
+    Union[SignalInvocationBodyNetworkEncodedInput, Dict[str, Any]],  # encoded payload or a raw JSON object
+)
+r"""Input data for the signal, matching its schema"""
+
+
+class SignalInvocationBodyTypedDict(TypedDict):  # dict-style counterpart of SignalInvocationBody
+    name: str
+    r"""The name of the signal to send"""
+    input: NotRequired[Nullable[SignalInvocationBodyInputTypedDict]]
+    r"""Input data for the signal, matching its schema"""
+
+
+class SignalInvocationBody(BaseModel):
+    name: str
+    r"""The name of the signal to send"""
+
+    input: OptionalNullable[SignalInvocationBodyInput] = UNSET
+    r"""Input data for the signal, matching its schema"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):  # custom dump: omit unset/None optionals, keep explicit nulls
+        optional_fields = set(["input"])
+        nullable_fields = set(["input"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n  # wire key: alias takes precedence over attribute name
+            val = serialized.get(k, serialized.get(n))
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:  # drop fields never assigned
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
+
+        return m
diff --git a/src/mistralai/client/models/signalworkflowresponse.py b/src/mistralai/client/models/signalworkflowresponse.py
new file mode 100644
index 00000000..2d9ea356
--- /dev/null
+++ b/src/mistralai/client/models/signalworkflowresponse.py
@@ -0,0 +1,32 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: e1844a7da20b
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel, UNSET_SENTINEL
+from pydantic import model_serializer
+from typing import Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+class SignalWorkflowResponseTypedDict(TypedDict):  # dict-style counterpart of SignalWorkflowResponse
+    message: NotRequired[str]
+
+
+class SignalWorkflowResponse(BaseModel):
+    message: Optional[str] = "Signal accepted"  # server default acknowledgement text
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):  # custom dump: omit optionals left as None
+        optional_fields = set(["message"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n  # wire key: alias takes precedence over attribute name
+            val = serialized.get(k, serialized.get(n))
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
diff --git a/src/mistralai/client/models/stream_v1_workflows_executions_execution_id_stream_getop.py b/src/mistralai/client/models/stream_v1_workflows_executions_execution_id_stream_getop.py
new file mode 100644
index 00000000..5282e52a
--- /dev/null
+++ b/src/mistralai/client/models/stream_v1_workflows_executions_execution_id_stream_getop.py
@@ -0,0 +1,101 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 793a9301522f
+
+from __future__ import annotations
+from .eventsource import EventSource
+from .streameventssepayload import StreamEventSsePayload, StreamEventSsePayloadTypedDict
+from mistralai.client.types import (
+    BaseModel,
+    Nullable,
+    OptionalNullable,
+    UNSET,
+    UNSET_SENTINEL,
+)
+from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata
+from pydantic import model_serializer
+from typing import Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class StreamV1WorkflowsExecutionsExecutionIDStreamGetRequestTypedDict(TypedDict):  # dict-style request shape
+    execution_id: str
+    event_source: NotRequired[Nullable[EventSource]]
+    last_event_id: NotRequired[Nullable[str]]
+
+
+class StreamV1WorkflowsExecutionsExecutionIDStreamGetRequest(BaseModel):
+    execution_id: Annotated[
+        str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))  # URL path segment
+    ]
+
+    event_source: Annotated[
+        OptionalNullable[EventSource],
+        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),  # query parameter
+    ] = UNSET
+
+    last_event_id: Annotated[
+        OptionalNullable[str],
+        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),  # query param; SSE resume cursor — confirm
+    ] = UNSET
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):  # custom dump: omit unset/None optionals, keep explicit nulls
+        optional_fields = set(["event_source", "last_event_id"])
+        nullable_fields = set(["event_source", "last_event_id"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n  # wire key: alias takes precedence over attribute name
+            val = serialized.get(k, serialized.get(n))
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:  # drop fields never assigned
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
+
+        return m
+
+
+class StreamV1WorkflowsExecutionsExecutionIDStreamGetResponseBodyTypedDict(TypedDict):
+    r"""Stream of Server-Sent Events (SSE)"""
+
+    event: NotRequired[str]
+    data: NotRequired[StreamEventSsePayloadTypedDict]
+    id: NotRequired[str]
+    retry: NotRequired[int]
+
+
+class StreamV1WorkflowsExecutionsExecutionIDStreamGetResponseBody(BaseModel):
+    r"""Stream of Server-Sent Events (SSE)"""
+
+    event: Optional[str] = None  # SSE "event:" field
+
+    data: Optional[StreamEventSsePayload] = None  # SSE "data:" field, parsed payload
+
+    id: Optional[str] = None  # SSE "id:" field
+
+    retry: Optional[int] = None  # SSE "retry:" field (reconnection delay)
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):  # custom dump: omit optionals left as None
+        optional_fields = set(["event", "data", "id", "retry"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n  # wire key: alias takes precedence over attribute name
+            val = serialized.get(k, serialized.get(n))
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
diff --git a/src/mistralai/client/models/streameventssepayload.py b/src/mistralai/client/models/streameventssepayload.py
new file mode 100644
index 00000000..2c662a65
--- /dev/null
+++ b/src/mistralai/client/models/streameventssepayload.py
@@ -0,0 +1,168 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: c6becbbd80bc
+
+from __future__ import annotations
+from .activitytaskcompletedresponse import (
+    ActivityTaskCompletedResponse,
+    ActivityTaskCompletedResponseTypedDict,
+)
+from .activitytaskfailedresponse import (
+    ActivityTaskFailedResponse,
+    ActivityTaskFailedResponseTypedDict,
+)
+from .activitytaskretryingresponse import (
+    ActivityTaskRetryingResponse,
+    ActivityTaskRetryingResponseTypedDict,
+)
+from .activitytaskstartedresponse import (
+    ActivityTaskStartedResponse,
+    ActivityTaskStartedResponseTypedDict,
+)
+from .customtaskcanceledresponse import (
+    CustomTaskCanceledResponse,
+    CustomTaskCanceledResponseTypedDict,
+)
+from .customtaskcompletedresponse import (
+    CustomTaskCompletedResponse,
+    CustomTaskCompletedResponseTypedDict,
+)
+from .customtaskfailedresponse import (
+    CustomTaskFailedResponse,
+    CustomTaskFailedResponseTypedDict,
+)
+from .customtaskinprogressresponse import (
+    CustomTaskInProgressResponse,
+    CustomTaskInProgressResponseTypedDict,
+)
+from .customtaskstartedresponse import (
+    CustomTaskStartedResponse,
+    CustomTaskStartedResponseTypedDict,
+)
+from .customtasktimedoutresponse import (
+    CustomTaskTimedOutResponse,
+    CustomTaskTimedOutResponseTypedDict,
+)
+from .streameventworkflowcontext import (
+    StreamEventWorkflowContext,
+    StreamEventWorkflowContextTypedDict,
+)
+from .workflowexecutioncanceledresponse import (
+    WorkflowExecutionCanceledResponse,
+    WorkflowExecutionCanceledResponseTypedDict,
+)
+from .workflowexecutioncompletedresponse import (
+    WorkflowExecutionCompletedResponse,
+    WorkflowExecutionCompletedResponseTypedDict,
+)
+from .workflowexecutioncontinuedasnewresponse import (
+    WorkflowExecutionContinuedAsNewResponse,
+    WorkflowExecutionContinuedAsNewResponseTypedDict,
+)
+from .workflowexecutionfailedresponse import (
+    WorkflowExecutionFailedResponse,
+    WorkflowExecutionFailedResponseTypedDict,
+)
+from .workflowexecutionstartedresponse import (
+    WorkflowExecutionStartedResponse,
+    WorkflowExecutionStartedResponseTypedDict,
+)
+from .workflowtaskfailedresponse import (
+    WorkflowTaskFailedResponse,
+    WorkflowTaskFailedResponseTypedDict,
+)
+from .workflowtasktimedoutresponse import (
+    WorkflowTaskTimedOutResponse,
+    WorkflowTaskTimedOutResponseTypedDict,
+)
+from datetime import datetime
+from mistralai.client.types import BaseModel, UNSET_SENTINEL
+from pydantic import model_serializer
+from typing import Any, Dict, Optional, Union
+from typing_extensions import NotRequired, TypeAliasType, TypedDict
+
+
+StreamEventSsePayloadDataTypedDict = TypeAliasType(
+    "StreamEventSsePayloadDataTypedDict",
+    Union[  # every event variant the stream can carry, dict-style
+        WorkflowExecutionStartedResponseTypedDict,
+        WorkflowExecutionCompletedResponseTypedDict,
+        WorkflowExecutionFailedResponseTypedDict,
+        WorkflowExecutionCanceledResponseTypedDict,
+        WorkflowExecutionContinuedAsNewResponseTypedDict,
+        WorkflowTaskTimedOutResponseTypedDict,
+        WorkflowTaskFailedResponseTypedDict,
+        CustomTaskStartedResponseTypedDict,
+        CustomTaskInProgressResponseTypedDict,
+        CustomTaskCompletedResponseTypedDict,
+        CustomTaskFailedResponseTypedDict,
+        CustomTaskTimedOutResponseTypedDict,
+        CustomTaskCanceledResponseTypedDict,
+        ActivityTaskStartedResponseTypedDict,
+        ActivityTaskCompletedResponseTypedDict,
+        ActivityTaskRetryingResponseTypedDict,
+        ActivityTaskFailedResponseTypedDict,
+    ],
+)
+
+
+StreamEventSsePayloadData = TypeAliasType(
+    "StreamEventSsePayloadData",
+    Union[  # every event variant the stream can carry
+        WorkflowExecutionStartedResponse,
+        WorkflowExecutionCompletedResponse,
+        WorkflowExecutionFailedResponse,
+        WorkflowExecutionCanceledResponse,
+        WorkflowExecutionContinuedAsNewResponse,
+        WorkflowTaskTimedOutResponse,
+        WorkflowTaskFailedResponse,
+        CustomTaskStartedResponse,
+        CustomTaskInProgressResponse,
+        CustomTaskCompletedResponse,
+        CustomTaskFailedResponse,
+        CustomTaskTimedOutResponse,
+        CustomTaskCanceledResponse,
+        ActivityTaskStartedResponse,
+        ActivityTaskCompletedResponse,
+        ActivityTaskRetryingResponse,
+        ActivityTaskFailedResponse,
+    ],
+)
+
+
+class StreamEventSsePayloadTypedDict(TypedDict):  # dict-style counterpart of StreamEventSsePayload
+    stream: str
+    data: StreamEventSsePayloadDataTypedDict
+    workflow_context: StreamEventWorkflowContextTypedDict
+    broker_sequence: int
+    timestamp: NotRequired[datetime]
+    metadata: NotRequired[Dict[str, Any]]
+
+
+class StreamEventSsePayload(BaseModel):
+    stream: str  # stream identifier; exact semantics not visible here — confirm against API docs
+
+    data: StreamEventSsePayloadData  # one of the workflow/task event variants above
+
+    workflow_context: StreamEventWorkflowContext  # identifies the originating workflow execution
+
+    broker_sequence: int  # presumably a monotonically increasing ordering key — confirm
+
+    timestamp: Optional[datetime] = None
+
+    metadata: Optional[Dict[str, Any]] = None
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):  # custom dump: omit optionals left as None
+        optional_fields = set(["timestamp", "metadata"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n  # wire key: alias takes precedence over attribute name
+            val = serialized.get(k, serialized.get(n))
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
diff --git a/src/mistralai/client/models/streameventworkflowcontext.py b/src/mistralai/client/models/streameventworkflowcontext.py
new file mode 100644
index 00000000..f24de860
--- /dev/null
+++ b/src/mistralai/client/models/streameventworkflowcontext.py
@@ -0,0 +1,58 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 14c00c79de78
+
+from __future__ import annotations
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing_extensions import NotRequired, TypedDict
+
+
+class StreamEventWorkflowContextTypedDict(TypedDict):
+ namespace: str
+ workflow_name: str
+ workflow_exec_id: str
+ parent_workflow_exec_id: NotRequired[Nullable[str]]
+ root_workflow_exec_id: NotRequired[Nullable[str]]
+
+
+class StreamEventWorkflowContext(BaseModel):
+ namespace: str
+
+ workflow_name: str
+
+ workflow_exec_id: str
+
+ parent_workflow_exec_id: OptionalNullable[str] = UNSET
+
+ root_workflow_exec_id: OptionalNullable[str] = UNSET
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["parent_workflow_exec_id", "root_workflow_exec_id"])
+ nullable_fields = set(["parent_workflow_exec_id", "root_workflow_exec_id"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
diff --git a/src/mistralai/client/models/tempogettraceresponse.py b/src/mistralai/client/models/tempogettraceresponse.py
new file mode 100644
index 00000000..8575b954
--- /dev/null
+++ b/src/mistralai/client/models/tempogettraceresponse.py
@@ -0,0 +1,49 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 8bb3c013aa76
+
+from __future__ import annotations
+from .tempotracebatch import TempoTraceBatch, TempoTraceBatchTypedDict
+from mistralai.client.types import BaseModel, UNSET_SENTINEL
+from pydantic import model_serializer
+from typing import List, Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+class TempoGetTraceResponseTypedDict(TypedDict):
+ r"""Trace response in OpenTelemetry format.
+
+ This is the unified trace format used across all trace providers (Tempo, ClickHouse, etc.).
+ Regardless of the underlying backend, all trace data is normalized to this Tempo-compatible
+ OpenTelemetry format to ensure consistency in the API response structure.
+ """
+
+ batches: NotRequired[List[TempoTraceBatchTypedDict]]
+ r"""The batches of the trace"""
+
+
+class TempoGetTraceResponse(BaseModel):
+ r"""Trace response in OpenTelemetry format.
+
+ This is the unified trace format used across all trace providers (Tempo, ClickHouse, etc.).
+ Regardless of the underlying backend, all trace data is normalized to this Tempo-compatible
+ OpenTelemetry format to ensure consistency in the API response structure.
+ """
+
+ batches: Optional[List[TempoTraceBatch]] = None
+ r"""The batches of the trace"""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["batches"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+
+ if val != UNSET_SENTINEL:
+ if val is not None or k not in optional_fields:
+ m[k] = val
+
+ return m
diff --git a/src/mistralai/client/models/tempotraceattribute.py b/src/mistralai/client/models/tempotraceattribute.py
new file mode 100644
index 00000000..71c1b1f2
--- /dev/null
+++ b/src/mistralai/client/models/tempotraceattribute.py
@@ -0,0 +1,56 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 7d0ec3402dc0
+
+from __future__ import annotations
+from .tempotraceattributeboolvalue import (
+ TempoTraceAttributeBoolValue,
+ TempoTraceAttributeBoolValueTypedDict,
+)
+from .tempotraceattributeintvalue import (
+ TempoTraceAttributeIntValue,
+ TempoTraceAttributeIntValueTypedDict,
+)
+from .tempotraceattributestringvalue import (
+ TempoTraceAttributeStringValue,
+ TempoTraceAttributeStringValueTypedDict,
+)
+from mistralai.client.types import BaseModel
+from typing import Union
+from typing_extensions import TypeAliasType, TypedDict
+
+
+TempoTraceAttributeValueTypedDict = TypeAliasType(
+ "TempoTraceAttributeValueTypedDict",
+ Union[
+ TempoTraceAttributeStringValueTypedDict,
+ TempoTraceAttributeIntValueTypedDict,
+ TempoTraceAttributeBoolValueTypedDict,
+ ],
+)
+r"""The value of the attribute"""
+
+
+TempoTraceAttributeValue = TypeAliasType(
+ "TempoTraceAttributeValue",
+ Union[
+ TempoTraceAttributeStringValue,
+ TempoTraceAttributeIntValue,
+ TempoTraceAttributeBoolValue,
+ ],
+)
+r"""The value of the attribute"""
+
+
+class TempoTraceAttributeTypedDict(TypedDict):
+ key: str
+ r"""The key of the attribute"""
+ value: TempoTraceAttributeValueTypedDict
+ r"""The value of the attribute"""
+
+
+class TempoTraceAttribute(BaseModel):
+ key: str
+ r"""The key of the attribute"""
+
+ value: TempoTraceAttributeValue
+ r"""The value of the attribute"""
diff --git a/src/mistralai/client/models/tempotraceattributeboolvalue.py b/src/mistralai/client/models/tempotraceattributeboolvalue.py
new file mode 100644
index 00000000..72fb79d0
--- /dev/null
+++ b/src/mistralai/client/models/tempotraceattributeboolvalue.py
@@ -0,0 +1,23 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: c7c383a6c05f
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel
+import pydantic
+from typing_extensions import Annotated, TypedDict
+
+
+class TempoTraceAttributeBoolValueTypedDict(TypedDict):
+ bool_value: bool
+ r"""The boolean value of the attribute"""
+
+
+class TempoTraceAttributeBoolValue(BaseModel):
+ bool_value: Annotated[bool, pydantic.Field(alias="boolValue")]
+ r"""The boolean value of the attribute"""
+
+
+try:
+ TempoTraceAttributeBoolValue.model_rebuild()
+except NameError:
+ pass
diff --git a/src/mistralai/client/models/tempotraceattributeintvalue.py b/src/mistralai/client/models/tempotraceattributeintvalue.py
new file mode 100644
index 00000000..6ffe7efe
--- /dev/null
+++ b/src/mistralai/client/models/tempotraceattributeintvalue.py
@@ -0,0 +1,23 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: c4659ad7a2a5
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel
+import pydantic
+from typing_extensions import Annotated, TypedDict
+
+
+class TempoTraceAttributeIntValueTypedDict(TypedDict):
+ int_value: str
+    r"""The integer value of the attribute, serialized as a decimal string (OTLP JSON encodes int64 as string)"""
+
+
+class TempoTraceAttributeIntValue(BaseModel):
+ int_value: Annotated[str, pydantic.Field(alias="intValue")]
+    r"""The integer value of the attribute, serialized as a decimal string (OTLP JSON encodes int64 as string)"""
+
+
+try:
+ TempoTraceAttributeIntValue.model_rebuild()
+except NameError:
+ pass
diff --git a/src/mistralai/client/models/tempotraceattributestringvalue.py b/src/mistralai/client/models/tempotraceattributestringvalue.py
new file mode 100644
index 00000000..f4dea639
--- /dev/null
+++ b/src/mistralai/client/models/tempotraceattributestringvalue.py
@@ -0,0 +1,23 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 2dcdd05a5115
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel
+import pydantic
+from typing_extensions import Annotated, TypedDict
+
+
+class TempoTraceAttributeStringValueTypedDict(TypedDict):
+ string_value: str
+ r"""The string value of the attribute"""
+
+
+class TempoTraceAttributeStringValue(BaseModel):
+ string_value: Annotated[str, pydantic.Field(alias="stringValue")]
+ r"""The string value of the attribute"""
+
+
+try:
+ TempoTraceAttributeStringValue.model_rebuild()
+except NameError:
+ pass
diff --git a/src/mistralai/client/models/tempotracebatch.py b/src/mistralai/client/models/tempotracebatch.py
new file mode 100644
index 00000000..7f508047
--- /dev/null
+++ b/src/mistralai/client/models/tempotracebatch.py
@@ -0,0 +1,48 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 969acd9d6220
+
+from __future__ import annotations
+from .tempotraceresource import TempoTraceResource, TempoTraceResourceTypedDict
+from .tempotracescopespan import TempoTraceScopeSpan, TempoTraceScopeSpanTypedDict
+from mistralai.client.types import BaseModel, UNSET_SENTINEL
+import pydantic
+from pydantic import model_serializer
+from typing import List, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class TempoTraceBatchTypedDict(TypedDict):
+ resource: TempoTraceResourceTypedDict
+ scope_spans: NotRequired[List[TempoTraceScopeSpanTypedDict]]
+    r"""The scope spans of the batch"""
+
+
+class TempoTraceBatch(BaseModel):
+ resource: TempoTraceResource
+
+ scope_spans: Annotated[
+ Optional[List[TempoTraceScopeSpan]], pydantic.Field(alias="scopeSpans")
+ ] = None
+    r"""The scope spans of the batch"""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["scopeSpans"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+
+ if val != UNSET_SENTINEL:
+ if val is not None or k not in optional_fields:
+ m[k] = val
+
+ return m
+
+
+try:
+ TempoTraceBatch.model_rebuild()
+except NameError:
+ pass
diff --git a/src/mistralai/client/models/tempotraceevent.py b/src/mistralai/client/models/tempotraceevent.py
new file mode 100644
index 00000000..bda2115a
--- /dev/null
+++ b/src/mistralai/client/models/tempotraceevent.py
@@ -0,0 +1,52 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 98dd1b838524
+
+from __future__ import annotations
+from .tempotraceattribute import TempoTraceAttribute, TempoTraceAttributeTypedDict
+from mistralai.client.types import BaseModel, UNSET_SENTINEL
+import pydantic
+from pydantic import model_serializer
+from typing import List, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class TempoTraceEventTypedDict(TypedDict):
+ name: str
+ r"""The name of the event"""
+ time_unix_nano: str
+ r"""The time of the event in Unix nano"""
+ attributes: NotRequired[List[TempoTraceAttributeTypedDict]]
+ r"""The attributes of the event"""
+
+
+class TempoTraceEvent(BaseModel):
+ name: str
+ r"""The name of the event"""
+
+ time_unix_nano: Annotated[str, pydantic.Field(alias="timeUnixNano")]
+ r"""The time of the event in Unix nano"""
+
+ attributes: Optional[List[TempoTraceAttribute]] = None
+ r"""The attributes of the event"""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["attributes"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+
+ if val != UNSET_SENTINEL:
+ if val is not None or k not in optional_fields:
+ m[k] = val
+
+ return m
+
+
+try:
+ TempoTraceEvent.model_rebuild()
+except NameError:
+ pass
diff --git a/src/mistralai/client/models/tempotraceresource.py b/src/mistralai/client/models/tempotraceresource.py
new file mode 100644
index 00000000..42c4d56a
--- /dev/null
+++ b/src/mistralai/client/models/tempotraceresource.py
@@ -0,0 +1,35 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: cc8a7ff3feea
+
+from __future__ import annotations
+from .tempotraceattribute import TempoTraceAttribute, TempoTraceAttributeTypedDict
+from mistralai.client.types import BaseModel, UNSET_SENTINEL
+from pydantic import model_serializer
+from typing import List, Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+class TempoTraceResourceTypedDict(TypedDict):
+ attributes: NotRequired[List[TempoTraceAttributeTypedDict]]
+ r"""The attributes of the resource"""
+
+
+class TempoTraceResource(BaseModel):
+ attributes: Optional[List[TempoTraceAttribute]] = None
+ r"""The attributes of the resource"""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["attributes"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+
+ if val != UNSET_SENTINEL:
+ if val is not None or k not in optional_fields:
+ m[k] = val
+
+ return m
diff --git a/src/mistralai/client/models/tempotracescope.py b/src/mistralai/client/models/tempotracescope.py
new file mode 100644
index 00000000..3e302f81
--- /dev/null
+++ b/src/mistralai/client/models/tempotracescope.py
@@ -0,0 +1,16 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: a2da1a3b8198
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel
+from typing_extensions import TypedDict
+
+
+class TempoTraceScopeTypedDict(TypedDict):
+    name: str
+    r"""The name of the scope"""
+
+
+class TempoTraceScope(BaseModel):
+    name: str
+    r"""The name of the scope"""
diff --git a/src/mistralai/client/models/tempotracescopekind.py b/src/mistralai/client/models/tempotracescopekind.py
new file mode 100644
index 00000000..a26d5c38
--- /dev/null
+++ b/src/mistralai/client/models/tempotracescopekind.py
@@ -0,0 +1,16 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 40c697c1e617
+
+from __future__ import annotations
+from mistralai.client.types import UnrecognizedStr
+from typing import Literal, Union
+
+
+TempoTraceScopeKind = Union[
+ Literal[
+ "SPAN_KIND_INTERNAL",
+ "SPAN_KIND_SERVER",
+ "SPAN_KIND_CLIENT",
+ ],
+ UnrecognizedStr,
+]
diff --git a/src/mistralai/client/models/tempotracescopespan.py b/src/mistralai/client/models/tempotracescopespan.py
new file mode 100644
index 00000000..362f3c99
--- /dev/null
+++ b/src/mistralai/client/models/tempotracescopespan.py
@@ -0,0 +1,39 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: cb248e9c0a00
+
+from __future__ import annotations
+from .tempotracescope import TempoTraceScope, TempoTraceScopeTypedDict
+from .tempotracespan import TempoTraceSpan, TempoTraceSpanTypedDict
+from mistralai.client.types import BaseModel, UNSET_SENTINEL
+from pydantic import model_serializer
+from typing import List, Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+class TempoTraceScopeSpanTypedDict(TypedDict):
+ scope: TempoTraceScopeTypedDict
+ spans: NotRequired[List[TempoTraceSpanTypedDict]]
+ r"""The spans of the scope"""
+
+
+class TempoTraceScopeSpan(BaseModel):
+ scope: TempoTraceScope
+
+ spans: Optional[List[TempoTraceSpan]] = None
+ r"""The spans of the scope"""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["spans"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+
+ if val != UNSET_SENTINEL:
+ if val is not None or k not in optional_fields:
+ m[k] = val
+
+ return m
diff --git a/src/mistralai/client/models/tempotracespan.py b/src/mistralai/client/models/tempotracespan.py
new file mode 100644
index 00000000..51b181ba
--- /dev/null
+++ b/src/mistralai/client/models/tempotracespan.py
@@ -0,0 +1,99 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: f36568c83a96
+
+from __future__ import annotations
+from .tempotraceattribute import TempoTraceAttribute, TempoTraceAttributeTypedDict
+from .tempotraceevent import TempoTraceEvent, TempoTraceEventTypedDict
+from .tempotracescopekind import TempoTraceScopeKind
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+import pydantic
+from pydantic import model_serializer
+from typing import List, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class TempoTraceSpanTypedDict(TypedDict):
+    trace_id: str
+    r"""The trace ID of the span"""
+    span_id: str
+    r"""The span ID of the span"""
+    name: str
+    r"""The name of the span"""
+    kind: TempoTraceScopeKind
+    start_time_unix_nano: str
+    r"""The start time of the span in Unix nano"""
+    end_time_unix_nano: str
+    r"""The end time of the span in Unix nano"""
+    parent_span_id: NotRequired[Nullable[str]]
+    r"""The parent span ID of the span"""
+    attributes: NotRequired[List[TempoTraceAttributeTypedDict]]
+    r"""The attributes of the span"""
+    events: NotRequired[List[TempoTraceEventTypedDict]]
+    r"""The events of the span"""
+
+
+class TempoTraceSpan(BaseModel):
+    trace_id: Annotated[str, pydantic.Field(alias="traceId")]
+    r"""The trace ID of the span"""
+
+    span_id: Annotated[str, pydantic.Field(alias="spanId")]
+    r"""The span ID of the span"""
+
+    name: str
+    r"""The name of the span"""
+
+    kind: TempoTraceScopeKind
+
+    start_time_unix_nano: Annotated[str, pydantic.Field(alias="startTimeUnixNano")]
+    r"""The start time of the span in Unix nano"""
+
+    end_time_unix_nano: Annotated[str, pydantic.Field(alias="endTimeUnixNano")]
+    r"""The end time of the span in Unix nano"""
+
+    parent_span_id: Annotated[
+        OptionalNullable[str], pydantic.Field(alias="parentSpanId")
+    ] = UNSET
+    r"""The parent span ID of the span"""
+
+    attributes: Optional[List[TempoTraceAttribute]] = None
+    r"""The attributes of the span"""
+
+    events: Optional[List[TempoTraceEvent]] = None
+    r"""The events of the span"""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["parentSpanId", "attributes", "events"])
+ nullable_fields = set(["parentSpanId"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
+
+
+try:
+ TempoTraceSpan.model_rebuild()
+except NameError:
+ pass
diff --git a/src/mistralai/client/models/terminate_workflow_execution_v1_workflows_executions_execution_id_terminate_postop.py b/src/mistralai/client/models/terminate_workflow_execution_v1_workflows_executions_execution_id_terminate_postop.py
new file mode 100644
index 00000000..771c1650
--- /dev/null
+++ b/src/mistralai/client/models/terminate_workflow_execution_v1_workflows_executions_execution_id_terminate_postop.py
@@ -0,0 +1,21 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 458eee7d2603
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel
+from mistralai.client.utils import FieldMetadata, PathParamMetadata
+from typing_extensions import Annotated, TypedDict
+
+
+class TerminateWorkflowExecutionV1WorkflowsExecutionsExecutionIDTerminatePostRequestTypedDict(
+ TypedDict
+):
+ execution_id: str
+
+
+class TerminateWorkflowExecutionV1WorkflowsExecutionsExecutionIDTerminatePostRequest(
+ BaseModel
+):
+ execution_id: Annotated[
+ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
+ ]
diff --git a/src/mistralai/client/models/timeseriesmetric.py b/src/mistralai/client/models/timeseriesmetric.py
new file mode 100644
index 00000000..a0eb0c9c
--- /dev/null
+++ b/src/mistralai/client/models/timeseriesmetric.py
@@ -0,0 +1,37 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 7f91751795ac
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel
+from typing import List, Union
+from typing_extensions import TypeAliasType, TypedDict
+
+
+TimeSeriesMetricValue1TypedDict = TypeAliasType(
+ "TimeSeriesMetricValue1TypedDict", Union[int, float]
+)
+
+
+TimeSeriesMetricValue1 = TypeAliasType("TimeSeriesMetricValue1", Union[int, float])
+
+
+TimeSeriesMetricValue2TypedDict = TypeAliasType(
+ "TimeSeriesMetricValue2TypedDict", Union[int, TimeSeriesMetricValue1TypedDict]
+)
+
+
+TimeSeriesMetricValue2 = TypeAliasType(
+ "TimeSeriesMetricValue2", Union[int, TimeSeriesMetricValue1]
+)
+
+
+class TimeSeriesMetricTypedDict(TypedDict):
+ r"""Time-series metric with timestamp-value pairs."""
+
+ value: List[List[TimeSeriesMetricValue2TypedDict]]
+
+
+class TimeSeriesMetric(BaseModel):
+ r"""Time-series metric with timestamp-value pairs."""
+
+ value: List[List[TimeSeriesMetricValue2]]
diff --git a/src/mistralai/client/models/unarchive_workflow_v1_workflows_workflow_identifier_unarchive_putop.py b/src/mistralai/client/models/unarchive_workflow_v1_workflows_workflow_identifier_unarchive_putop.py
new file mode 100644
index 00000000..3dc55512
--- /dev/null
+++ b/src/mistralai/client/models/unarchive_workflow_v1_workflows_workflow_identifier_unarchive_putop.py
@@ -0,0 +1,19 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 9df426343c2c
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel
+from mistralai.client.utils import FieldMetadata, PathParamMetadata
+from typing_extensions import Annotated, TypedDict
+
+
+class UnarchiveWorkflowV1WorkflowsWorkflowIdentifierUnarchivePutRequestTypedDict(
+ TypedDict
+):
+ workflow_identifier: str
+
+
+class UnarchiveWorkflowV1WorkflowsWorkflowIdentifierUnarchivePutRequest(BaseModel):
+ workflow_identifier: Annotated[
+ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
+ ]
diff --git a/src/mistralai/client/models/unschedule_workflow_v1_workflows_schedules_schedule_id_deleteop.py b/src/mistralai/client/models/unschedule_workflow_v1_workflows_schedules_schedule_id_deleteop.py
new file mode 100644
index 00000000..eac992d5
--- /dev/null
+++ b/src/mistralai/client/models/unschedule_workflow_v1_workflows_schedules_schedule_id_deleteop.py
@@ -0,0 +1,17 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 114f67717003
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel
+from mistralai.client.utils import FieldMetadata, PathParamMetadata
+from typing_extensions import Annotated, TypedDict
+
+
+class UnscheduleWorkflowV1WorkflowsSchedulesScheduleIDDeleteRequestTypedDict(TypedDict):
+ schedule_id: str
+
+
+class UnscheduleWorkflowV1WorkflowsSchedulesScheduleIDDeleteRequest(BaseModel):
+ schedule_id: Annotated[
+ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
+ ]
diff --git a/src/mistralai/client/models/update_workflow_execution_v1_workflows_executions_execution_id_updates_postop.py b/src/mistralai/client/models/update_workflow_execution_v1_workflows_executions_execution_id_updates_postop.py
new file mode 100644
index 00000000..30ff2bae
--- /dev/null
+++ b/src/mistralai/client/models/update_workflow_execution_v1_workflows_executions_execution_id_updates_postop.py
@@ -0,0 +1,28 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 5799cc4ab66e
+
+from __future__ import annotations
+from .updateinvocationbody import UpdateInvocationBody, UpdateInvocationBodyTypedDict
+from mistralai.client.types import BaseModel
+from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata
+from typing_extensions import Annotated, TypedDict
+
+
+class UpdateWorkflowExecutionV1WorkflowsExecutionsExecutionIDUpdatesPostRequestTypedDict(
+ TypedDict
+):
+ execution_id: str
+ update_invocation_body: UpdateInvocationBodyTypedDict
+
+
+class UpdateWorkflowExecutionV1WorkflowsExecutionsExecutionIDUpdatesPostRequest(
+ BaseModel
+):
+ execution_id: Annotated[
+ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
+ ]
+
+ update_invocation_body: Annotated[
+ UpdateInvocationBody,
+ FieldMetadata(request=RequestMetadata(media_type="application/json")),
+ ]
diff --git a/src/mistralai/client/models/update_workflow_v1_workflows_workflow_identifier_putop.py b/src/mistralai/client/models/update_workflow_v1_workflows_workflow_identifier_putop.py
new file mode 100644
index 00000000..9ee9b6d2
--- /dev/null
+++ b/src/mistralai/client/models/update_workflow_v1_workflows_workflow_identifier_putop.py
@@ -0,0 +1,24 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: c537bd5a9dd1
+
+from __future__ import annotations
+from .workflowupdaterequest import WorkflowUpdateRequest, WorkflowUpdateRequestTypedDict
+from mistralai.client.types import BaseModel
+from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata
+from typing_extensions import Annotated, TypedDict
+
+
+class UpdateWorkflowV1WorkflowsWorkflowIdentifierPutRequestTypedDict(TypedDict):
+ workflow_identifier: str
+ workflow_update_request: WorkflowUpdateRequestTypedDict
+
+
+class UpdateWorkflowV1WorkflowsWorkflowIdentifierPutRequest(BaseModel):
+ workflow_identifier: Annotated[
+ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
+ ]
+
+ workflow_update_request: Annotated[
+ WorkflowUpdateRequest,
+ FieldMetadata(request=RequestMetadata(media_type="application/json")),
+ ]
diff --git a/src/mistralai/client/models/updatedefinition.py b/src/mistralai/client/models/updatedefinition.py
new file mode 100644
index 00000000..e878c876
--- /dev/null
+++ b/src/mistralai/client/models/updatedefinition.py
@@ -0,0 +1,64 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 143f97683a02
+
+from __future__ import annotations
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing import Any, Dict
+from typing_extensions import NotRequired, TypedDict
+
+
+class UpdateDefinitionTypedDict(TypedDict):
+ name: str
+ r"""Name of the update"""
+ input_schema: Dict[str, Any]
+ r"""Input JSON schema of the update's model"""
+ description: NotRequired[Nullable[str]]
+ r"""Description of the update"""
+ output_schema: NotRequired[Nullable[Dict[str, Any]]]
+ r"""Output JSON schema of the update's model"""
+
+
+class UpdateDefinition(BaseModel):
+ name: str
+ r"""Name of the update"""
+
+ input_schema: Dict[str, Any]
+ r"""Input JSON schema of the update's model"""
+
+ description: OptionalNullable[str] = UNSET
+ r"""Description of the update"""
+
+ output_schema: OptionalNullable[Dict[str, Any]] = UNSET
+ r"""Output JSON schema of the update's model"""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["description", "output_schema"])
+ nullable_fields = set(["description", "output_schema"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
diff --git a/src/mistralai/client/models/updateinvocationbody.py b/src/mistralai/client/models/updateinvocationbody.py
new file mode 100644
index 00000000..a300bd20
--- /dev/null
+++ b/src/mistralai/client/models/updateinvocationbody.py
@@ -0,0 +1,68 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: b8558eff0be0
+
+from __future__ import annotations
+from .networkencodedinput import NetworkEncodedInput, NetworkEncodedInputTypedDict
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing import Any, Dict, Union
+from typing_extensions import NotRequired, TypeAliasType, TypedDict
+
+
+UpdateInvocationBodyInputTypedDict = TypeAliasType(
+ "UpdateInvocationBodyInputTypedDict",
+ Union[NetworkEncodedInputTypedDict, Dict[str, Any]],
+)
+r"""Input data for the update, matching its schema"""
+
+
+UpdateInvocationBodyInput = TypeAliasType(
+ "UpdateInvocationBodyInput", Union[NetworkEncodedInput, Dict[str, Any]]
+)
+r"""Input data for the update, matching its schema"""
+
+
+class UpdateInvocationBodyTypedDict(TypedDict):
+ name: str
+ r"""The name of the update to request"""
+ input: NotRequired[Nullable[UpdateInvocationBodyInputTypedDict]]
+ r"""Input data for the update, matching its schema"""
+
+
+class UpdateInvocationBody(BaseModel):
+ name: str
+ r"""The name of the update to request"""
+
+ input: OptionalNullable[UpdateInvocationBodyInput] = UNSET
+ r"""Input data for the update, matching its schema"""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["input"])
+ nullable_fields = set(["input"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
diff --git a/src/mistralai/client/models/updateworkflowresponse.py b/src/mistralai/client/models/updateworkflowresponse.py
new file mode 100644
index 00000000..699bef98
--- /dev/null
+++ b/src/mistralai/client/models/updateworkflowresponse.py
@@ -0,0 +1,20 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: f5dcf717a0a1
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel
+from typing import Any
+from typing_extensions import TypedDict
+
+
+class UpdateWorkflowResponseTypedDict(TypedDict):
+ update_name: str
+ result: Any
+ r"""The result of the Update workflow call"""
+
+
+class UpdateWorkflowResponse(BaseModel):
+ update_name: str
+
+ result: Any
+ r"""The result of the Update workflow call"""
diff --git a/src/mistralai/client/models/workflow.py b/src/mistralai/client/models/workflow.py
new file mode 100644
index 00000000..5edf326c
--- /dev/null
+++ b/src/mistralai/client/models/workflow.py
@@ -0,0 +1,106 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 1548cd73984e
+
+from __future__ import annotations
+from .workflowtype import WorkflowType
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing import Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+class WorkflowTypedDict(TypedDict):
+ id: str
+ r"""Unique identifier of the workflow"""
+ name: str
+ r"""Name of the workflow"""
+ display_name: str
+ r"""Display name of the workflow"""
+ type: WorkflowType
+ customer_id: str
+ r"""Customer ID of the workflow"""
+ workspace_id: str
+ r"""Workspace ID of the workflow"""
+ description: NotRequired[Nullable[str]]
+ r"""Description of the workflow"""
+ shared_namespace: NotRequired[Nullable[str]]
+ r"""Reserved namespace for shared workflows (e.g., 'shared:my-shared-workflow')"""
+ available_in_chat_assistant: NotRequired[bool]
+ r"""Whether the workflow is available in chat assistant"""
+ is_technical: NotRequired[bool]
+ r"""Whether the workflow is technical (e.g. SDK-managed)"""
+ archived: NotRequired[bool]
+ r"""Whether the workflow is archived"""
+
+
+class Workflow(BaseModel):
+ id: str
+ r"""Unique identifier of the workflow"""
+
+ name: str
+ r"""Name of the workflow"""
+
+ display_name: str
+ r"""Display name of the workflow"""
+
+ type: WorkflowType
+
+ customer_id: str
+ r"""Customer ID of the workflow"""
+
+ workspace_id: str
+ r"""Workspace ID of the workflow"""
+
+ description: OptionalNullable[str] = UNSET
+ r"""Description of the workflow"""
+
+ shared_namespace: OptionalNullable[str] = UNSET
+ r"""Reserved namespace for shared workflows (e.g., 'shared:my-shared-workflow')"""
+
+ available_in_chat_assistant: Optional[bool] = False
+ r"""Whether the workflow is available in chat assistant"""
+
+ is_technical: Optional[bool] = False
+ r"""Whether the workflow is technical (e.g. SDK-managed)"""
+
+ archived: Optional[bool] = False
+ r"""Whether the workflow is archived"""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(
+ [
+ "description",
+ "shared_namespace",
+ "available_in_chat_assistant",
+ "is_technical",
+ "archived",
+ ]
+ )
+ nullable_fields = set(["description", "shared_namespace"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
diff --git a/src/mistralai/client/models/workflowarchiveresponse.py b/src/mistralai/client/models/workflowarchiveresponse.py
new file mode 100644
index 00000000..18eeccf2
--- /dev/null
+++ b/src/mistralai/client/models/workflowarchiveresponse.py
@@ -0,0 +1,15 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 64c479b7f9da
+
+from __future__ import annotations
+from .workflow import Workflow, WorkflowTypedDict
+from mistralai.client.types import BaseModel
+from typing_extensions import TypedDict
+
+
+class WorkflowArchiveResponseTypedDict(TypedDict):
+ workflow: WorkflowTypedDict
+
+
+class WorkflowArchiveResponse(BaseModel):
+ workflow: Workflow
diff --git a/src/mistralai/client/models/workflowbasicdefinition.py b/src/mistralai/client/models/workflowbasicdefinition.py
new file mode 100644
index 00000000..d2f3db1f
--- /dev/null
+++ b/src/mistralai/client/models/workflowbasicdefinition.py
@@ -0,0 +1,71 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 34623036478d
+
+from __future__ import annotations
+from .workflowmetadata import WorkflowMetadata, WorkflowMetadataTypedDict
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing import Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+class WorkflowBasicDefinitionTypedDict(TypedDict):
+ id: str
+ name: str
+ r"""The name of the workflow"""
+ display_name: str
+ r"""The display name of the workflow"""
+ archived: bool
+ r"""Whether the workflow is archived"""
+ description: NotRequired[Nullable[str]]
+ r"""A description of the workflow"""
+ metadata: NotRequired[WorkflowMetadataTypedDict]
+
+
+class WorkflowBasicDefinition(BaseModel):
+ id: str
+
+ name: str
+ r"""The name of the workflow"""
+
+ display_name: str
+ r"""The display name of the workflow"""
+
+ archived: bool
+ r"""Whether the workflow is archived"""
+
+ description: OptionalNullable[str] = UNSET
+ r"""A description of the workflow"""
+
+ metadata: Optional[WorkflowMetadata] = None
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["description", "metadata"])
+ nullable_fields = set(["description"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
diff --git a/src/mistralai/client/models/workflowcodedefinition.py b/src/mistralai/client/models/workflowcodedefinition.py
new file mode 100644
index 00000000..f71b9ff1
--- /dev/null
+++ b/src/mistralai/client/models/workflowcodedefinition.py
@@ -0,0 +1,91 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 36fd5b898ddd
+
+from __future__ import annotations
+from .querydefinition import QueryDefinition, QueryDefinitionTypedDict
+from .signaldefinition import SignalDefinition, SignalDefinitionTypedDict
+from .updatedefinition import UpdateDefinition, UpdateDefinitionTypedDict
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing import Any, Dict, List, Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+class WorkflowCodeDefinitionTypedDict(TypedDict):
+ input_schema: Dict[str, Any]
+ r"""Input schema of the workflow's run method"""
+ output_schema: NotRequired[Nullable[Dict[str, Any]]]
+ r"""Output schema of the workflow's run method"""
+ signals: NotRequired[List[SignalDefinitionTypedDict]]
+ r"""Signal handlers defined by the workflow"""
+ queries: NotRequired[List[QueryDefinitionTypedDict]]
+ r"""Query handlers defined by the workflow"""
+ updates: NotRequired[List[UpdateDefinitionTypedDict]]
+ r"""Update handlers defined by the workflow"""
+ enforce_determinism: NotRequired[bool]
+ r"""Whether the workflow enforces deterministic execution"""
+ execution_timeout: NotRequired[float]
+ r"""Maximum total execution time including retries and continue-as-new"""
+
+
+class WorkflowCodeDefinition(BaseModel):
+ input_schema: Dict[str, Any]
+ r"""Input schema of the workflow's run method"""
+
+ output_schema: OptionalNullable[Dict[str, Any]] = UNSET
+ r"""Output schema of the workflow's run method"""
+
+ signals: Optional[List[SignalDefinition]] = None
+ r"""Signal handlers defined by the workflow"""
+
+ queries: Optional[List[QueryDefinition]] = None
+ r"""Query handlers defined by the workflow"""
+
+ updates: Optional[List[UpdateDefinition]] = None
+ r"""Update handlers defined by the workflow"""
+
+ enforce_determinism: Optional[bool] = False
+ r"""Whether the workflow enforces deterministic execution"""
+
+ execution_timeout: Optional[float] = None
+ r"""Maximum total execution time including retries and continue-as-new"""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(
+ [
+ "output_schema",
+ "signals",
+ "queries",
+ "updates",
+ "enforce_determinism",
+ "execution_timeout",
+ ]
+ )
+ nullable_fields = set(["output_schema"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
diff --git a/src/mistralai/client/models/workfloweventbatchrequest.py b/src/mistralai/client/models/workfloweventbatchrequest.py
new file mode 100644
index 00000000..fba4a160
--- /dev/null
+++ b/src/mistralai/client/models/workfloweventbatchrequest.py
@@ -0,0 +1,137 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: c0c0986a6b07
+
+from __future__ import annotations
+from .activitytaskcompletedrequest import (
+ ActivityTaskCompletedRequest,
+ ActivityTaskCompletedRequestTypedDict,
+)
+from .activitytaskfailedrequest import (
+ ActivityTaskFailedRequest,
+ ActivityTaskFailedRequestTypedDict,
+)
+from .activitytaskretryingrequest import (
+ ActivityTaskRetryingRequest,
+ ActivityTaskRetryingRequestTypedDict,
+)
+from .activitytaskstartedrequest import (
+ ActivityTaskStartedRequest,
+ ActivityTaskStartedRequestTypedDict,
+)
+from .customtaskcanceledrequest import (
+ CustomTaskCanceledRequest,
+ CustomTaskCanceledRequestTypedDict,
+)
+from .customtaskcompletedrequest import (
+ CustomTaskCompletedRequest,
+ CustomTaskCompletedRequestTypedDict,
+)
+from .customtaskfailedrequest import (
+ CustomTaskFailedRequest,
+ CustomTaskFailedRequestTypedDict,
+)
+from .customtaskinprogressrequest import (
+ CustomTaskInProgressRequest,
+ CustomTaskInProgressRequestTypedDict,
+)
+from .customtaskstartedrequest import (
+ CustomTaskStartedRequest,
+ CustomTaskStartedRequestTypedDict,
+)
+from .customtasktimedoutrequest import (
+ CustomTaskTimedOutRequest,
+ CustomTaskTimedOutRequestTypedDict,
+)
+from .workflowexecutioncanceledrequest import (
+ WorkflowExecutionCanceledRequest,
+ WorkflowExecutionCanceledRequestTypedDict,
+)
+from .workflowexecutioncompletedrequest import (
+ WorkflowExecutionCompletedRequest,
+ WorkflowExecutionCompletedRequestTypedDict,
+)
+from .workflowexecutioncontinuedasnewrequest import (
+ WorkflowExecutionContinuedAsNewRequest,
+ WorkflowExecutionContinuedAsNewRequestTypedDict,
+)
+from .workflowexecutionfailedrequest import (
+ WorkflowExecutionFailedRequest,
+ WorkflowExecutionFailedRequestTypedDict,
+)
+from .workflowexecutionstartedrequest import (
+ WorkflowExecutionStartedRequest,
+ WorkflowExecutionStartedRequestTypedDict,
+)
+from .workflowtaskfailedrequest import (
+ WorkflowTaskFailedRequest,
+ WorkflowTaskFailedRequestTypedDict,
+)
+from .workflowtasktimedoutrequest import (
+ WorkflowTaskTimedOutRequest,
+ WorkflowTaskTimedOutRequestTypedDict,
+)
+from mistralai.client.types import BaseModel
+from typing import List, Union
+from typing_extensions import TypeAliasType, TypedDict
+
+
+WorkflowEventBatchRequestEventTypedDict = TypeAliasType(
+ "WorkflowEventBatchRequestEventTypedDict",
+ Union[
+ WorkflowExecutionStartedRequestTypedDict,
+ WorkflowExecutionCompletedRequestTypedDict,
+ WorkflowExecutionFailedRequestTypedDict,
+ WorkflowExecutionCanceledRequestTypedDict,
+ WorkflowExecutionContinuedAsNewRequestTypedDict,
+ WorkflowTaskTimedOutRequestTypedDict,
+ WorkflowTaskFailedRequestTypedDict,
+ CustomTaskStartedRequestTypedDict,
+ CustomTaskInProgressRequestTypedDict,
+ CustomTaskCompletedRequestTypedDict,
+ CustomTaskFailedRequestTypedDict,
+ CustomTaskTimedOutRequestTypedDict,
+ CustomTaskCanceledRequestTypedDict,
+ ActivityTaskStartedRequestTypedDict,
+ ActivityTaskCompletedRequestTypedDict,
+ ActivityTaskRetryingRequestTypedDict,
+ ActivityTaskFailedRequestTypedDict,
+ ],
+)
+
+
+WorkflowEventBatchRequestEvent = TypeAliasType(
+ "WorkflowEventBatchRequestEvent",
+ Union[
+ WorkflowExecutionStartedRequest,
+ WorkflowExecutionCompletedRequest,
+ WorkflowExecutionFailedRequest,
+ WorkflowExecutionCanceledRequest,
+ WorkflowExecutionContinuedAsNewRequest,
+ WorkflowTaskTimedOutRequest,
+ WorkflowTaskFailedRequest,
+ CustomTaskStartedRequest,
+ CustomTaskInProgressRequest,
+ CustomTaskCompletedRequest,
+ CustomTaskFailedRequest,
+ CustomTaskTimedOutRequest,
+ CustomTaskCanceledRequest,
+ ActivityTaskStartedRequest,
+ ActivityTaskCompletedRequest,
+ ActivityTaskRetryingRequest,
+ ActivityTaskFailedRequest,
+ ],
+)
+
+
+class WorkflowEventBatchRequestTypedDict(TypedDict):
+ r"""Request model containing multiple workflow events."""
+
+ events: List[WorkflowEventBatchRequestEventTypedDict]
+ r"""List of workflow events to send."""
+
+
+class WorkflowEventBatchRequest(BaseModel):
+ r"""Request model containing multiple workflow events."""
+
+ events: List[WorkflowEventBatchRequestEvent]
+ r"""List of workflow events to send."""
diff --git a/src/mistralai/client/models/workfloweventbatchresponse.py b/src/mistralai/client/models/workfloweventbatchresponse.py
new file mode 100644
index 00000000..0aa842a3
--- /dev/null
+++ b/src/mistralai/client/models/workfloweventbatchresponse.py
@@ -0,0 +1,74 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 4cafe49944be
+
+from __future__ import annotations
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+ UnrecognizedStr,
+)
+from pydantic import model_serializer
+from typing import Literal, Union
+from typing_extensions import NotRequired, TypedDict
+
+
+WorkflowEventBatchResponseStatus = Union[
+ Literal[
+ "success",
+ "error",
+ ],
+ UnrecognizedStr,
+]
+r"""Status of the batch event reception"""
+
+
+class WorkflowEventBatchResponseTypedDict(TypedDict):
+ r"""Response model for batch workflow event reception."""
+
+ status: WorkflowEventBatchResponseStatus
+ r"""Status of the batch event reception"""
+ events_received: int
+ r"""Number of events successfully received"""
+ message: NotRequired[Nullable[str]]
+ r"""Optional message"""
+
+
+class WorkflowEventBatchResponse(BaseModel):
+ r"""Response model for batch workflow event reception."""
+
+ status: WorkflowEventBatchResponseStatus
+ r"""Status of the batch event reception"""
+
+ events_received: int
+ r"""Number of events successfully received"""
+
+ message: OptionalNullable[str] = UNSET
+ r"""Optional message"""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["message"])
+ nullable_fields = set(["message"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
diff --git a/src/mistralai/client/models/workfloweventrequest.py b/src/mistralai/client/models/workfloweventrequest.py
new file mode 100644
index 00000000..0a2c7579
--- /dev/null
+++ b/src/mistralai/client/models/workfloweventrequest.py
@@ -0,0 +1,139 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: f41edbb269a4
+
+from __future__ import annotations
+from .activitytaskcompletedrequest import (
+ ActivityTaskCompletedRequest,
+ ActivityTaskCompletedRequestTypedDict,
+)
+from .activitytaskfailedrequest import (
+ ActivityTaskFailedRequest,
+ ActivityTaskFailedRequestTypedDict,
+)
+from .activitytaskretryingrequest import (
+ ActivityTaskRetryingRequest,
+ ActivityTaskRetryingRequestTypedDict,
+)
+from .activitytaskstartedrequest import (
+ ActivityTaskStartedRequest,
+ ActivityTaskStartedRequestTypedDict,
+)
+from .customtaskcanceledrequest import (
+ CustomTaskCanceledRequest,
+ CustomTaskCanceledRequestTypedDict,
+)
+from .customtaskcompletedrequest import (
+ CustomTaskCompletedRequest,
+ CustomTaskCompletedRequestTypedDict,
+)
+from .customtaskfailedrequest import (
+ CustomTaskFailedRequest,
+ CustomTaskFailedRequestTypedDict,
+)
+from .customtaskinprogressrequest import (
+ CustomTaskInProgressRequest,
+ CustomTaskInProgressRequestTypedDict,
+)
+from .customtaskstartedrequest import (
+ CustomTaskStartedRequest,
+ CustomTaskStartedRequestTypedDict,
+)
+from .customtasktimedoutrequest import (
+ CustomTaskTimedOutRequest,
+ CustomTaskTimedOutRequestTypedDict,
+)
+from .workflowexecutioncanceledrequest import (
+ WorkflowExecutionCanceledRequest,
+ WorkflowExecutionCanceledRequestTypedDict,
+)
+from .workflowexecutioncompletedrequest import (
+ WorkflowExecutionCompletedRequest,
+ WorkflowExecutionCompletedRequestTypedDict,
+)
+from .workflowexecutioncontinuedasnewrequest import (
+ WorkflowExecutionContinuedAsNewRequest,
+ WorkflowExecutionContinuedAsNewRequestTypedDict,
+)
+from .workflowexecutionfailedrequest import (
+ WorkflowExecutionFailedRequest,
+ WorkflowExecutionFailedRequestTypedDict,
+)
+from .workflowexecutionstartedrequest import (
+ WorkflowExecutionStartedRequest,
+ WorkflowExecutionStartedRequestTypedDict,
+)
+from .workflowtaskfailedrequest import (
+ WorkflowTaskFailedRequest,
+ WorkflowTaskFailedRequestTypedDict,
+)
+from .workflowtasktimedoutrequest import (
+ WorkflowTaskTimedOutRequest,
+ WorkflowTaskTimedOutRequestTypedDict,
+)
+from mistralai.client.types import BaseModel
+from typing import Union
+from typing_extensions import TypeAliasType, TypedDict
+
+
+WorkflowEventRequestEventTypedDict = TypeAliasType(
+ "WorkflowEventRequestEventTypedDict",
+ Union[
+ WorkflowExecutionStartedRequestTypedDict,
+ WorkflowExecutionCompletedRequestTypedDict,
+ WorkflowExecutionFailedRequestTypedDict,
+ WorkflowExecutionCanceledRequestTypedDict,
+ WorkflowExecutionContinuedAsNewRequestTypedDict,
+ WorkflowTaskTimedOutRequestTypedDict,
+ WorkflowTaskFailedRequestTypedDict,
+ CustomTaskStartedRequestTypedDict,
+ CustomTaskInProgressRequestTypedDict,
+ CustomTaskCompletedRequestTypedDict,
+ CustomTaskFailedRequestTypedDict,
+ CustomTaskTimedOutRequestTypedDict,
+ CustomTaskCanceledRequestTypedDict,
+ ActivityTaskStartedRequestTypedDict,
+ ActivityTaskCompletedRequestTypedDict,
+ ActivityTaskRetryingRequestTypedDict,
+ ActivityTaskFailedRequestTypedDict,
+ ],
+)
+r"""The workflow event payload."""
+
+
+WorkflowEventRequestEvent = TypeAliasType(
+ "WorkflowEventRequestEvent",
+ Union[
+ WorkflowExecutionStartedRequest,
+ WorkflowExecutionCompletedRequest,
+ WorkflowExecutionFailedRequest,
+ WorkflowExecutionCanceledRequest,
+ WorkflowExecutionContinuedAsNewRequest,
+ WorkflowTaskTimedOutRequest,
+ WorkflowTaskFailedRequest,
+ CustomTaskStartedRequest,
+ CustomTaskInProgressRequest,
+ CustomTaskCompletedRequest,
+ CustomTaskFailedRequest,
+ CustomTaskTimedOutRequest,
+ CustomTaskCanceledRequest,
+ ActivityTaskStartedRequest,
+ ActivityTaskCompletedRequest,
+ ActivityTaskRetryingRequest,
+ ActivityTaskFailedRequest,
+ ],
+)
+r"""The workflow event payload."""
+
+
+class WorkflowEventRequestTypedDict(TypedDict):
+ r"""Request model containing a workflow event."""
+
+ event: WorkflowEventRequestEventTypedDict
+ r"""The workflow event payload."""
+
+
+class WorkflowEventRequest(BaseModel):
+ r"""Request model containing a workflow event."""
+
+ event: WorkflowEventRequestEvent
+ r"""The workflow event payload."""
diff --git a/src/mistralai/client/models/workfloweventresponse.py b/src/mistralai/client/models/workfloweventresponse.py
new file mode 100644
index 00000000..4649da80
--- /dev/null
+++ b/src/mistralai/client/models/workfloweventresponse.py
@@ -0,0 +1,69 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: e1a984989f65
+
+from __future__ import annotations
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+ UnrecognizedStr,
+)
+from pydantic import model_serializer
+from typing import Literal, Union
+from typing_extensions import NotRequired, TypedDict
+
+
+WorkflowEventResponseStatus = Union[
+ Literal[
+ "success",
+ "error",
+ ],
+ UnrecognizedStr,
+]
+r"""Status of the event reception"""
+
+
+class WorkflowEventResponseTypedDict(TypedDict):
+ r"""Response model for workflow event reception."""
+
+ status: WorkflowEventResponseStatus
+ r"""Status of the event reception"""
+ message: NotRequired[Nullable[str]]
+ r"""Optional message"""
+
+
+class WorkflowEventResponse(BaseModel):
+ r"""Response model for workflow event reception."""
+
+ status: WorkflowEventResponseStatus
+ r"""Status of the event reception"""
+
+ message: OptionalNullable[str] = UNSET
+ r"""Optional message"""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["message"])
+ nullable_fields = set(["message"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
diff --git a/src/mistralai/client/models/workfloweventtype.py b/src/mistralai/client/models/workfloweventtype.py
new file mode 100644
index 00000000..8c386b01
--- /dev/null
+++ b/src/mistralai/client/models/workfloweventtype.py
@@ -0,0 +1,26 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: b4aeeb03b57a
+
+from __future__ import annotations
+from typing import Literal
+
+
+WorkflowEventType = Literal[
+ "WORKFLOW_EXECUTION_STARTED",
+ "WORKFLOW_EXECUTION_COMPLETED",
+ "WORKFLOW_EXECUTION_FAILED",
+ "WORKFLOW_EXECUTION_CANCELED",
+ "WORKFLOW_EXECUTION_CONTINUED_AS_NEW",
+ "WORKFLOW_TASK_TIMED_OUT",
+ "WORKFLOW_TASK_FAILED",
+ "CUSTOM_TASK_STARTED",
+ "CUSTOM_TASK_IN_PROGRESS",
+ "CUSTOM_TASK_COMPLETED",
+ "CUSTOM_TASK_FAILED",
+ "CUSTOM_TASK_TIMED_OUT",
+ "CUSTOM_TASK_CANCELED",
+ "ACTIVITY_TASK_STARTED",
+ "ACTIVITY_TASK_COMPLETED",
+ "ACTIVITY_TASK_RETRYING",
+ "ACTIVITY_TASK_FAILED",
+]
diff --git a/src/mistralai/client/models/workflowexecutioncanceledattributes.py b/src/mistralai/client/models/workflowexecutioncanceledattributes.py
new file mode 100644
index 00000000..6c06bfa9
--- /dev/null
+++ b/src/mistralai/client/models/workflowexecutioncanceledattributes.py
@@ -0,0 +1,57 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: c0802a5de5e5
+
+from __future__ import annotations
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing_extensions import NotRequired, TypedDict
+
+
+class WorkflowExecutionCanceledAttributesTypedDict(TypedDict):
+ r"""Attributes for workflow execution canceled events."""
+
+ task_id: str
+ r"""Unique identifier for the task within the workflow execution."""
+ reason: NotRequired[Nullable[str]]
+ r"""Optional reason provided for the cancellation."""
+
+
+class WorkflowExecutionCanceledAttributes(BaseModel):
+ r"""Attributes for workflow execution canceled events."""
+
+ task_id: str
+ r"""Unique identifier for the task within the workflow execution."""
+
+ reason: OptionalNullable[str] = UNSET
+ r"""Optional reason provided for the cancellation."""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["reason"])
+ nullable_fields = set(["reason"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
diff --git a/src/mistralai/client/models/workflowexecutioncanceledrequest.py b/src/mistralai/client/models/workflowexecutioncanceledrequest.py
new file mode 100644
index 00000000..09873446
--- /dev/null
+++ b/src/mistralai/client/models/workflowexecutioncanceledrequest.py
@@ -0,0 +1,120 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 7bdfdeddd5c4
+
+from __future__ import annotations
+from .workflowexecutioncanceledattributes import (
+ WorkflowExecutionCanceledAttributes,
+ WorkflowExecutionCanceledAttributesTypedDict,
+)
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic import model_serializer
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class WorkflowExecutionCanceledRequestTypedDict(TypedDict):
+ r"""Emitted when a workflow execution is canceled.
+
+ This is a terminal event indicating the workflow was explicitly canceled.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+ attributes: WorkflowExecutionCanceledAttributesTypedDict
+ r"""Attributes for workflow execution canceled events."""
+ event_timestamp: NotRequired[int]
+ r"""Unix timestamp in nanoseconds when the event was created."""
+ parent_workflow_exec_id: NotRequired[Nullable[str]]
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+ event_type: Literal["WORKFLOW_EXECUTION_CANCELED"]
+ r"""Event type discriminator."""
+
+
+class WorkflowExecutionCanceledRequest(BaseModel):
+ r"""Emitted when a workflow execution is canceled.
+
+ This is a terminal event indicating the workflow was explicitly canceled.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+
+ attributes: WorkflowExecutionCanceledAttributes
+ r"""Attributes for workflow execution canceled events."""
+
+ event_timestamp: Optional[int] = None
+ r"""Unix timestamp in nanoseconds when the event was created."""
+
+ parent_workflow_exec_id: OptionalNullable[str] = UNSET
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+
+ event_type: Annotated[
+ Annotated[
+ Optional[Literal["WORKFLOW_EXECUTION_CANCELED"]],
+ AfterValidator(validate_const("WORKFLOW_EXECUTION_CANCELED")),
+ ],
+ pydantic.Field(alias="event_type"),
+ ] = "WORKFLOW_EXECUTION_CANCELED"
+ r"""Event type discriminator."""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(
+ ["event_timestamp", "parent_workflow_exec_id", "event_type"]
+ )
+ nullable_fields = set(["parent_workflow_exec_id"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
+
+
+try:
+ WorkflowExecutionCanceledRequest.model_rebuild()
+except NameError:
+ pass
diff --git a/src/mistralai/client/models/workflowexecutioncanceledresponse.py b/src/mistralai/client/models/workflowexecutioncanceledresponse.py
new file mode 100644
index 00000000..ee1c4e71
--- /dev/null
+++ b/src/mistralai/client/models/workflowexecutioncanceledresponse.py
@@ -0,0 +1,112 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: bcc392d67222
+
+from __future__ import annotations
+from .workflowexecutioncanceledattributes import (
+ WorkflowExecutionCanceledAttributes,
+ WorkflowExecutionCanceledAttributesTypedDict,
+)
+from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic import model_serializer
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, TypedDict
+
+
+class WorkflowExecutionCanceledResponseTypedDict(TypedDict):
+ r"""Emitted when a workflow execution is canceled.
+
+ This is a terminal event indicating the workflow was explicitly canceled.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+ event_timestamp: int
+ r"""Unix timestamp in nanoseconds when the event was created."""
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+ parent_workflow_exec_id: Nullable[str]
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+ attributes: WorkflowExecutionCanceledAttributesTypedDict
+ r"""Attributes for workflow execution canceled events."""
+ event_type: Literal["WORKFLOW_EXECUTION_CANCELED"]
+ r"""Event type discriminator."""
+
+
+class WorkflowExecutionCanceledResponse(BaseModel):
+ r"""Emitted when a workflow execution is canceled.
+
+ This is a terminal event indicating the workflow was explicitly canceled.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+
+ event_timestamp: int
+ r"""Unix timestamp in nanoseconds when the event was created."""
+
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+
+ parent_workflow_exec_id: Nullable[str]
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+
+ attributes: WorkflowExecutionCanceledAttributes
+ r"""Attributes for workflow execution canceled events."""
+
+ event_type: Annotated[
+ Annotated[
+ Optional[Literal["WORKFLOW_EXECUTION_CANCELED"]],
+ AfterValidator(validate_const("WORKFLOW_EXECUTION_CANCELED")),
+ ],
+ pydantic.Field(alias="event_type"),
+ ] = "WORKFLOW_EXECUTION_CANCELED"
+ r"""Event type discriminator."""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["event_type"])
+ nullable_fields = set(["parent_workflow_exec_id"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
+
+
+try:
+ WorkflowExecutionCanceledResponse.model_rebuild()
+except NameError:
+ pass
diff --git a/src/mistralai/client/models/workflowexecutioncompletedattributesrequest.py b/src/mistralai/client/models/workflowexecutioncompletedattributesrequest.py
new file mode 100644
index 00000000..fb48a0fc
--- /dev/null
+++ b/src/mistralai/client/models/workflowexecutioncompletedattributesrequest.py
@@ -0,0 +1,32 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 9d69c2f471c1
+
+from __future__ import annotations
+from .jsonpayloadrequest import JSONPayloadRequest, JSONPayloadRequestTypedDict
+from mistralai.client.types import BaseModel
+from typing_extensions import TypedDict
+
+
+class WorkflowExecutionCompletedAttributesRequestTypedDict(TypedDict):
+ r"""Attributes for workflow execution completed events."""
+
+ task_id: str
+ r"""Unique identifier for the task within the workflow execution."""
+ result: JSONPayloadRequestTypedDict
+ r"""A payload containing arbitrary JSON data.
+
+ Used for complete state snapshots or final results.
+ """
+
+
+class WorkflowExecutionCompletedAttributesRequest(BaseModel):
+ r"""Attributes for workflow execution completed events."""
+
+ task_id: str
+ r"""Unique identifier for the task within the workflow execution."""
+
+ result: JSONPayloadRequest
+ r"""A payload containing arbitrary JSON data.
+
+ Used for complete state snapshots or final results.
+ """
diff --git a/src/mistralai/client/models/workflowexecutioncompletedattributesresponse.py b/src/mistralai/client/models/workflowexecutioncompletedattributesresponse.py
new file mode 100644
index 00000000..3afc8dcf
--- /dev/null
+++ b/src/mistralai/client/models/workflowexecutioncompletedattributesresponse.py
@@ -0,0 +1,32 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: f831331b0eb1
+
+from __future__ import annotations
+from .jsonpayloadresponse import JSONPayloadResponse, JSONPayloadResponseTypedDict
+from mistralai.client.types import BaseModel
+from typing_extensions import TypedDict
+
+
+class WorkflowExecutionCompletedAttributesResponseTypedDict(TypedDict):
+ r"""Attributes for workflow execution completed events."""
+
+ task_id: str
+ r"""Unique identifier for the task within the workflow execution."""
+ result: JSONPayloadResponseTypedDict
+ r"""A payload containing arbitrary JSON data.
+
+ Used for complete state snapshots or final results.
+ """
+
+
+class WorkflowExecutionCompletedAttributesResponse(BaseModel):
+ r"""Attributes for workflow execution completed events."""
+
+ task_id: str
+ r"""Unique identifier for the task within the workflow execution."""
+
+ result: JSONPayloadResponse
+ r"""A payload containing arbitrary JSON data.
+
+ Used for complete state snapshots or final results.
+ """
diff --git a/src/mistralai/client/models/workflowexecutioncompletedrequest.py b/src/mistralai/client/models/workflowexecutioncompletedrequest.py
new file mode 100644
index 00000000..81bde0f1
--- /dev/null
+++ b/src/mistralai/client/models/workflowexecutioncompletedrequest.py
@@ -0,0 +1,120 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 54326e5805ed
+
+from __future__ import annotations
+from .workflowexecutioncompletedattributesrequest import (
+ WorkflowExecutionCompletedAttributesRequest,
+ WorkflowExecutionCompletedAttributesRequestTypedDict,
+)
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic import model_serializer
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class WorkflowExecutionCompletedRequestTypedDict(TypedDict):
+ r"""Emitted when a workflow execution completes successfully.
+
+ This is a terminal event indicating the workflow finished without errors.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+ attributes: WorkflowExecutionCompletedAttributesRequestTypedDict
+ r"""Attributes for workflow execution completed events."""
+ event_timestamp: NotRequired[int]
+ r"""Unix timestamp in nanoseconds when the event was created."""
+ parent_workflow_exec_id: NotRequired[Nullable[str]]
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+ event_type: Literal["WORKFLOW_EXECUTION_COMPLETED"]
+ r"""Event type discriminator."""
+
+
+class WorkflowExecutionCompletedRequest(BaseModel):
+ r"""Emitted when a workflow execution completes successfully.
+
+ This is a terminal event indicating the workflow finished without errors.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+
+ attributes: WorkflowExecutionCompletedAttributesRequest
+ r"""Attributes for workflow execution completed events."""
+
+ event_timestamp: Optional[int] = None
+ r"""Unix timestamp in nanoseconds when the event was created."""
+
+ parent_workflow_exec_id: OptionalNullable[str] = UNSET
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+
+ event_type: Annotated[
+ Annotated[
+ Optional[Literal["WORKFLOW_EXECUTION_COMPLETED"]],
+ AfterValidator(validate_const("WORKFLOW_EXECUTION_COMPLETED")),
+ ],
+ pydantic.Field(alias="event_type"),
+ ] = "WORKFLOW_EXECUTION_COMPLETED"
+ r"""Event type discriminator."""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(
+ ["event_timestamp", "parent_workflow_exec_id", "event_type"]
+ )
+ nullable_fields = set(["parent_workflow_exec_id"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
+
+
+try:
+ WorkflowExecutionCompletedRequest.model_rebuild()
+except NameError:
+ pass
diff --git a/src/mistralai/client/models/workflowexecutioncompletedresponse.py b/src/mistralai/client/models/workflowexecutioncompletedresponse.py
new file mode 100644
index 00000000..17716668
--- /dev/null
+++ b/src/mistralai/client/models/workflowexecutioncompletedresponse.py
@@ -0,0 +1,112 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: a1bec20dfb0e
+
+from __future__ import annotations
+from .workflowexecutioncompletedattributesresponse import (
+ WorkflowExecutionCompletedAttributesResponse,
+ WorkflowExecutionCompletedAttributesResponseTypedDict,
+)
+from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic import model_serializer
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, TypedDict
+
+
+class WorkflowExecutionCompletedResponseTypedDict(TypedDict):
+ r"""Emitted when a workflow execution completes successfully.
+
+ This is a terminal event indicating the workflow finished without errors.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+ event_timestamp: int
+ r"""Unix timestamp in nanoseconds when the event was created."""
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+ parent_workflow_exec_id: Nullable[str]
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+ attributes: WorkflowExecutionCompletedAttributesResponseTypedDict
+ r"""Attributes for workflow execution completed events."""
+ event_type: Literal["WORKFLOW_EXECUTION_COMPLETED"]
+ r"""Event type discriminator."""
+
+
+class WorkflowExecutionCompletedResponse(BaseModel):
+ r"""Emitted when a workflow execution completes successfully.
+
+ This is a terminal event indicating the workflow finished without errors.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+
+ event_timestamp: int
+ r"""Unix timestamp in nanoseconds when the event was created."""
+
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+
+ parent_workflow_exec_id: Nullable[str]
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+
+ attributes: WorkflowExecutionCompletedAttributesResponse
+ r"""Attributes for workflow execution completed events."""
+
+ event_type: Annotated[
+ Annotated[
+ Optional[Literal["WORKFLOW_EXECUTION_COMPLETED"]],
+ AfterValidator(validate_const("WORKFLOW_EXECUTION_COMPLETED")),
+ ],
+ pydantic.Field(alias="event_type"),
+ ] = "WORKFLOW_EXECUTION_COMPLETED"
+ r"""Event type discriminator."""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["event_type"])
+ nullable_fields = set(["parent_workflow_exec_id"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
+
+
+try:
+ WorkflowExecutionCompletedResponse.model_rebuild()
+except NameError:
+ pass
diff --git a/src/mistralai/client/models/workflowexecutioncontinuedasnewattributesrequest.py b/src/mistralai/client/models/workflowexecutioncontinuedasnewattributesrequest.py
new file mode 100644
index 00000000..1aba37ae
--- /dev/null
+++ b/src/mistralai/client/models/workflowexecutioncontinuedasnewattributesrequest.py
@@ -0,0 +1,42 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 8517ed95b5c7
+
+from __future__ import annotations
+from .jsonpayloadrequest import JSONPayloadRequest, JSONPayloadRequestTypedDict
+from mistralai.client.types import BaseModel
+from typing_extensions import TypedDict
+
+
+class WorkflowExecutionContinuedAsNewAttributesRequestTypedDict(TypedDict):
+ r"""Attributes for workflow execution continued-as-new events."""
+
+ task_id: str
+ r"""Unique identifier for the task within the workflow execution."""
+ new_execution_run_id: str
+ r"""The run ID of the new workflow execution that continues this workflow."""
+ workflow_name: str
+ r"""The registered name of the continued workflow."""
+ input: JSONPayloadRequestTypedDict
+ r"""A payload containing arbitrary JSON data.
+
+ Used for complete state snapshots or final results.
+ """
+
+
+class WorkflowExecutionContinuedAsNewAttributesRequest(BaseModel):
+ r"""Attributes for workflow execution continued-as-new events."""
+
+ task_id: str
+ r"""Unique identifier for the task within the workflow execution."""
+
+ new_execution_run_id: str
+ r"""The run ID of the new workflow execution that continues this workflow."""
+
+ workflow_name: str
+ r"""The registered name of the continued workflow."""
+
+ input: JSONPayloadRequest
+ r"""A payload containing arbitrary JSON data.
+
+ Used for complete state snapshots or final results.
+ """
diff --git a/src/mistralai/client/models/workflowexecutioncontinuedasnewattributesresponse.py b/src/mistralai/client/models/workflowexecutioncontinuedasnewattributesresponse.py
new file mode 100644
index 00000000..943e5ebc
--- /dev/null
+++ b/src/mistralai/client/models/workflowexecutioncontinuedasnewattributesresponse.py
@@ -0,0 +1,42 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 2dd61dcd7b48
+
+from __future__ import annotations
+from .jsonpayloadresponse import JSONPayloadResponse, JSONPayloadResponseTypedDict
+from mistralai.client.types import BaseModel
+from typing_extensions import TypedDict
+
+
+class WorkflowExecutionContinuedAsNewAttributesResponseTypedDict(TypedDict):
+ r"""Attributes for workflow execution continued-as-new events."""
+
+ task_id: str
+ r"""Unique identifier for the task within the workflow execution."""
+ new_execution_run_id: str
+ r"""The run ID of the new workflow execution that continues this workflow."""
+ workflow_name: str
+ r"""The registered name of the continued workflow."""
+ input: JSONPayloadResponseTypedDict
+ r"""A payload containing arbitrary JSON data.
+
+ Used for complete state snapshots or final results.
+ """
+
+
+class WorkflowExecutionContinuedAsNewAttributesResponse(BaseModel):
+ r"""Attributes for workflow execution continued-as-new events."""
+
+ task_id: str
+ r"""Unique identifier for the task within the workflow execution."""
+
+ new_execution_run_id: str
+ r"""The run ID of the new workflow execution that continues this workflow."""
+
+ workflow_name: str
+ r"""The registered name of the continued workflow."""
+
+ input: JSONPayloadResponse
+ r"""A payload containing arbitrary JSON data.
+
+ Used for complete state snapshots or final results.
+ """
diff --git a/src/mistralai/client/models/workflowexecutioncontinuedasnewrequest.py b/src/mistralai/client/models/workflowexecutioncontinuedasnewrequest.py
new file mode 100644
index 00000000..f8c94604
--- /dev/null
+++ b/src/mistralai/client/models/workflowexecutioncontinuedasnewrequest.py
@@ -0,0 +1,122 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: d1057d583b8c
+
+from __future__ import annotations
+from .workflowexecutioncontinuedasnewattributesrequest import (
+ WorkflowExecutionContinuedAsNewAttributesRequest,
+ WorkflowExecutionContinuedAsNewAttributesRequestTypedDict,
+)
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic import model_serializer
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class WorkflowExecutionContinuedAsNewRequestTypedDict(TypedDict):
+ r"""Emitted when a workflow continues as a new execution.
+
+ This occurs when a workflow uses continue-as-new to reset its history
+ while maintaining logical continuity.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+ attributes: WorkflowExecutionContinuedAsNewAttributesRequestTypedDict
+ r"""Attributes for workflow execution continued-as-new events."""
+ event_timestamp: NotRequired[int]
+ r"""Unix timestamp in nanoseconds when the event was created."""
+ parent_workflow_exec_id: NotRequired[Nullable[str]]
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+ event_type: Literal["WORKFLOW_EXECUTION_CONTINUED_AS_NEW"]
+ r"""Event type discriminator."""
+
+
+class WorkflowExecutionContinuedAsNewRequest(BaseModel):
+ r"""Emitted when a workflow continues as a new execution.
+
+ This occurs when a workflow uses continue-as-new to reset its history
+ while maintaining logical continuity.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+
+ attributes: WorkflowExecutionContinuedAsNewAttributesRequest
+ r"""Attributes for workflow execution continued-as-new events."""
+
+ event_timestamp: Optional[int] = None
+ r"""Unix timestamp in nanoseconds when the event was created."""
+
+ parent_workflow_exec_id: OptionalNullable[str] = UNSET
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+
+ event_type: Annotated[
+ Annotated[
+ Optional[Literal["WORKFLOW_EXECUTION_CONTINUED_AS_NEW"]],
+ AfterValidator(validate_const("WORKFLOW_EXECUTION_CONTINUED_AS_NEW")),
+ ],
+ pydantic.Field(alias="event_type"),
+ ] = "WORKFLOW_EXECUTION_CONTINUED_AS_NEW"
+ r"""Event type discriminator."""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(
+ ["event_timestamp", "parent_workflow_exec_id", "event_type"]
+ )
+ nullable_fields = set(["parent_workflow_exec_id"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
+
+
+try:
+ WorkflowExecutionContinuedAsNewRequest.model_rebuild()
+except NameError:
+ pass
diff --git a/src/mistralai/client/models/workflowexecutioncontinuedasnewresponse.py b/src/mistralai/client/models/workflowexecutioncontinuedasnewresponse.py
new file mode 100644
index 00000000..0f60a5be
--- /dev/null
+++ b/src/mistralai/client/models/workflowexecutioncontinuedasnewresponse.py
@@ -0,0 +1,114 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: be26cd87dcb3
+
+from __future__ import annotations
+from .workflowexecutioncontinuedasnewattributesresponse import (
+ WorkflowExecutionContinuedAsNewAttributesResponse,
+ WorkflowExecutionContinuedAsNewAttributesResponseTypedDict,
+)
+from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic import model_serializer
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, TypedDict
+
+
+class WorkflowExecutionContinuedAsNewResponseTypedDict(TypedDict):
+ r"""Emitted when a workflow continues as a new execution.
+
+ This occurs when a workflow uses continue-as-new to reset its history
+ while maintaining logical continuity.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+ event_timestamp: int
+ r"""Unix timestamp in nanoseconds when the event was created."""
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+ parent_workflow_exec_id: Nullable[str]
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+ attributes: WorkflowExecutionContinuedAsNewAttributesResponseTypedDict
+ r"""Attributes for workflow execution continued-as-new events."""
+ event_type: Literal["WORKFLOW_EXECUTION_CONTINUED_AS_NEW"]
+ r"""Event type discriminator."""
+
+
+class WorkflowExecutionContinuedAsNewResponse(BaseModel):
+ r"""Emitted when a workflow continues as a new execution.
+
+ This occurs when a workflow uses continue-as-new to reset its history
+ while maintaining logical continuity.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+
+ event_timestamp: int
+ r"""Unix timestamp in nanoseconds when the event was created."""
+
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+
+ parent_workflow_exec_id: Nullable[str]
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+
+ attributes: WorkflowExecutionContinuedAsNewAttributesResponse
+ r"""Attributes for workflow execution continued-as-new events."""
+
+ event_type: Annotated[
+ Annotated[
+ Optional[Literal["WORKFLOW_EXECUTION_CONTINUED_AS_NEW"]],
+ AfterValidator(validate_const("WORKFLOW_EXECUTION_CONTINUED_AS_NEW")),
+ ],
+ pydantic.Field(alias="event_type"),
+ ] = "WORKFLOW_EXECUTION_CONTINUED_AS_NEW"
+ r"""Event type discriminator."""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["event_type"])
+ nullable_fields = set(["parent_workflow_exec_id"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
+
+
+try:
+ WorkflowExecutionContinuedAsNewResponse.model_rebuild()
+except NameError:
+ pass
diff --git a/src/mistralai/client/models/workflowexecutionfailedattributes.py b/src/mistralai/client/models/workflowexecutionfailedattributes.py
new file mode 100644
index 00000000..1e61f344
--- /dev/null
+++ b/src/mistralai/client/models/workflowexecutionfailedattributes.py
@@ -0,0 +1,26 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: dbb7fb36a4fd
+
+from __future__ import annotations
+from .failure import Failure, FailureTypedDict
+from mistralai.client.types import BaseModel
+from typing_extensions import TypedDict
+
+
+class WorkflowExecutionFailedAttributesTypedDict(TypedDict):
+ r"""Attributes for workflow execution failed events."""
+
+ task_id: str
+ r"""Unique identifier for the task within the workflow execution."""
+ failure: FailureTypedDict
+ r"""Represents an error or exception that occurred during execution."""
+
+
+class WorkflowExecutionFailedAttributes(BaseModel):
+ r"""Attributes for workflow execution failed events."""
+
+ task_id: str
+ r"""Unique identifier for the task within the workflow execution."""
+
+ failure: Failure
+ r"""Represents an error or exception that occurred during execution."""
diff --git a/src/mistralai/client/models/workflowexecutionfailedrequest.py b/src/mistralai/client/models/workflowexecutionfailedrequest.py
new file mode 100644
index 00000000..5c4e445a
--- /dev/null
+++ b/src/mistralai/client/models/workflowexecutionfailedrequest.py
@@ -0,0 +1,120 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 873155c8e314
+
+from __future__ import annotations
+from .workflowexecutionfailedattributes import (
+ WorkflowExecutionFailedAttributes,
+ WorkflowExecutionFailedAttributesTypedDict,
+)
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic import model_serializer
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class WorkflowExecutionFailedRequestTypedDict(TypedDict):
+ r"""Emitted when a workflow execution fails due to an unhandled exception.
+
+ This is a terminal event indicating the workflow ended with an error.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+ attributes: WorkflowExecutionFailedAttributesTypedDict
+ r"""Attributes for workflow execution failed events."""
+ event_timestamp: NotRequired[int]
+ r"""Unix timestamp in nanoseconds when the event was created."""
+ parent_workflow_exec_id: NotRequired[Nullable[str]]
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+ event_type: Literal["WORKFLOW_EXECUTION_FAILED"]
+ r"""Event type discriminator."""
+
+
+class WorkflowExecutionFailedRequest(BaseModel):
+ r"""Emitted when a workflow execution fails due to an unhandled exception.
+
+ This is a terminal event indicating the workflow ended with an error.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+
+ attributes: WorkflowExecutionFailedAttributes
+ r"""Attributes for workflow execution failed events."""
+
+ event_timestamp: Optional[int] = None
+ r"""Unix timestamp in nanoseconds when the event was created."""
+
+ parent_workflow_exec_id: OptionalNullable[str] = UNSET
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+
+ event_type: Annotated[
+ Annotated[
+ Optional[Literal["WORKFLOW_EXECUTION_FAILED"]],
+ AfterValidator(validate_const("WORKFLOW_EXECUTION_FAILED")),
+ ],
+ pydantic.Field(alias="event_type"),
+ ] = "WORKFLOW_EXECUTION_FAILED"
+ r"""Event type discriminator."""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(
+ ["event_timestamp", "parent_workflow_exec_id", "event_type"]
+ )
+ nullable_fields = set(["parent_workflow_exec_id"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
+
+
+try:
+ WorkflowExecutionFailedRequest.model_rebuild()
+except NameError:
+ pass
diff --git a/src/mistralai/client/models/workflowexecutionfailedresponse.py b/src/mistralai/client/models/workflowexecutionfailedresponse.py
new file mode 100644
index 00000000..687d33a9
--- /dev/null
+++ b/src/mistralai/client/models/workflowexecutionfailedresponse.py
@@ -0,0 +1,112 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: a0836009f9de
+
+from __future__ import annotations
+from .workflowexecutionfailedattributes import (
+ WorkflowExecutionFailedAttributes,
+ WorkflowExecutionFailedAttributesTypedDict,
+)
+from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic import model_serializer
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, TypedDict
+
+
+class WorkflowExecutionFailedResponseTypedDict(TypedDict):
+ r"""Emitted when a workflow execution fails due to an unhandled exception.
+
+ This is a terminal event indicating the workflow ended with an error.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+ event_timestamp: int
+ r"""Unix timestamp in nanoseconds when the event was created."""
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+ parent_workflow_exec_id: Nullable[str]
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+ attributes: WorkflowExecutionFailedAttributesTypedDict
+ r"""Attributes for workflow execution failed events."""
+ event_type: Literal["WORKFLOW_EXECUTION_FAILED"]
+ r"""Event type discriminator."""
+
+
+class WorkflowExecutionFailedResponse(BaseModel):
+ r"""Emitted when a workflow execution fails due to an unhandled exception.
+
+ This is a terminal event indicating the workflow ended with an error.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+
+ event_timestamp: int
+ r"""Unix timestamp in nanoseconds when the event was created."""
+
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+
+ parent_workflow_exec_id: Nullable[str]
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+
+ attributes: WorkflowExecutionFailedAttributes
+ r"""Attributes for workflow execution failed events."""
+
+ event_type: Annotated[
+ Annotated[
+ Optional[Literal["WORKFLOW_EXECUTION_FAILED"]],
+ AfterValidator(validate_const("WORKFLOW_EXECUTION_FAILED")),
+ ],
+ pydantic.Field(alias="event_type"),
+ ] = "WORKFLOW_EXECUTION_FAILED"
+ r"""Event type discriminator."""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["event_type"])
+ nullable_fields = set(["parent_workflow_exec_id"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
+
+
+try:
+ WorkflowExecutionFailedResponse.model_rebuild()
+except NameError:
+ pass
diff --git a/src/mistralai/client/models/workflowexecutionlistresponse.py b/src/mistralai/client/models/workflowexecutionlistresponse.py
new file mode 100644
index 00000000..442ed972
--- /dev/null
+++ b/src/mistralai/client/models/workflowexecutionlistresponse.py
@@ -0,0 +1,58 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: c2b2e1ab4821
+
+from __future__ import annotations
+from .workflowexecutionwithoutresultresponse import (
+ WorkflowExecutionWithoutResultResponse,
+ WorkflowExecutionWithoutResultResponseTypedDict,
+)
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing import List
+from typing_extensions import NotRequired, TypedDict
+
+
+class WorkflowExecutionListResponseTypedDict(TypedDict):
+ executions: List[WorkflowExecutionWithoutResultResponseTypedDict]
+ r"""A list of workflow executions"""
+ next_page_token: NotRequired[Nullable[str]]
+ r"""Token to use for fetching the next page of results. Null if this is the last page."""
+
+
+class WorkflowExecutionListResponse(BaseModel):
+ executions: List[WorkflowExecutionWithoutResultResponse]
+ r"""A list of workflow executions"""
+
+ next_page_token: OptionalNullable[str] = UNSET
+ r"""Token to use for fetching the next page of results. Null if this is the last page."""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["next_page_token"])
+ nullable_fields = set(["next_page_token"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
diff --git a/src/mistralai/client/models/workflowexecutionprogresstraceevent.py b/src/mistralai/client/models/workflowexecutionprogresstraceevent.py
new file mode 100644
index 00000000..ffebdb82
--- /dev/null
+++ b/src/mistralai/client/models/workflowexecutionprogresstraceevent.py
@@ -0,0 +1,100 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 6ea7078aaca3
+
+from __future__ import annotations
+from .eventprogressstatus import EventProgressStatus
+from .eventtype import EventType
+from .workflowexecutiontracesummaryattributesvalues import (
+ WorkflowExecutionTraceSummaryAttributesValues,
+ WorkflowExecutionTraceSummaryAttributesValuesTypedDict,
+)
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing import Dict, Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+class WorkflowExecutionProgressTraceEventTypedDict(TypedDict):
+ name: str
+ r"""Name of the event"""
+ id: str
+ r"""The ID of the event"""
+ timestamp_unix_nano: int
+ r"""The timestamp of the event in nanoseconds since the Unix epoch"""
+ attributes: Dict[
+ str, Nullable[WorkflowExecutionTraceSummaryAttributesValuesTypedDict]
+ ]
+ r"""The attributes of the event"""
+ start_time_unix_ms: int
+ r"""The start time of the event in milliseconds since the Unix epoch"""
+ type: NotRequired[EventType]
+ internal: NotRequired[bool]
+ r"""Whether the event is internal"""
+ status: NotRequired[EventProgressStatus]
+ end_time_unix_ms: NotRequired[Nullable[int]]
+ r"""The end time of the event in milliseconds since the Unix epoch"""
+ error: NotRequired[Nullable[str]]
+ r"""The error message, if any"""
+
+
+class WorkflowExecutionProgressTraceEvent(BaseModel):
+ name: str
+ r"""Name of the event"""
+
+ id: str
+ r"""The ID of the event"""
+
+ timestamp_unix_nano: int
+ r"""The timestamp of the event in nanoseconds since the Unix epoch"""
+
+ attributes: Dict[str, Nullable[WorkflowExecutionTraceSummaryAttributesValues]]
+ r"""The attributes of the event"""
+
+ start_time_unix_ms: int
+ r"""The start time of the event in milliseconds since the Unix epoch"""
+
+ type: Optional[EventType] = None
+
+ internal: Optional[bool] = False
+ r"""Whether the event is internal"""
+
+ status: Optional[EventProgressStatus] = None
+
+ end_time_unix_ms: OptionalNullable[int] = UNSET
+ r"""The end time of the event in milliseconds since the Unix epoch"""
+
+ error: OptionalNullable[str] = UNSET
+ r"""The error message, if any"""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(
+ ["type", "internal", "status", "end_time_unix_ms", "error"]
+ )
+ nullable_fields = set(["end_time_unix_ms", "error"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
diff --git a/src/mistralai/client/models/workflowexecutionrequest.py b/src/mistralai/client/models/workflowexecutionrequest.py
new file mode 100644
index 00000000..bf6a5fa0
--- /dev/null
+++ b/src/mistralai/client/models/workflowexecutionrequest.py
@@ -0,0 +1,110 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 806340497ed4
+
+from __future__ import annotations
+from .networkencodedinput import NetworkEncodedInput, NetworkEncodedInputTypedDict
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+import pydantic
+from pydantic import model_serializer
+from typing import Any, Dict, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class WorkflowExecutionRequestTypedDict(TypedDict):
+ execution_id: NotRequired[Nullable[str]]
+ r"""Allows you to specify a custom execution ID. If not provided, a random ID will be generated."""
+ input: NotRequired[Nullable[Dict[str, Any]]]
+ r"""The input to the workflow. This should be a dictionary that matches the workflow's input schema."""
+ encoded_input: NotRequired[Nullable[NetworkEncodedInputTypedDict]]
+ r"""Encoded input to the workflow, used when payload encoding is enabled."""
+ wait_for_result: NotRequired[bool]
+ r"""If true, wait for the workflow to complete and return the result directly."""
+ timeout_seconds: NotRequired[Nullable[float]]
+ r"""Maximum time to wait for completion when wait_for_result is true."""
+ custom_tracing_attributes: NotRequired[Nullable[Dict[str, str]]]
+ task_queue: NotRequired[Nullable[str]]
+ r"""Deprecated. Use deployment_name instead."""
+ deployment_name: NotRequired[Nullable[str]]
+ r"""Name of the deployment to route this execution to"""
+
+
+class WorkflowExecutionRequest(BaseModel):
+ execution_id: OptionalNullable[str] = UNSET
+ r"""Allows you to specify a custom execution ID. If not provided, a random ID will be generated."""
+
+ input: OptionalNullable[Dict[str, Any]] = UNSET
+ r"""The input to the workflow. This should be a dictionary that matches the workflow's input schema."""
+
+ encoded_input: OptionalNullable[NetworkEncodedInput] = UNSET
+ r"""Encoded input to the workflow, used when payload encoding is enabled."""
+
+ wait_for_result: Optional[bool] = False
+ r"""If true, wait for the workflow to complete and return the result directly."""
+
+ timeout_seconds: OptionalNullable[float] = UNSET
+ r"""Maximum time to wait for completion when wait_for_result is true."""
+
+ custom_tracing_attributes: OptionalNullable[Dict[str, str]] = UNSET
+
+ task_queue: Annotated[
+ OptionalNullable[str],
+ pydantic.Field(
+ deprecated="warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
+ ),
+ ] = UNSET
+ r"""Deprecated. Use deployment_name instead."""
+
+ deployment_name: OptionalNullable[str] = UNSET
+ r"""Name of the deployment to route this execution to"""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(
+ [
+ "execution_id",
+ "input",
+ "encoded_input",
+ "wait_for_result",
+ "timeout_seconds",
+ "custom_tracing_attributes",
+ "task_queue",
+ "deployment_name",
+ ]
+ )
+ nullable_fields = set(
+ [
+ "execution_id",
+ "input",
+ "encoded_input",
+ "timeout_seconds",
+ "custom_tracing_attributes",
+ "task_queue",
+ "deployment_name",
+ ]
+ )
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
diff --git a/src/mistralai/client/models/workflowexecutionresponse.py b/src/mistralai/client/models/workflowexecutionresponse.py
new file mode 100644
index 00000000..84398375
--- /dev/null
+++ b/src/mistralai/client/models/workflowexecutionresponse.py
@@ -0,0 +1,93 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 758786637be5
+
+from __future__ import annotations
+from .workflowexecutionstatus import WorkflowExecutionStatus
+from datetime import datetime
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing import Any
+from typing_extensions import NotRequired, TypedDict
+
+
+class WorkflowExecutionResponseTypedDict(TypedDict):
+ workflow_name: str
+ r"""The name of the workflow"""
+ execution_id: str
+ r"""The ID of the workflow execution"""
+ root_execution_id: str
+ r"""The root execution ID of the workflow execution"""
+ status: Nullable[WorkflowExecutionStatus]
+ r"""The status of the workflow execution"""
+ start_time: datetime
+ r"""The start time of the workflow execution"""
+ end_time: Nullable[datetime]
+ r"""The end time of the workflow execution, if available"""
+ result: Nullable[Any]
+ r"""The result of the workflow execution, if available"""
+ parent_execution_id: NotRequired[Nullable[str]]
+ r"""The parent execution ID of the workflow execution"""
+ total_duration_ms: NotRequired[Nullable[int]]
+ r"""The total duration of the trace in milliseconds"""
+
+
+class WorkflowExecutionResponse(BaseModel):
+ workflow_name: str
+ r"""The name of the workflow"""
+
+ execution_id: str
+ r"""The ID of the workflow execution"""
+
+ root_execution_id: str
+ r"""The root execution ID of the workflow execution"""
+
+ status: Nullable[WorkflowExecutionStatus]
+ r"""The status of the workflow execution"""
+
+ start_time: datetime
+ r"""The start time of the workflow execution"""
+
+ end_time: Nullable[datetime]
+ r"""The end time of the workflow execution, if available"""
+
+ result: Nullable[Any]
+ r"""The result of the workflow execution, if available"""
+
+ parent_execution_id: OptionalNullable[str] = UNSET
+ r"""The parent execution ID of the workflow execution"""
+
+ total_duration_ms: OptionalNullable[int] = UNSET
+ r"""The total duration of the trace in milliseconds"""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["parent_execution_id", "total_duration_ms"])
+ nullable_fields = set(
+ ["parent_execution_id", "status", "end_time", "total_duration_ms", "result"]
+ )
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
diff --git a/src/mistralai/client/models/workflowexecutionstartedattributesrequest.py b/src/mistralai/client/models/workflowexecutionstartedattributesrequest.py
new file mode 100644
index 00000000..ff74345f
--- /dev/null
+++ b/src/mistralai/client/models/workflowexecutionstartedattributesrequest.py
@@ -0,0 +1,37 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: ee480cd77d79
+
+from __future__ import annotations
+from .jsonpayloadrequest import JSONPayloadRequest, JSONPayloadRequestTypedDict
+from mistralai.client.types import BaseModel
+from typing_extensions import TypedDict
+
+
+class WorkflowExecutionStartedAttributesRequestTypedDict(TypedDict):
+ r"""Attributes for workflow execution started events."""
+
+ task_id: str
+ r"""Unique identifier for the task within the workflow execution."""
+ workflow_name: str
+ r"""The registered name of the workflow being executed."""
+ input: JSONPayloadRequestTypedDict
+ r"""A payload containing arbitrary JSON data.
+
+ Used for complete state snapshots or final results.
+ """
+
+
+class WorkflowExecutionStartedAttributesRequest(BaseModel):
+ r"""Attributes for workflow execution started events."""
+
+ task_id: str
+ r"""Unique identifier for the task within the workflow execution."""
+
+ workflow_name: str
+ r"""The registered name of the workflow being executed."""
+
+ input: JSONPayloadRequest
+ r"""A payload containing arbitrary JSON data.
+
+ Used for complete state snapshots or final results.
+ """
diff --git a/src/mistralai/client/models/workflowexecutionstartedattributesresponse.py b/src/mistralai/client/models/workflowexecutionstartedattributesresponse.py
new file mode 100644
index 00000000..74e55c29
--- /dev/null
+++ b/src/mistralai/client/models/workflowexecutionstartedattributesresponse.py
@@ -0,0 +1,37 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: d2276919a895
+
+from __future__ import annotations
+from .jsonpayloadresponse import JSONPayloadResponse, JSONPayloadResponseTypedDict
+from mistralai.client.types import BaseModel
+from typing_extensions import TypedDict
+
+
+class WorkflowExecutionStartedAttributesResponseTypedDict(TypedDict):
+ r"""Attributes for workflow execution started events."""
+
+ task_id: str
+ r"""Unique identifier for the task within the workflow execution."""
+ workflow_name: str
+ r"""The registered name of the workflow being executed."""
+ input: JSONPayloadResponseTypedDict
+ r"""A payload containing arbitrary JSON data.
+
+ Used for complete state snapshots or final results.
+ """
+
+
+class WorkflowExecutionStartedAttributesResponse(BaseModel):
+ r"""Attributes for workflow execution started events."""
+
+ task_id: str
+ r"""Unique identifier for the task within the workflow execution."""
+
+ workflow_name: str
+ r"""The registered name of the workflow being executed."""
+
+ input: JSONPayloadResponse
+ r"""A payload containing arbitrary JSON data.
+
+ Used for complete state snapshots or final results.
+ """
diff --git a/src/mistralai/client/models/workflowexecutionstartedrequest.py b/src/mistralai/client/models/workflowexecutionstartedrequest.py
new file mode 100644
index 00000000..0ce8bbce
--- /dev/null
+++ b/src/mistralai/client/models/workflowexecutionstartedrequest.py
@@ -0,0 +1,120 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 15e73dddf8c4
+
+from __future__ import annotations
+from .workflowexecutionstartedattributesrequest import (
+ WorkflowExecutionStartedAttributesRequest,
+ WorkflowExecutionStartedAttributesRequestTypedDict,
+)
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic import model_serializer
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class WorkflowExecutionStartedRequestTypedDict(TypedDict):
+ r"""Emitted when a workflow execution begins.
+
+ This is the first event in any workflow execution lifecycle.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+ attributes: WorkflowExecutionStartedAttributesRequestTypedDict
+ r"""Attributes for workflow execution started events."""
+ event_timestamp: NotRequired[int]
+ r"""Unix timestamp in nanoseconds when the event was created."""
+ parent_workflow_exec_id: NotRequired[Nullable[str]]
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+ event_type: Literal["WORKFLOW_EXECUTION_STARTED"]
+ r"""Event type discriminator."""
+
+
+class WorkflowExecutionStartedRequest(BaseModel):
+ r"""Emitted when a workflow execution begins.
+
+ This is the first event in any workflow execution lifecycle.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+
+ attributes: WorkflowExecutionStartedAttributesRequest
+ r"""Attributes for workflow execution started events."""
+
+ event_timestamp: Optional[int] = None
+ r"""Unix timestamp in nanoseconds when the event was created."""
+
+ parent_workflow_exec_id: OptionalNullable[str] = UNSET
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+
+ event_type: Annotated[
+ Annotated[
+ Optional[Literal["WORKFLOW_EXECUTION_STARTED"]],
+ AfterValidator(validate_const("WORKFLOW_EXECUTION_STARTED")),
+ ],
+ pydantic.Field(alias="event_type"),
+ ] = "WORKFLOW_EXECUTION_STARTED"
+ r"""Event type discriminator."""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(
+ ["event_timestamp", "parent_workflow_exec_id", "event_type"]
+ )
+ nullable_fields = set(["parent_workflow_exec_id"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
+
+
+try:
+ WorkflowExecutionStartedRequest.model_rebuild()
+except NameError:
+ pass
diff --git a/src/mistralai/client/models/workflowexecutionstartedresponse.py b/src/mistralai/client/models/workflowexecutionstartedresponse.py
new file mode 100644
index 00000000..fa1e46a0
--- /dev/null
+++ b/src/mistralai/client/models/workflowexecutionstartedresponse.py
@@ -0,0 +1,112 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: e1a597c911ea
+
+from __future__ import annotations
+from .workflowexecutionstartedattributesresponse import (
+ WorkflowExecutionStartedAttributesResponse,
+ WorkflowExecutionStartedAttributesResponseTypedDict,
+)
+from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic import model_serializer
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, TypedDict
+
+
+class WorkflowExecutionStartedResponseTypedDict(TypedDict):
+ r"""Emitted when a workflow execution begins.
+
+ This is the first event in any workflow execution lifecycle.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+ event_timestamp: int
+ r"""Unix timestamp in nanoseconds when the event was created."""
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+ parent_workflow_exec_id: Nullable[str]
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+ attributes: WorkflowExecutionStartedAttributesResponseTypedDict
+ r"""Attributes for workflow execution started events."""
+ event_type: Literal["WORKFLOW_EXECUTION_STARTED"]
+ r"""Event type discriminator."""
+
+
+class WorkflowExecutionStartedResponse(BaseModel):
+ r"""Emitted when a workflow execution begins.
+
+ This is the first event in any workflow execution lifecycle.
+ """
+
+ event_id: str
+ r"""Unique identifier for this event instance."""
+
+ event_timestamp: int
+ r"""Unix timestamp in nanoseconds when the event was created."""
+
+ root_workflow_exec_id: str
+ r"""Execution ID of the root workflow that initiated this execution chain."""
+
+ parent_workflow_exec_id: Nullable[str]
+ r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+
+ workflow_exec_id: str
+ r"""Execution ID of the workflow that emitted this event."""
+
+ workflow_run_id: str
+ r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+
+ workflow_name: str
+ r"""The registered name of the workflow that emitted this event."""
+
+ attributes: WorkflowExecutionStartedAttributesResponse
+ r"""Attributes for workflow execution started events."""
+
+ event_type: Annotated[
+ Annotated[
+ Optional[Literal["WORKFLOW_EXECUTION_STARTED"]],
+ AfterValidator(validate_const("WORKFLOW_EXECUTION_STARTED")),
+ ],
+ pydantic.Field(alias="event_type"),
+ ] = "WORKFLOW_EXECUTION_STARTED"
+ r"""Event type discriminator."""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["event_type"])
+ nullable_fields = set(["parent_workflow_exec_id"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
+
+
+try:
+ WorkflowExecutionStartedResponse.model_rebuild()
+except NameError:
+ pass
diff --git a/src/mistralai/client/models/workflowexecutionstatus.py b/src/mistralai/client/models/workflowexecutionstatus.py
new file mode 100644
index 00000000..611f3aa1
--- /dev/null
+++ b/src/mistralai/client/models/workflowexecutionstatus.py
@@ -0,0 +1,21 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 56a16810d5de
+
+from __future__ import annotations
+from mistralai.client.types import UnrecognizedStr
+from typing import Literal, Union
+
+
+WorkflowExecutionStatus = Union[
+ Literal[
+ "RUNNING",
+ "COMPLETED",
+ "FAILED",
+ "CANCELED",
+ "TERMINATED",
+ "CONTINUED_AS_NEW",
+ "TIMED_OUT",
+ "RETRYING_AFTER_ERROR",
+ ],
+ UnrecognizedStr,
+]
diff --git a/src/mistralai/client/models/workflowexecutionsyncresponse.py b/src/mistralai/client/models/workflowexecutionsyncresponse.py
new file mode 100644
index 00000000..6ed0f038
--- /dev/null
+++ b/src/mistralai/client/models/workflowexecutionsyncresponse.py
@@ -0,0 +1,31 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: ef77c54a11b5
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel
+from typing import Any
+from typing_extensions import TypedDict
+
+
+class WorkflowExecutionSyncResponseTypedDict(TypedDict):
+ r"""Response model for synchronous workflow execution"""
+
+ workflow_name: str
+ r"""Name of the workflow that was executed"""
+ execution_id: str
+ r"""ID of the workflow execution"""
+ result: Any
+ r"""The result of the workflow execution"""
+
+
+class WorkflowExecutionSyncResponse(BaseModel):
+ r"""Response model for synchronous workflow execution"""
+
+ workflow_name: str
+ r"""Name of the workflow that was executed"""
+
+ execution_id: str
+ r"""ID of the workflow execution"""
+
+ result: Any
+ r"""The result of the workflow execution"""
diff --git a/src/mistralai/client/models/workflowexecutiontraceevent.py b/src/mistralai/client/models/workflowexecutiontraceevent.py
new file mode 100644
index 00000000..f74cd5ab
--- /dev/null
+++ b/src/mistralai/client/models/workflowexecutiontraceevent.py
@@ -0,0 +1,64 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 584e0a98082a
+
+from __future__ import annotations
+from .eventtype import EventType
+from .workflowexecutiontracesummaryattributesvalues import (
+ WorkflowExecutionTraceSummaryAttributesValues,
+ WorkflowExecutionTraceSummaryAttributesValuesTypedDict,
+)
+from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL
+from pydantic import model_serializer
+from typing import Dict, Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+class WorkflowExecutionTraceEventTypedDict(TypedDict):
+ name: str
+ r"""Name of the event"""
+ id: str
+ r"""The ID of the event"""
+ timestamp_unix_nano: int
+ r"""The timestamp of the event in nanoseconds since the Unix epoch"""
+ attributes: Dict[
+ str, Nullable[WorkflowExecutionTraceSummaryAttributesValuesTypedDict]
+ ]
+ r"""The attributes of the event"""
+ type: NotRequired[EventType]
+ internal: NotRequired[bool]
+ r"""Whether the event is internal"""
+
+
+class WorkflowExecutionTraceEvent(BaseModel):
+ name: str
+ r"""Name of the event"""
+
+ id: str
+ r"""The ID of the event"""
+
+ timestamp_unix_nano: int
+ r"""The timestamp of the event in nanoseconds since the Unix epoch"""
+
+ attributes: Dict[str, Nullable[WorkflowExecutionTraceSummaryAttributesValues]]
+ r"""The attributes of the event"""
+
+ type: Optional[EventType] = None
+
+ internal: Optional[bool] = False
+ r"""Whether the event is internal"""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["type", "internal"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+
+ if val != UNSET_SENTINEL:
+ if val is not None or k not in optional_fields:
+ m[k] = val
+
+ return m
diff --git a/src/mistralai/client/models/workflowexecutiontraceeventsresponse.py b/src/mistralai/client/models/workflowexecutiontraceeventsresponse.py
new file mode 100644
index 00000000..0c24a1ea
--- /dev/null
+++ b/src/mistralai/client/models/workflowexecutiontraceeventsresponse.py
@@ -0,0 +1,121 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 94d92762ccb7
+
+from __future__ import annotations
+from .workflowexecutionprogresstraceevent import (
+ WorkflowExecutionProgressTraceEvent,
+ WorkflowExecutionProgressTraceEventTypedDict,
+)
+from .workflowexecutionstatus import WorkflowExecutionStatus
+from .workflowexecutiontraceevent import (
+ WorkflowExecutionTraceEvent,
+ WorkflowExecutionTraceEventTypedDict,
+)
+from datetime import datetime
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing import Any, List, Optional, Union
+from typing_extensions import NotRequired, TypeAliasType, TypedDict
+
+
+WorkflowExecutionTraceEventsResponseEventTypedDict = TypeAliasType(
+ "WorkflowExecutionTraceEventsResponseEventTypedDict",
+ Union[
+ WorkflowExecutionTraceEventTypedDict,
+ WorkflowExecutionProgressTraceEventTypedDict,
+ ],
+)
+
+
+WorkflowExecutionTraceEventsResponseEvent = TypeAliasType(
+ "WorkflowExecutionTraceEventsResponseEvent",
+ Union[WorkflowExecutionTraceEvent, WorkflowExecutionProgressTraceEvent],
+)
+
+
+class WorkflowExecutionTraceEventsResponseTypedDict(TypedDict):
+ workflow_name: str
+ r"""The name of the workflow"""
+ execution_id: str
+ r"""The ID of the workflow execution"""
+ root_execution_id: str
+ r"""The root execution ID of the workflow execution"""
+ status: Nullable[WorkflowExecutionStatus]
+ r"""The status of the workflow execution"""
+ start_time: datetime
+ r"""The start time of the workflow execution"""
+ end_time: Nullable[datetime]
+ r"""The end time of the workflow execution, if available"""
+ result: Nullable[Any]
+ r"""The result of the workflow execution, if available"""
+ parent_execution_id: NotRequired[Nullable[str]]
+ r"""The parent execution ID of the workflow execution"""
+ total_duration_ms: NotRequired[Nullable[int]]
+ r"""The total duration of the trace in milliseconds"""
+ events: NotRequired[List[WorkflowExecutionTraceEventsResponseEventTypedDict]]
+ r"""The events of the workflow execution"""
+
+
+class WorkflowExecutionTraceEventsResponse(BaseModel):
+ workflow_name: str
+ r"""The name of the workflow"""
+
+ execution_id: str
+ r"""The ID of the workflow execution"""
+
+ root_execution_id: str
+ r"""The root execution ID of the workflow execution"""
+
+ status: Nullable[WorkflowExecutionStatus]
+ r"""The status of the workflow execution"""
+
+ start_time: datetime
+ r"""The start time of the workflow execution"""
+
+ end_time: Nullable[datetime]
+ r"""The end time of the workflow execution, if available"""
+
+ result: Nullable[Any]
+ r"""The result of the workflow execution, if available"""
+
+ parent_execution_id: OptionalNullable[str] = UNSET
+ r"""The parent execution ID of the workflow execution"""
+
+ total_duration_ms: OptionalNullable[int] = UNSET
+ r"""The total duration of the trace in milliseconds"""
+
+ events: Optional[List[WorkflowExecutionTraceEventsResponseEvent]] = None
+ r"""The events of the workflow execution"""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["parent_execution_id", "total_duration_ms", "events"])
+ nullable_fields = set(
+ ["parent_execution_id", "status", "end_time", "total_duration_ms", "result"]
+ )
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
diff --git a/src/mistralai/client/models/workflowexecutiontraceotelresponse.py b/src/mistralai/client/models/workflowexecutiontraceotelresponse.py
new file mode 100644
index 00000000..b4320b83
--- /dev/null
+++ b/src/mistralai/client/models/workflowexecutiontraceotelresponse.py
@@ -0,0 +1,124 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 7531bf461dc2
+
+from __future__ import annotations
+from .tempogettraceresponse import TempoGetTraceResponse, TempoGetTraceResponseTypedDict
+from .workflowexecutionstatus import WorkflowExecutionStatus
+from datetime import datetime
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing import Any
+from typing_extensions import NotRequired, TypedDict
+
+
+class WorkflowExecutionTraceOTelResponseTypedDict(TypedDict):
+ workflow_name: str
+ r"""The name of the workflow"""
+ execution_id: str
+ r"""The ID of the workflow execution"""
+ root_execution_id: str
+ r"""The root execution ID of the workflow execution"""
+ status: Nullable[WorkflowExecutionStatus]
+ r"""The status of the workflow execution"""
+ start_time: datetime
+ r"""The start time of the workflow execution"""
+ end_time: Nullable[datetime]
+ r"""The end time of the workflow execution, if available"""
+ result: Nullable[Any]
+ r"""The result of the workflow execution, if available"""
+ data_source: str
+ r"""The data source of the trace"""
+ parent_execution_id: NotRequired[Nullable[str]]
+ r"""The parent execution ID of the workflow execution"""
+ total_duration_ms: NotRequired[Nullable[int]]
+ r"""The total duration of the trace in milliseconds"""
+ otel_trace_id: NotRequired[Nullable[str]]
+ r"""The ID of the trace"""
+ otel_trace_data: NotRequired[Nullable[TempoGetTraceResponseTypedDict]]
+ r"""The raw OpenTelemetry trace data"""
+
+
+class WorkflowExecutionTraceOTelResponse(BaseModel):
+ workflow_name: str
+ r"""The name of the workflow"""
+
+ execution_id: str
+ r"""The ID of the workflow execution"""
+
+ root_execution_id: str
+ r"""The root execution ID of the workflow execution"""
+
+ status: Nullable[WorkflowExecutionStatus]
+ r"""The status of the workflow execution"""
+
+ start_time: datetime
+ r"""The start time of the workflow execution"""
+
+ end_time: Nullable[datetime]
+ r"""The end time of the workflow execution, if available"""
+
+ result: Nullable[Any]
+ r"""The result of the workflow execution, if available"""
+
+ data_source: str
+ r"""The data source of the trace"""
+
+ parent_execution_id: OptionalNullable[str] = UNSET
+ r"""The parent execution ID of the workflow execution"""
+
+ total_duration_ms: OptionalNullable[int] = UNSET
+ r"""The total duration of the trace in milliseconds"""
+
+ otel_trace_id: OptionalNullable[str] = UNSET
+ r"""The ID of the trace"""
+
+ otel_trace_data: OptionalNullable[TempoGetTraceResponse] = UNSET
+ r"""The raw OpenTelemetry trace data"""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(
+ [
+ "parent_execution_id",
+ "total_duration_ms",
+ "otel_trace_id",
+ "otel_trace_data",
+ ]
+ )
+ nullable_fields = set(
+ [
+ "parent_execution_id",
+ "status",
+ "end_time",
+ "total_duration_ms",
+ "result",
+ "otel_trace_id",
+ "otel_trace_data",
+ ]
+ )
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
diff --git a/src/mistralai/client/models/workflowexecutiontracesummaryattributesvalues.py b/src/mistralai/client/models/workflowexecutiontracesummaryattributesvalues.py
new file mode 100644
index 00000000..e288146d
--- /dev/null
+++ b/src/mistralai/client/models/workflowexecutiontracesummaryattributesvalues.py
@@ -0,0 +1,17 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 5e3448a39a40
+
+from __future__ import annotations
+from typing import Union
+from typing_extensions import TypeAliasType
+
+
+WorkflowExecutionTraceSummaryAttributesValuesTypedDict = TypeAliasType(
+ "WorkflowExecutionTraceSummaryAttributesValuesTypedDict",
+ Union[str, int, float, bool],
+)
+
+
+WorkflowExecutionTraceSummaryAttributesValues = TypeAliasType(
+ "WorkflowExecutionTraceSummaryAttributesValues", Union[str, int, float, bool]
+)
diff --git a/src/mistralai/client/models/workflowexecutiontracesummaryresponse.py b/src/mistralai/client/models/workflowexecutiontracesummaryresponse.py
new file mode 100644
index 00000000..d2cc7c6b
--- /dev/null
+++ b/src/mistralai/client/models/workflowexecutiontracesummaryresponse.py
@@ -0,0 +1,109 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 2ea199810f5f
+
+from __future__ import annotations
+from .workflowexecutionstatus import WorkflowExecutionStatus
+from .workflowexecutiontracesummaryspan import (
+ WorkflowExecutionTraceSummarySpan,
+ WorkflowExecutionTraceSummarySpanTypedDict,
+)
+from datetime import datetime
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing import Any
+from typing_extensions import NotRequired, TypedDict
+
+
+class WorkflowExecutionTraceSummaryResponseTypedDict(TypedDict):
+ workflow_name: str
+ r"""The name of the workflow"""
+ execution_id: str
+ r"""The ID of the workflow execution"""
+ root_execution_id: str
+ r"""The root execution ID of the workflow execution"""
+ status: Nullable[WorkflowExecutionStatus]
+ r"""The status of the workflow execution"""
+ start_time: datetime
+ r"""The start time of the workflow execution"""
+ end_time: Nullable[datetime]
+ r"""The end time of the workflow execution, if available"""
+ result: Nullable[Any]
+ r"""The result of the workflow execution, if available"""
+ parent_execution_id: NotRequired[Nullable[str]]
+ r"""The parent execution ID of the workflow execution"""
+ total_duration_ms: NotRequired[Nullable[int]]
+ r"""The total duration of the trace in milliseconds"""
+ span_tree: NotRequired[Nullable[WorkflowExecutionTraceSummarySpanTypedDict]]
+ r"""The root span of the trace"""
+
+
+class WorkflowExecutionTraceSummaryResponse(BaseModel):
+ workflow_name: str
+ r"""The name of the workflow"""
+
+ execution_id: str
+ r"""The ID of the workflow execution"""
+
+ root_execution_id: str
+ r"""The root execution ID of the workflow execution"""
+
+ status: Nullable[WorkflowExecutionStatus]
+ r"""The status of the workflow execution"""
+
+ start_time: datetime
+ r"""The start time of the workflow execution"""
+
+ end_time: Nullable[datetime]
+ r"""The end time of the workflow execution, if available"""
+
+ result: Nullable[Any]
+ r"""The result of the workflow execution, if available"""
+
+ parent_execution_id: OptionalNullable[str] = UNSET
+ r"""The parent execution ID of the workflow execution"""
+
+ total_duration_ms: OptionalNullable[int] = UNSET
+ r"""The total duration of the trace in milliseconds"""
+
+ span_tree: OptionalNullable[WorkflowExecutionTraceSummarySpan] = UNSET
+ r"""The root span of the trace"""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["parent_execution_id", "total_duration_ms", "span_tree"])
+ nullable_fields = set(
+ [
+ "parent_execution_id",
+ "status",
+ "end_time",
+ "total_duration_ms",
+ "result",
+ "span_tree",
+ ]
+ )
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
diff --git a/src/mistralai/client/models/workflowexecutiontracesummaryspan.py b/src/mistralai/client/models/workflowexecutiontracesummaryspan.py
new file mode 100644
index 00000000..e80db90f
--- /dev/null
+++ b/src/mistralai/client/models/workflowexecutiontracesummaryspan.py
@@ -0,0 +1,83 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 750353cbd052
+
+from __future__ import annotations
+from .workflowexecutiontraceevent import (
+ WorkflowExecutionTraceEvent,
+ WorkflowExecutionTraceEventTypedDict,
+)
+from .workflowexecutiontracesummaryattributesvalues import (
+ WorkflowExecutionTraceSummaryAttributesValues,
+ WorkflowExecutionTraceSummaryAttributesValuesTypedDict,
+)
+from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL
+from pydantic import model_serializer
+from typing import Dict, List, Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+class WorkflowExecutionTraceSummarySpanTypedDict(TypedDict):
+ span_id: str
+ r"""The ID of the span"""
+ name: str
+ r"""The name of the span"""
+ start_time_unix_nano: int
+ r"""The start time of the span in nanoseconds since the Unix epoch"""
+ end_time_unix_nano: Nullable[int]
+ r"""The end time of the span in nanoseconds since the Unix epoch"""
+ attributes: Dict[
+ str, Nullable[WorkflowExecutionTraceSummaryAttributesValuesTypedDict]
+ ]
+ r"""The attributes of the span"""
+ events: List[WorkflowExecutionTraceEventTypedDict]
+ r"""The events of the span"""
+ children: NotRequired[List[WorkflowExecutionTraceSummarySpanTypedDict]]
+ r"""The child spans of the span"""
+
+
+class WorkflowExecutionTraceSummarySpan(BaseModel):
+ span_id: str
+ r"""The ID of the span"""
+
+ name: str
+ r"""The name of the span"""
+
+ start_time_unix_nano: int
+ r"""The start time of the span in nanoseconds since the Unix epoch"""
+
+ end_time_unix_nano: Nullable[int]
+ r"""The end time of the span in nanoseconds since the Unix epoch"""
+
+ attributes: Dict[str, Nullable[WorkflowExecutionTraceSummaryAttributesValues]]
+ r"""The attributes of the span"""
+
+ events: List[WorkflowExecutionTraceEvent]
+ r"""The events of the span"""
+
+ children: Optional[List[WorkflowExecutionTraceSummarySpan]] = None
+ r"""The child spans of the span"""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["children"])
+ nullable_fields = set(["end_time_unix_nano"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
diff --git a/src/mistralai/client/models/workflowexecutionwithoutresultresponse.py b/src/mistralai/client/models/workflowexecutionwithoutresultresponse.py
new file mode 100644
index 00000000..082653c8
--- /dev/null
+++ b/src/mistralai/client/models/workflowexecutionwithoutresultresponse.py
@@ -0,0 +1,87 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: dd70ba8def79
+
+from __future__ import annotations
+from .workflowexecutionstatus import WorkflowExecutionStatus
+from datetime import datetime
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing_extensions import NotRequired, TypedDict
+
+
+class WorkflowExecutionWithoutResultResponseTypedDict(TypedDict):
+ workflow_name: str
+ r"""The name of the workflow"""
+ execution_id: str
+ r"""The ID of the workflow execution"""
+ root_execution_id: str
+ r"""The root execution ID of the workflow execution"""
+ status: Nullable[WorkflowExecutionStatus]
+ r"""The status of the workflow execution"""
+ start_time: datetime
+ r"""The start time of the workflow execution"""
+ end_time: Nullable[datetime]
+ r"""The end time of the workflow execution, if available"""
+ parent_execution_id: NotRequired[Nullable[str]]
+ r"""The parent execution ID of the workflow execution"""
+ total_duration_ms: NotRequired[Nullable[int]]
+ r"""The total duration of the trace in milliseconds"""
+
+
+class WorkflowExecutionWithoutResultResponse(BaseModel):
+ workflow_name: str
+ r"""The name of the workflow"""
+
+ execution_id: str
+ r"""The ID of the workflow execution"""
+
+ root_execution_id: str
+ r"""The root execution ID of the workflow execution"""
+
+ status: Nullable[WorkflowExecutionStatus]
+ r"""The status of the workflow execution"""
+
+ start_time: datetime
+ r"""The start time of the workflow execution"""
+
+ end_time: Nullable[datetime]
+ r"""The end time of the workflow execution, if available"""
+
+ parent_execution_id: OptionalNullable[str] = UNSET
+ r"""The parent execution ID of the workflow execution"""
+
+ total_duration_ms: OptionalNullable[int] = UNSET
+ r"""The total duration of the trace in milliseconds"""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["parent_execution_id", "total_duration_ms"])
+ nullable_fields = set(
+ ["parent_execution_id", "status", "end_time", "total_duration_ms"]
+ )
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
diff --git a/src/mistralai/client/models/workflowgetresponse.py b/src/mistralai/client/models/workflowgetresponse.py
new file mode 100644
index 00000000..ef1391c9
--- /dev/null
+++ b/src/mistralai/client/models/workflowgetresponse.py
@@ -0,0 +1,18 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 230f55a36ebf
+
+from __future__ import annotations
+from .workflowwithworkerstatus import (
+ WorkflowWithWorkerStatus,
+ WorkflowWithWorkerStatusTypedDict,
+)
+from mistralai.client.types import BaseModel
+from typing_extensions import TypedDict
+
+
+class WorkflowGetResponseTypedDict(TypedDict):
+ workflow: WorkflowWithWorkerStatusTypedDict
+
+
+class WorkflowGetResponse(BaseModel):
+ workflow: WorkflowWithWorkerStatus
diff --git a/src/mistralai/client/models/workflowlistresponse.py b/src/mistralai/client/models/workflowlistresponse.py
new file mode 100644
index 00000000..a6497e82
--- /dev/null
+++ b/src/mistralai/client/models/workflowlistresponse.py
@@ -0,0 +1,39 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: b255b05e7395
+
+from __future__ import annotations
+from .workflowbasicdefinition import (
+ WorkflowBasicDefinition,
+ WorkflowBasicDefinitionTypedDict,
+)
+from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL
+from pydantic import model_serializer
+from typing import List
+from typing_extensions import TypedDict
+
+
+class WorkflowListResponseTypedDict(TypedDict):
+ workflows: List[WorkflowBasicDefinitionTypedDict]
+ r"""A list of workflows"""
+ next_cursor: Nullable[str]
+
+
+class WorkflowListResponse(BaseModel):
+ workflows: List[WorkflowBasicDefinition]
+ r"""A list of workflows"""
+
+ next_cursor: Nullable[str]
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+
+ if val != UNSET_SENTINEL:
+ m[k] = val
+
+ return m
diff --git a/src/mistralai/client/models/workflowmetadata.py b/src/mistralai/client/models/workflowmetadata.py
new file mode 100644
index 00000000..59752539
--- /dev/null
+++ b/src/mistralai/client/models/workflowmetadata.py
@@ -0,0 +1,48 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 0435707d6944
+
+from __future__ import annotations
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing_extensions import NotRequired, TypedDict
+
+
+class WorkflowMetadataTypedDict(TypedDict):
+ shared_namespace: NotRequired[Nullable[str]]
+ r"""Namespace for shared workflows, None if user-owned"""
+
+
+class WorkflowMetadata(BaseModel):
+ shared_namespace: OptionalNullable[str] = UNSET
+ r"""Namespace for shared workflows, None if user-owned"""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["shared_namespace"])
+ nullable_fields = set(["shared_namespace"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
diff --git a/src/mistralai/client/models/workflowmetrics.py b/src/mistralai/client/models/workflowmetrics.py
new file mode 100644
index 00000000..d80bb3db
--- /dev/null
+++ b/src/mistralai/client/models/workflowmetrics.py
@@ -0,0 +1,53 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 471fb1e10716
+
+from __future__ import annotations
+from .scalarmetric import ScalarMetric, ScalarMetricTypedDict
+from .timeseriesmetric import TimeSeriesMetric, TimeSeriesMetricTypedDict
+from mistralai.client.types import BaseModel
+from typing_extensions import TypedDict
+
+
+class WorkflowMetricsTypedDict(TypedDict):
+ r"""Complete metrics for a specific workflow.
+
+ This type combines all metric categories.
+ """
+
+ execution_count: ScalarMetricTypedDict
+ r"""Scalar metric with a single value."""
+ success_count: ScalarMetricTypedDict
+ r"""Scalar metric with a single value."""
+ error_count: ScalarMetricTypedDict
+ r"""Scalar metric with a single value."""
+ average_latency_ms: ScalarMetricTypedDict
+ r"""Scalar metric with a single value."""
+ latency_over_time: TimeSeriesMetricTypedDict
+ r"""Time-series metric with timestamp-value pairs."""
+ retry_rate: ScalarMetricTypedDict
+ r"""Scalar metric with a single value."""
+
+
+class WorkflowMetrics(BaseModel):
+ r"""Complete metrics for a specific workflow.
+
+ This type combines all metric categories.
+ """
+
+ execution_count: ScalarMetric
+ r"""Scalar metric with a single value."""
+
+ success_count: ScalarMetric
+ r"""Scalar metric with a single value."""
+
+ error_count: ScalarMetric
+ r"""Scalar metric with a single value."""
+
+ average_latency_ms: ScalarMetric
+ r"""Scalar metric with a single value."""
+
+ latency_over_time: TimeSeriesMetric
+ r"""Time-series metric with timestamp-value pairs."""
+
+ retry_rate: ScalarMetric
+ r"""Scalar metric with a single value."""
diff --git a/src/mistralai/client/models/workflowregistration.py b/src/mistralai/client/models/workflowregistration.py
new file mode 100644
index 00000000..e23f9fb2
--- /dev/null
+++ b/src/mistralai/client/models/workflowregistration.py
@@ -0,0 +1,77 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 2b937728c88b
+
+from __future__ import annotations
+from .workflow import Workflow, WorkflowTypedDict
+from .workflowcodedefinition import (
+ WorkflowCodeDefinition,
+ WorkflowCodeDefinitionTypedDict,
+)
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing import Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+class WorkflowRegistrationTypedDict(TypedDict):
+ id: str
+ r"""Unique identifier of the workflow registration"""
+ task_queue: str
+ r"""Project name of the workflow"""
+ definition: WorkflowCodeDefinitionTypedDict
+ workflow_id: str
+ r"""Workflow ID of the workflow"""
+ workflow: NotRequired[Nullable[WorkflowTypedDict]]
+ r"""Workflow of the workflow registration"""
+ compatible_with_chat_assistant: NotRequired[bool]
+ r"""Whether the workflow is compatible with chat assistant"""
+
+
+class WorkflowRegistration(BaseModel):
+ id: str
+ r"""Unique identifier of the workflow registration"""
+
+ task_queue: str
+ r"""Project name of the workflow"""
+
+ definition: WorkflowCodeDefinition
+
+ workflow_id: str
+ r"""Workflow ID of the workflow"""
+
+ workflow: OptionalNullable[Workflow] = UNSET
+ r"""Workflow of the workflow registration"""
+
+ compatible_with_chat_assistant: Optional[bool] = False
+ r"""Whether the workflow is compatible with chat assistant"""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["workflow", "compatible_with_chat_assistant"])
+ nullable_fields = set(["workflow"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
diff --git a/src/mistralai/client/models/workflowregistrationgetresponse.py b/src/mistralai/client/models/workflowregistrationgetresponse.py
new file mode 100644
index 00000000..3105856d
--- /dev/null
+++ b/src/mistralai/client/models/workflowregistrationgetresponse.py
@@ -0,0 +1,21 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: f063656f22ae
+
+from __future__ import annotations
+from .workflowregistrationwithworkerstatus import (
+ WorkflowRegistrationWithWorkerStatus,
+ WorkflowRegistrationWithWorkerStatusTypedDict,
+)
+from mistralai.client.types import BaseModel
+from typing_extensions import TypedDict
+
+
+class WorkflowRegistrationGetResponseTypedDict(TypedDict):
+ workflow_registration: WorkflowRegistrationWithWorkerStatusTypedDict
+ workflow_version: WorkflowRegistrationWithWorkerStatusTypedDict
+
+
+class WorkflowRegistrationGetResponse(BaseModel):
+ workflow_registration: WorkflowRegistrationWithWorkerStatus
+
+ workflow_version: WorkflowRegistrationWithWorkerStatus
diff --git a/src/mistralai/client/models/workflowregistrationlistresponse.py b/src/mistralai/client/models/workflowregistrationlistresponse.py
new file mode 100644
index 00000000..e66f219b
--- /dev/null
+++ b/src/mistralai/client/models/workflowregistrationlistresponse.py
@@ -0,0 +1,41 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 3752e5b776db
+
+from __future__ import annotations
+from .workflowregistration import WorkflowRegistration, WorkflowRegistrationTypedDict
+from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL
+from pydantic import model_serializer
+from typing import List
+from typing_extensions import TypedDict
+
+
+class WorkflowRegistrationListResponseTypedDict(TypedDict):
+ workflow_registrations: List[WorkflowRegistrationTypedDict]
+ r"""A list of workflow registrations"""
+ next_cursor: Nullable[str]
+ workflow_versions: List[WorkflowRegistrationTypedDict]
+ r"""Deprecated: use workflow_registrations"""
+
+
+class WorkflowRegistrationListResponse(BaseModel):
+ workflow_registrations: List[WorkflowRegistration]
+ r"""A list of workflow registrations"""
+
+ next_cursor: Nullable[str]
+
+ workflow_versions: List[WorkflowRegistration]
+ r"""Deprecated: use workflow_registrations"""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+
+ if val != UNSET_SENTINEL:
+ m[k] = val
+
+ return m
diff --git a/src/mistralai/client/models/workflowregistrationwithworkerstatus.py b/src/mistralai/client/models/workflowregistrationwithworkerstatus.py
new file mode 100644
index 00000000..c0d9a69e
--- /dev/null
+++ b/src/mistralai/client/models/workflowregistrationwithworkerstatus.py
@@ -0,0 +1,82 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 23b661b9496f
+
+from __future__ import annotations
+from .workflow import Workflow, WorkflowTypedDict
+from .workflowcodedefinition import (
+ WorkflowCodeDefinition,
+ WorkflowCodeDefinitionTypedDict,
+)
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing import Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+class WorkflowRegistrationWithWorkerStatusTypedDict(TypedDict):
+ id: str
+ r"""Unique identifier of the workflow registration"""
+ task_queue: str
+ r"""Project name of the workflow"""
+ definition: WorkflowCodeDefinitionTypedDict
+ workflow_id: str
+ r"""Workflow ID of the workflow"""
+ active: bool
+ r"""Whether the workflow registration is active"""
+ workflow: NotRequired[Nullable[WorkflowTypedDict]]
+ r"""Workflow of the workflow registration"""
+ compatible_with_chat_assistant: NotRequired[bool]
+ r"""Whether the workflow is compatible with chat assistant"""
+
+
+class WorkflowRegistrationWithWorkerStatus(BaseModel):
+ id: str
+ r"""Unique identifier of the workflow registration"""
+
+ task_queue: str
+ r"""Project name of the workflow"""
+
+ definition: WorkflowCodeDefinition
+
+ workflow_id: str
+ r"""Workflow ID of the workflow"""
+
+ active: bool
+ r"""Whether the workflow registration is active"""
+
+ workflow: OptionalNullable[Workflow] = UNSET
+ r"""Workflow of the workflow registration"""
+
+ compatible_with_chat_assistant: Optional[bool] = False
+ r"""Whether the workflow is compatible with chat assistant"""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(["workflow", "compatible_with_chat_assistant"])
+ nullable_fields = set(["workflow"])
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
diff --git a/src/mistralai/client/models/workflowschedulelistresponse.py b/src/mistralai/client/models/workflowschedulelistresponse.py
new file mode 100644
index 00000000..3c9eb3d7
--- /dev/null
+++ b/src/mistralai/client/models/workflowschedulelistresponse.py
@@ -0,0 +1,21 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: e5247c5183bb
+
+from __future__ import annotations
+from .scheduledefinitionoutput import (
+ ScheduleDefinitionOutput,
+ ScheduleDefinitionOutputTypedDict,
+)
+from mistralai.client.types import BaseModel
+from typing import List
+from typing_extensions import TypedDict
+
+
+class WorkflowScheduleListResponseTypedDict(TypedDict):
+ schedules: List[ScheduleDefinitionOutputTypedDict]
+ r"""A list of workflow schedules"""
+
+
+class WorkflowScheduleListResponse(BaseModel):
+ schedules: List[ScheduleDefinitionOutput]
+ r"""A list of workflow schedules"""
diff --git a/src/mistralai/client/models/workflowschedulerequest.py b/src/mistralai/client/models/workflowschedulerequest.py
new file mode 100644
index 00000000..2ded66b9
--- /dev/null
+++ b/src/mistralai/client/models/workflowschedulerequest.py
@@ -0,0 +1,115 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 2d8256704c6b
+
+from __future__ import annotations
+from .scheduledefinition import ScheduleDefinition, ScheduleDefinitionTypedDict
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+import pydantic
+from pydantic import model_serializer
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class WorkflowScheduleRequestTypedDict(TypedDict):
+ schedule: ScheduleDefinitionTypedDict
+ r"""Specification of the times scheduled actions may occur.
+
+ The times are the union of :py:attr:`calendars`, :py:attr:`intervals`, and
+ :py:attr:`cron_expressions` excluding anything in :py:attr:`skip`.
+
+ Used for input where schedule_id is optional (can be provided or auto-generated).
+ """
+ workflow_registration_id: NotRequired[Nullable[str]]
+ r"""The ID of the workflow registration to schedule"""
+ workflow_version_id: NotRequired[Nullable[str]]
+ r"""Deprecated: use workflow_registration_id"""
+ workflow_identifier: NotRequired[Nullable[str]]
+ r"""The name or ID of the workflow to schedule"""
+ workflow_task_queue: NotRequired[Nullable[str]]
+ r"""Deprecated. Use deployment_name instead."""
+ schedule_id: NotRequired[Nullable[str]]
+ r"""Allows you to specify a custom schedule ID. If not provided, a random ID will be generated."""
+ deployment_name: NotRequired[Nullable[str]]
+ r"""Name of the deployment to route this schedule to"""
+
+
+class WorkflowScheduleRequest(BaseModel):
+ schedule: ScheduleDefinition
+ r"""Specification of the times scheduled actions may occur.
+
+ The times are the union of :py:attr:`calendars`, :py:attr:`intervals`, and
+ :py:attr:`cron_expressions` excluding anything in :py:attr:`skip`.
+
+ Used for input where schedule_id is optional (can be provided or auto-generated).
+ """
+
+ workflow_registration_id: OptionalNullable[str] = UNSET
+ r"""The ID of the workflow registration to schedule"""
+
+ workflow_version_id: OptionalNullable[str] = UNSET
+ r"""Deprecated: use workflow_registration_id"""
+
+ workflow_identifier: OptionalNullable[str] = UNSET
+ r"""The name or ID of the workflow to schedule"""
+
+ workflow_task_queue: Annotated[
+ OptionalNullable[str],
+ pydantic.Field(
+ deprecated="warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
+ ),
+ ] = UNSET
+ r"""Deprecated. Use deployment_name instead."""
+
+ schedule_id: OptionalNullable[str] = UNSET
+ r"""Allows you to specify a custom schedule ID. If not provided, a random ID will be generated."""
+
+ deployment_name: OptionalNullable[str] = UNSET
+ r"""Name of the deployment to route this schedule to"""
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = set(
+ [
+ "workflow_registration_id",
+ "workflow_version_id",
+ "workflow_identifier",
+ "workflow_task_queue",
+ "schedule_id",
+ "deployment_name",
+ ]
+ )
+ nullable_fields = set(
+ [
+ "workflow_registration_id",
+ "workflow_version_id",
+ "workflow_identifier",
+ "workflow_task_queue",
+ "schedule_id",
+ "deployment_name",
+ ]
+ )
+ serialized = handler(self)
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k, serialized.get(n))
+ is_nullable_and_explicitly_set = (
+ k in nullable_fields
+ and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+ )
+
+ if val != UNSET_SENTINEL:
+ if (
+ val is not None
+ or k not in optional_fields
+ or is_nullable_and_explicitly_set
+ ):
+ m[k] = val
+
+ return m
diff --git a/src/mistralai/client/models/workflowscheduleresponse.py b/src/mistralai/client/models/workflowscheduleresponse.py
new file mode 100644
index 00000000..d74ba1da
--- /dev/null
+++ b/src/mistralai/client/models/workflowscheduleresponse.py
@@ -0,0 +1,16 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: c77172c4a9f6
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel
+from typing_extensions import TypedDict
+
+
+class WorkflowScheduleResponseTypedDict(TypedDict):
+    r"""Dict form of the schedule-creation response, for dict-based construction."""
+
+    schedule_id: str
+    r"""The ID of the schedule"""
+
+
+class WorkflowScheduleResponse(BaseModel):
+    r"""Response returned when a workflow schedule is created."""
+
+    schedule_id: str
+    r"""The ID of the schedule"""
diff --git a/src/mistralai/client/models/workflowtaskfailedattributes.py b/src/mistralai/client/models/workflowtaskfailedattributes.py
new file mode 100644
index 00000000..49444347
--- /dev/null
+++ b/src/mistralai/client/models/workflowtaskfailedattributes.py
@@ -0,0 +1,26 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: c4c09c4d5ea7
+
+from __future__ import annotations
+from .failure import Failure, FailureTypedDict
+from mistralai.client.types import BaseModel
+from typing_extensions import TypedDict
+
+
+# The TypedDict mirrors the pydantic model below so callers can pass plain
+# dicts wherever the model is accepted.
+class WorkflowTaskFailedAttributesTypedDict(TypedDict):
+    r"""Attributes for workflow task failed events."""
+
+    task_id: str
+    r"""Unique identifier for the task within the workflow execution."""
+    failure: FailureTypedDict
+    r"""Represents an error or exception that occurred during execution."""
+
+
+class WorkflowTaskFailedAttributes(BaseModel):
+    r"""Attributes for workflow task failed events."""
+
+    task_id: str
+    r"""Unique identifier for the task within the workflow execution."""
+
+    failure: Failure
+    r"""Represents an error or exception that occurred during execution."""
diff --git a/src/mistralai/client/models/workflowtaskfailedrequest.py b/src/mistralai/client/models/workflowtaskfailedrequest.py
new file mode 100644
index 00000000..9075b0bc
--- /dev/null
+++ b/src/mistralai/client/models/workflowtaskfailedrequest.py
@@ -0,0 +1,122 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: d26184215fe3
+
+from __future__ import annotations
+from .workflowtaskfailedattributes import (
+ WorkflowTaskFailedAttributes,
+ WorkflowTaskFailedAttributesTypedDict,
+)
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic import model_serializer
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+# Dict mirror of WorkflowTaskFailedRequest for dict-based construction.
+class WorkflowTaskFailedRequestTypedDict(TypedDict):
+    r"""Emitted when a workflow task fails.
+
+    This indicates an error occurred during workflow task execution,
+    which may trigger a retry depending on configuration.
+    """
+
+    event_id: str
+    r"""Unique identifier for this event instance."""
+    root_workflow_exec_id: str
+    r"""Execution ID of the root workflow that initiated this execution chain."""
+    workflow_exec_id: str
+    r"""Execution ID of the workflow that emitted this event."""
+    workflow_run_id: str
+    r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+    workflow_name: str
+    r"""The registered name of the workflow that emitted this event."""
+    attributes: WorkflowTaskFailedAttributesTypedDict
+    r"""Attributes for workflow task failed events."""
+    event_timestamp: NotRequired[int]
+    r"""Unix timestamp in nanoseconds when the event was created."""
+    parent_workflow_exec_id: NotRequired[Nullable[str]]
+    r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+    event_type: Literal["WORKFLOW_TASK_FAILED"]
+    r"""Event type discriminator."""
+
+
+class WorkflowTaskFailedRequest(BaseModel):
+    r"""Emitted when a workflow task fails.
+
+    This indicates an error occurred during workflow task execution,
+    which may trigger a retry depending on configuration.
+    """
+
+    event_id: str
+    r"""Unique identifier for this event instance."""
+
+    root_workflow_exec_id: str
+    r"""Execution ID of the root workflow that initiated this execution chain."""
+
+    workflow_exec_id: str
+    r"""Execution ID of the workflow that emitted this event."""
+
+    workflow_run_id: str
+    r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+
+    workflow_name: str
+    r"""The registered name of the workflow that emitted this event."""
+
+    attributes: WorkflowTaskFailedAttributes
+    r"""Attributes for workflow task failed events."""
+
+    event_timestamp: Optional[int] = None
+    r"""Unix timestamp in nanoseconds when the event was created."""
+
+    parent_workflow_exec_id: OptionalNullable[str] = UNSET
+    r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+
+    # Constant discriminator: defaults to the only legal value, and
+    # validate_const rejects any other input during validation.
+    event_type: Annotated[
+        Annotated[
+            Optional[Literal["WORKFLOW_TASK_FAILED"]],
+            AfterValidator(validate_const("WORKFLOW_TASK_FAILED")),
+        ],
+        pydantic.Field(alias="event_type"),
+    ] = "WORKFLOW_TASK_FAILED"
+    r"""Event type discriminator."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        # Omit optional fields left at their defaults and never emit the
+        # UNSET sentinel; keep explicit None only for nullable fields the
+        # caller actually set.
+        optional_fields = set(
+            ["event_timestamp", "parent_workflow_exec_id", "event_type"]
+        )
+        nullable_fields = set(["parent_workflow_exec_id"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k, serialized.get(n))
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
+
+        return m
+
+
+# Resolve forward references eagerly; a NameError just means a dependency is
+# not importable yet, in which case pydantic rebuilds lazily on first use.
+try:
+    WorkflowTaskFailedRequest.model_rebuild()
+except NameError:
+    pass
diff --git a/src/mistralai/client/models/workflowtaskfailedresponse.py b/src/mistralai/client/models/workflowtaskfailedresponse.py
new file mode 100644
index 00000000..154145e3
--- /dev/null
+++ b/src/mistralai/client/models/workflowtaskfailedresponse.py
@@ -0,0 +1,114 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: a02b01867b7f
+
+from __future__ import annotations
+from .workflowtaskfailedattributes import (
+ WorkflowTaskFailedAttributes,
+ WorkflowTaskFailedAttributesTypedDict,
+)
+from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic import model_serializer
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, TypedDict
+
+
+# Dict mirror of WorkflowTaskFailedResponse for dict-based construction.
+class WorkflowTaskFailedResponseTypedDict(TypedDict):
+    r"""Emitted when a workflow task fails.
+
+    This indicates an error occurred during workflow task execution,
+    which may trigger a retry depending on configuration.
+    """
+
+    event_id: str
+    r"""Unique identifier for this event instance."""
+    event_timestamp: int
+    r"""Unix timestamp in nanoseconds when the event was created."""
+    root_workflow_exec_id: str
+    r"""Execution ID of the root workflow that initiated this execution chain."""
+    parent_workflow_exec_id: Nullable[str]
+    r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+    workflow_exec_id: str
+    r"""Execution ID of the workflow that emitted this event."""
+    workflow_run_id: str
+    r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+    workflow_name: str
+    r"""The registered name of the workflow that emitted this event."""
+    attributes: WorkflowTaskFailedAttributesTypedDict
+    r"""Attributes for workflow task failed events."""
+    event_type: Literal["WORKFLOW_TASK_FAILED"]
+    r"""Event type discriminator."""
+
+
+class WorkflowTaskFailedResponse(BaseModel):
+    r"""Emitted when a workflow task fails.
+
+    This indicates an error occurred during workflow task execution,
+    which may trigger a retry depending on configuration.
+    """
+
+    event_id: str
+    r"""Unique identifier for this event instance."""
+
+    event_timestamp: int
+    r"""Unix timestamp in nanoseconds when the event was created."""
+
+    root_workflow_exec_id: str
+    r"""Execution ID of the root workflow that initiated this execution chain."""
+
+    parent_workflow_exec_id: Nullable[str]
+    r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+
+    workflow_exec_id: str
+    r"""Execution ID of the workflow that emitted this event."""
+
+    workflow_run_id: str
+    r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+
+    workflow_name: str
+    r"""The registered name of the workflow that emitted this event."""
+
+    attributes: WorkflowTaskFailedAttributes
+    r"""Attributes for workflow task failed events."""
+
+    # Constant discriminator: defaults to the only legal value, and
+    # validate_const rejects any other input during validation.
+    event_type: Annotated[
+        Annotated[
+            Optional[Literal["WORKFLOW_TASK_FAILED"]],
+            AfterValidator(validate_const("WORKFLOW_TASK_FAILED")),
+        ],
+        pydantic.Field(alias="event_type"),
+    ] = "WORKFLOW_TASK_FAILED"
+    r"""Event type discriminator."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        # Drop the optional discriminator when unset, never emit the UNSET
+        # sentinel, and keep explicit None only for nullable fields the
+        # caller actually set.
+        optional_fields = set(["event_type"])
+        nullable_fields = set(["parent_workflow_exec_id"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k, serialized.get(n))
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
+
+        return m
+
+
+# Resolve forward references eagerly; a NameError just means a dependency is
+# not importable yet, in which case pydantic rebuilds lazily on first use.
+try:
+    WorkflowTaskFailedResponse.model_rebuild()
+except NameError:
+    pass
diff --git a/src/mistralai/client/models/workflowtasktimedoutattributes.py b/src/mistralai/client/models/workflowtasktimedoutattributes.py
new file mode 100644
index 00000000..1824990e
--- /dev/null
+++ b/src/mistralai/client/models/workflowtasktimedoutattributes.py
@@ -0,0 +1,57 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 43369570cb96
+
+from __future__ import annotations
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing_extensions import NotRequired, TypedDict
+
+
+# The TypedDict mirrors the pydantic model below so callers can pass plain
+# dicts wherever the model is accepted.
+class WorkflowTaskTimedOutAttributesTypedDict(TypedDict):
+    r"""Attributes for workflow task timed out events."""
+
+    task_id: str
+    r"""Unique identifier for the task within the workflow execution."""
+    timeout_type: NotRequired[Nullable[str]]
+    r"""The type of timeout that occurred (e.g., 'START_TO_CLOSE', 'SCHEDULE_TO_START')."""
+
+
+class WorkflowTaskTimedOutAttributes(BaseModel):
+    r"""Attributes for workflow task timed out events."""
+
+    task_id: str
+    r"""Unique identifier for the task within the workflow execution."""
+
+    timeout_type: OptionalNullable[str] = UNSET
+    r"""The type of timeout that occurred (e.g., 'START_TO_CLOSE', 'SCHEDULE_TO_START')."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        # Omit timeout_type when left unset, never emit the UNSET sentinel,
+        # and keep an explicit None only when the caller assigned it.
+        optional_fields = set(["timeout_type"])
+        nullable_fields = set(["timeout_type"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k, serialized.get(n))
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
+
+        return m
diff --git a/src/mistralai/client/models/workflowtasktimedoutrequest.py b/src/mistralai/client/models/workflowtasktimedoutrequest.py
new file mode 100644
index 00000000..86a6bcdb
--- /dev/null
+++ b/src/mistralai/client/models/workflowtasktimedoutrequest.py
@@ -0,0 +1,122 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 70f37007e50f
+
+from __future__ import annotations
+from .workflowtasktimedoutattributes import (
+ WorkflowTaskTimedOutAttributes,
+ WorkflowTaskTimedOutAttributesTypedDict,
+)
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic import model_serializer
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+# Dict mirror of WorkflowTaskTimedOutRequest for dict-based construction.
+class WorkflowTaskTimedOutRequestTypedDict(TypedDict):
+    r"""Emitted when a workflow task times out.
+
+    This indicates the workflow task (a unit of workflow execution) exceeded
+    its configured timeout.
+    """
+
+    event_id: str
+    r"""Unique identifier for this event instance."""
+    root_workflow_exec_id: str
+    r"""Execution ID of the root workflow that initiated this execution chain."""
+    workflow_exec_id: str
+    r"""Execution ID of the workflow that emitted this event."""
+    workflow_run_id: str
+    r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+    workflow_name: str
+    r"""The registered name of the workflow that emitted this event."""
+    attributes: WorkflowTaskTimedOutAttributesTypedDict
+    r"""Attributes for workflow task timed out events."""
+    event_timestamp: NotRequired[int]
+    r"""Unix timestamp in nanoseconds when the event was created."""
+    parent_workflow_exec_id: NotRequired[Nullable[str]]
+    r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+    event_type: Literal["WORKFLOW_TASK_TIMED_OUT"]
+    r"""Event type discriminator."""
+
+
+class WorkflowTaskTimedOutRequest(BaseModel):
+    r"""Emitted when a workflow task times out.
+
+    This indicates the workflow task (a unit of workflow execution) exceeded
+    its configured timeout.
+    """
+
+    event_id: str
+    r"""Unique identifier for this event instance."""
+
+    root_workflow_exec_id: str
+    r"""Execution ID of the root workflow that initiated this execution chain."""
+
+    workflow_exec_id: str
+    r"""Execution ID of the workflow that emitted this event."""
+
+    workflow_run_id: str
+    r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+
+    workflow_name: str
+    r"""The registered name of the workflow that emitted this event."""
+
+    attributes: WorkflowTaskTimedOutAttributes
+    r"""Attributes for workflow task timed out events."""
+
+    event_timestamp: Optional[int] = None
+    r"""Unix timestamp in nanoseconds when the event was created."""
+
+    parent_workflow_exec_id: OptionalNullable[str] = UNSET
+    r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+
+    # Constant discriminator: defaults to the only legal value, and
+    # validate_const rejects any other input during validation.
+    event_type: Annotated[
+        Annotated[
+            Optional[Literal["WORKFLOW_TASK_TIMED_OUT"]],
+            AfterValidator(validate_const("WORKFLOW_TASK_TIMED_OUT")),
+        ],
+        pydantic.Field(alias="event_type"),
+    ] = "WORKFLOW_TASK_TIMED_OUT"
+    r"""Event type discriminator."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        # Omit optional fields left at their defaults and never emit the
+        # UNSET sentinel; keep explicit None only for nullable fields the
+        # caller actually set.
+        optional_fields = set(
+            ["event_timestamp", "parent_workflow_exec_id", "event_type"]
+        )
+        nullable_fields = set(["parent_workflow_exec_id"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k, serialized.get(n))
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
+
+        return m
+
+
+# Resolve forward references eagerly; a NameError just means a dependency is
+# not importable yet, in which case pydantic rebuilds lazily on first use.
+try:
+    WorkflowTaskTimedOutRequest.model_rebuild()
+except NameError:
+    pass
diff --git a/src/mistralai/client/models/workflowtasktimedoutresponse.py b/src/mistralai/client/models/workflowtasktimedoutresponse.py
new file mode 100644
index 00000000..2c3350e0
--- /dev/null
+++ b/src/mistralai/client/models/workflowtasktimedoutresponse.py
@@ -0,0 +1,114 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 8b7540c47083
+
+from __future__ import annotations
+from .workflowtasktimedoutattributes import (
+ WorkflowTaskTimedOutAttributes,
+ WorkflowTaskTimedOutAttributesTypedDict,
+)
+from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic import model_serializer
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, TypedDict
+
+
+# Dict mirror of WorkflowTaskTimedOutResponse for dict-based construction.
+class WorkflowTaskTimedOutResponseTypedDict(TypedDict):
+    r"""Emitted when a workflow task times out.
+
+    This indicates the workflow task (a unit of workflow execution) exceeded
+    its configured timeout.
+    """
+
+    event_id: str
+    r"""Unique identifier for this event instance."""
+    event_timestamp: int
+    r"""Unix timestamp in nanoseconds when the event was created."""
+    root_workflow_exec_id: str
+    r"""Execution ID of the root workflow that initiated this execution chain."""
+    parent_workflow_exec_id: Nullable[str]
+    r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+    workflow_exec_id: str
+    r"""Execution ID of the workflow that emitted this event."""
+    workflow_run_id: str
+    r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+    workflow_name: str
+    r"""The registered name of the workflow that emitted this event."""
+    attributes: WorkflowTaskTimedOutAttributesTypedDict
+    r"""Attributes for workflow task timed out events."""
+    event_type: Literal["WORKFLOW_TASK_TIMED_OUT"]
+    r"""Event type discriminator."""
+
+
+class WorkflowTaskTimedOutResponse(BaseModel):
+    r"""Emitted when a workflow task times out.
+
+    This indicates the workflow task (a unit of workflow execution) exceeded
+    its configured timeout.
+    """
+
+    event_id: str
+    r"""Unique identifier for this event instance."""
+
+    event_timestamp: int
+    r"""Unix timestamp in nanoseconds when the event was created."""
+
+    root_workflow_exec_id: str
+    r"""Execution ID of the root workflow that initiated this execution chain."""
+
+    parent_workflow_exec_id: Nullable[str]
+    r"""Execution ID of the parent workflow that initiated this execution. If this is a root workflow, this field is not set."""
+
+    workflow_exec_id: str
+    r"""Execution ID of the workflow that emitted this event."""
+
+    workflow_run_id: str
+    r"""Run ID of the workflow execution. Changes on continue-as-new while workflow_exec_id stays the same."""
+
+    workflow_name: str
+    r"""The registered name of the workflow that emitted this event."""
+
+    attributes: WorkflowTaskTimedOutAttributes
+    r"""Attributes for workflow task timed out events."""
+
+    # Constant discriminator: defaults to the only legal value, and
+    # validate_const rejects any other input during validation.
+    event_type: Annotated[
+        Annotated[
+            Optional[Literal["WORKFLOW_TASK_TIMED_OUT"]],
+            AfterValidator(validate_const("WORKFLOW_TASK_TIMED_OUT")),
+        ],
+        pydantic.Field(alias="event_type"),
+    ] = "WORKFLOW_TASK_TIMED_OUT"
+    r"""Event type discriminator."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        # Drop the optional discriminator when unset, never emit the UNSET
+        # sentinel, and keep explicit None only for nullable fields the
+        # caller actually set.
+        optional_fields = set(["event_type"])
+        nullable_fields = set(["parent_workflow_exec_id"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k, serialized.get(n))
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
+
+        return m
+
+
+# Resolve forward references eagerly; a NameError just means a dependency is
+# not importable yet, in which case pydantic rebuilds lazily on first use.
+try:
+    WorkflowTaskTimedOutResponse.model_rebuild()
+except NameError:
+    pass
diff --git a/src/mistralai/client/models/workflowtype.py b/src/mistralai/client/models/workflowtype.py
new file mode 100644
index 00000000..67858c88
--- /dev/null
+++ b/src/mistralai/client/models/workflowtype.py
@@ -0,0 +1,8 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 01f37d193b17
+
+from __future__ import annotations
+from typing import Literal
+
+
+# Closed set of workflow implementation kinds; currently only "code".
+WorkflowType = Literal["code",]
diff --git a/src/mistralai/client/models/workflowunarchiveresponse.py b/src/mistralai/client/models/workflowunarchiveresponse.py
new file mode 100644
index 00000000..16717856
--- /dev/null
+++ b/src/mistralai/client/models/workflowunarchiveresponse.py
@@ -0,0 +1,15 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 30bdd050feac
+
+from __future__ import annotations
+from .workflow import Workflow, WorkflowTypedDict
+from mistralai.client.types import BaseModel
+from typing_extensions import TypedDict
+
+
+class WorkflowUnarchiveResponseTypedDict(TypedDict):
+    r"""Dict form of the unarchive response, for dict-based construction."""
+
+    workflow: WorkflowTypedDict
+
+
+class WorkflowUnarchiveResponse(BaseModel):
+    r"""Response wrapping the workflow returned by an unarchive call."""
+
+    workflow: Workflow
diff --git a/src/mistralai/client/models/workflowupdaterequest.py b/src/mistralai/client/models/workflowupdaterequest.py
new file mode 100644
index 00000000..480fa47d
--- /dev/null
+++ b/src/mistralai/client/models/workflowupdaterequest.py
@@ -0,0 +1,62 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 8f3878d3c7c7
+
+from __future__ import annotations
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing_extensions import NotRequired, TypedDict
+
+
+# Dict mirror of WorkflowUpdateRequest for dict-based construction.
+class WorkflowUpdateRequestTypedDict(TypedDict):
+    display_name: NotRequired[Nullable[str]]
+    r"""New display name value"""
+    description: NotRequired[Nullable[str]]
+    r"""New description value"""
+    available_in_chat_assistant: NotRequired[Nullable[bool]]
+    r"""Whether to make the workflow available in the chat assistant"""
+
+
+class WorkflowUpdateRequest(BaseModel):
+    r"""Update payload for a workflow; fields left unset are omitted from the
+    serialized body (see serialize_model), so only assigned values are sent.
+    """
+
+    display_name: OptionalNullable[str] = UNSET
+    r"""New display name value"""
+
+    description: OptionalNullable[str] = UNSET
+    r"""New description value"""
+
+    available_in_chat_assistant: OptionalNullable[bool] = UNSET
+    r"""Whether to make the workflow available in the chat assistant"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        # Omit fields left at their defaults and never emit the UNSET
+        # sentinel; keep an explicit None (clear-the-value semantics) only
+        # when the caller actually assigned it.
+        optional_fields = set(
+            ["display_name", "description", "available_in_chat_assistant"]
+        )
+        nullable_fields = set(
+            ["display_name", "description", "available_in_chat_assistant"]
+        )
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k, serialized.get(n))
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
+
+        return m
diff --git a/src/mistralai/client/models/workflowupdateresponse.py b/src/mistralai/client/models/workflowupdateresponse.py
new file mode 100644
index 00000000..3336e448
--- /dev/null
+++ b/src/mistralai/client/models/workflowupdateresponse.py
@@ -0,0 +1,15 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 0ae165cc7a82
+
+from __future__ import annotations
+from .workflow import Workflow, WorkflowTypedDict
+from mistralai.client.types import BaseModel
+from typing_extensions import TypedDict
+
+
+class WorkflowUpdateResponseTypedDict(TypedDict):
+    r"""Dict form of the update response, for dict-based construction."""
+
+    workflow: WorkflowTypedDict
+
+
+class WorkflowUpdateResponse(BaseModel):
+    r"""Response wrapping the workflow returned by an update call."""
+
+    workflow: Workflow
diff --git a/src/mistralai/client/models/workflowwithworkerstatus.py b/src/mistralai/client/models/workflowwithworkerstatus.py
new file mode 100644
index 00000000..7f469007
--- /dev/null
+++ b/src/mistralai/client/models/workflowwithworkerstatus.py
@@ -0,0 +1,111 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: e1055203af7d
+
+from __future__ import annotations
+from .workflowtype import WorkflowType
+from mistralai.client.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing import Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+# Dict mirror of WorkflowWithWorkerStatus for dict-based construction.
+class WorkflowWithWorkerStatusTypedDict(TypedDict):
+    id: str
+    r"""Unique identifier of the workflow"""
+    name: str
+    r"""Name of the workflow"""
+    display_name: str
+    r"""Display name of the workflow"""
+    type: WorkflowType
+    customer_id: str
+    r"""Customer ID of the workflow"""
+    workspace_id: str
+    r"""Workspace ID of the workflow"""
+    active: bool
+    r"""Whether the workflow is active"""
+    description: NotRequired[Nullable[str]]
+    r"""Description of the workflow"""
+    shared_namespace: NotRequired[Nullable[str]]
+    r"""Reserved namespace for shared workflows (e.g., 'shared:my-shared-workflow')"""
+    available_in_chat_assistant: NotRequired[bool]
+    r"""Whether the workflow is available in chat assistant"""
+    is_technical: NotRequired[bool]
+    r"""Whether the workflow is technical (e.g. SDK-managed)"""
+    archived: NotRequired[bool]
+    r"""Whether the workflow is archived"""
+
+
+class WorkflowWithWorkerStatus(BaseModel):
+    r"""Workflow record as returned by the API; boolean flags default to
+    False when the server omits them.
+    """
+
+    id: str
+    r"""Unique identifier of the workflow"""
+
+    name: str
+    r"""Name of the workflow"""
+
+    display_name: str
+    r"""Display name of the workflow"""
+
+    type: WorkflowType
+
+    customer_id: str
+    r"""Customer ID of the workflow"""
+
+    workspace_id: str
+    r"""Workspace ID of the workflow"""
+
+    active: bool
+    r"""Whether the workflow is active"""
+
+    description: OptionalNullable[str] = UNSET
+    r"""Description of the workflow"""
+
+    shared_namespace: OptionalNullable[str] = UNSET
+    r"""Reserved namespace for shared workflows (e.g., 'shared:my-shared-workflow')"""
+
+    available_in_chat_assistant: Optional[bool] = False
+    r"""Whether the workflow is available in chat assistant"""
+
+    is_technical: Optional[bool] = False
+    r"""Whether the workflow is technical (e.g. SDK-managed)"""
+
+    archived: Optional[bool] = False
+    r"""Whether the workflow is archived"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        # Omit optional fields left at their defaults and never emit the
+        # UNSET sentinel; keep explicit None only for nullable fields the
+        # caller actually set.
+        optional_fields = set(
+            [
+                "description",
+                "shared_namespace",
+                "available_in_chat_assistant",
+                "is_technical",
+                "archived",
+            ]
+        )
+        nullable_fields = set(["description", "shared_namespace"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k, serialized.get(n))
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
+
+        return m
diff --git a/src/mistralai/client/runs.py b/src/mistralai/client/runs.py
new file mode 100644
index 00000000..01b667cf
--- /dev/null
+++ b/src/mistralai/client/runs.py
@@ -0,0 +1,644 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 4297d58aeb21
+
+from .basesdk import BaseSDK
+from jsonpath import JSONPath
+from mistralai.client import errors, models, utils
+from mistralai.client._hooks import HookContext
+from mistralai.client.types import OptionalNullable, UNSET
+from mistralai.client.utils import get_security_from_env
+from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response
+from typing import Any, Awaitable, Dict, List, Mapping, Optional, Union
+
+
+class Runs(BaseSDK):
+ def list_runs(
+ self,
+ *,
+ workflow_identifier: OptionalNullable[str] = UNSET,
+ search: OptionalNullable[str] = UNSET,
+ status: OptionalNullable[
+ Union[
+ models.ListRunsV1WorkflowsRunsGetStatus,
+ models.ListRunsV1WorkflowsRunsGetStatusTypedDict,
+ ]
+ ] = UNSET,
+ page_size: Optional[int] = 50,
+ next_page_token: OptionalNullable[str] = UNSET,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> Optional[models.ListRunsV1WorkflowsRunsGetResponse]:
+ r"""List Runs
+
+ :param workflow_identifier: Filter by workflow name or id
+ :param search: Search by workflow name, display name or id
+ :param status: Filter by workflow status
+ :param page_size: Number of items per page
+ :param next_page_token: Token for the next page of results
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.ListRunsV1WorkflowsRunsGetRequest(
+ workflow_identifier=workflow_identifier,
+ search=search,
+ status=status,
+ page_size=page_size,
+ next_page_token=next_page_token,
+ )
+
+ req = self._build_request(
+ method="GET",
+ path="/v1/workflows/runs",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=False,
+ request_has_path_params=False,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = self.do_request(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="list_runs_v1_workflows_runs_get",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ def next_func() -> Optional[models.ListRunsV1WorkflowsRunsGetResponse]:
+ body = utils.unmarshal_json(http_res.text, Union[Dict[Any, Any], List[Any]])
+
+ next_cursor = JSONPath("$.next_page_token").parse(body)
+
+ if len(next_cursor) == 0:
+ return None
+
+ next_cursor = next_cursor[0]
+ if next_cursor is None or str(next_cursor).strip() == "":
+ return None
+ results = JSONPath("$.executions").parse(body)
+ if len(results) == 0 or len(results[0]) == 0:
+ return None
+ limit = request.page_size if not request.page_size is None else 50
+ if len(results[0]) < limit:
+ return None
+
+ return self.list_runs(
+ workflow_identifier=workflow_identifier,
+ search=search,
+ status=status,
+ page_size=page_size,
+ next_page_token=next_cursor,
+ retries=retries,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "200", "application/json"):
+ return models.ListRunsV1WorkflowsRunsGetResponse(
+ result=unmarshal_json_response(
+ models.WorkflowExecutionListResponse, http_res
+ ),
+ next=next_func,
+ )
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
+
+ async def list_runs_async(
+ self,
+ *,
+ workflow_identifier: OptionalNullable[str] = UNSET,
+ search: OptionalNullable[str] = UNSET,
+ status: OptionalNullable[
+ Union[
+ models.ListRunsV1WorkflowsRunsGetStatus,
+ models.ListRunsV1WorkflowsRunsGetStatusTypedDict,
+ ]
+ ] = UNSET,
+ page_size: Optional[int] = 50,
+ next_page_token: OptionalNullable[str] = UNSET,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> Optional[models.ListRunsV1WorkflowsRunsGetResponse]:
+ r"""List Runs
+
+ :param workflow_identifier: Filter by workflow name or id
+ :param search: Search by workflow name, display name or id
+ :param status: Filter by workflow status
+ :param page_size: Number of items per page
+ :param next_page_token: Token for the next page of results
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.ListRunsV1WorkflowsRunsGetRequest(
+ workflow_identifier=workflow_identifier,
+ search=search,
+ status=status,
+ page_size=page_size,
+ next_page_token=next_page_token,
+ )
+
+ req = self._build_request_async(
+ method="GET",
+ path="/v1/workflows/runs",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=False,
+ request_has_path_params=False,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = await self.do_request_async(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="list_runs_v1_workflows_runs_get",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ def next_func() -> (
+ Awaitable[Optional[models.ListRunsV1WorkflowsRunsGetResponse]]
+ ):
+ body = utils.unmarshal_json(http_res.text, Union[Dict[Any, Any], List[Any]])
+
+ async def empty_result():
+ return None
+
+ next_cursor = JSONPath("$.next_page_token").parse(body)
+
+ if len(next_cursor) == 0:
+ return empty_result()
+
+ next_cursor = next_cursor[0]
+ if next_cursor is None or str(next_cursor).strip() == "":
+ return empty_result()
+ results = JSONPath("$.executions").parse(body)
+ if len(results) == 0 or len(results[0]) == 0:
+ return empty_result()
+ limit = request.page_size if not request.page_size is None else 50
+ if len(results[0]) < limit:
+ return empty_result()
+
+ return self.list_runs_async(
+ workflow_identifier=workflow_identifier,
+ search=search,
+ status=status,
+ page_size=page_size,
+ next_page_token=next_cursor,
+ retries=retries,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "200", "application/json"):
+ return models.ListRunsV1WorkflowsRunsGetResponse(
+ result=unmarshal_json_response(
+ models.WorkflowExecutionListResponse, http_res
+ ),
+ next=next_func,
+ )
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
+
+ def get_run(
+ self,
+ *,
+ run_id: str,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> models.WorkflowExecutionResponse:
+ r"""Get Run
+
+ :param run_id:
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.GetRunV1WorkflowsRunsRunIDGetRequest(
+ run_id=run_id,
+ )
+
+ req = self._build_request(
+ method="GET",
+ path="/v1/workflows/runs/{run_id}",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=False,
+ request_has_path_params=True,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = self.do_request(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="get_run_v1_workflows_runs__run_id__get",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "200", "application/json"):
+ return unmarshal_json_response(models.WorkflowExecutionResponse, http_res)
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
+
+ async def get_run_async(
+ self,
+ *,
+ run_id: str,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> models.WorkflowExecutionResponse:
+ r"""Get Run
+
+ :param run_id:
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.GetRunV1WorkflowsRunsRunIDGetRequest(
+ run_id=run_id,
+ )
+
+ req = self._build_request_async(
+ method="GET",
+ path="/v1/workflows/runs/{run_id}",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=False,
+ request_has_path_params=True,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = await self.do_request_async(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="get_run_v1_workflows_runs__run_id__get",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "200", "application/json"):
+ return unmarshal_json_response(models.WorkflowExecutionResponse, http_res)
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
+
+ def get_run_history(
+ self,
+ *,
+ run_id: str,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> Any:
+ r"""Get Run History
+
+ :param run_id:
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.GetRunHistoryV1WorkflowsRunsRunIDHistoryGetRequest(
+ run_id=run_id,
+ )
+
+ req = self._build_request(
+ method="GET",
+ path="/v1/workflows/runs/{run_id}/history",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=False,
+ request_has_path_params=True,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = self.do_request(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="get_run_history_v1_workflows_runs__run_id__history_get",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "200", "application/json"):
+ return unmarshal_json_response(Any, http_res)
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
+
+ async def get_run_history_async(
+ self,
+ *,
+ run_id: str,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> Any:
+ r"""Get Run History
+
+ :param run_id:
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.GetRunHistoryV1WorkflowsRunsRunIDHistoryGetRequest(
+ run_id=run_id,
+ )
+
+ req = self._build_request_async(
+ method="GET",
+ path="/v1/workflows/runs/{run_id}/history",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=False,
+ request_has_path_params=True,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = await self.do_request_async(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="get_run_history_v1_workflows_runs__run_id__history_get",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "200", "application/json"):
+ return unmarshal_json_response(Any, http_res)
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
diff --git a/src/mistralai/client/schedules.py b/src/mistralai/client/schedules.py
new file mode 100644
index 00000000..d6f2e5ff
--- /dev/null
+++ b/src/mistralai/client/schedules.py
@@ -0,0 +1,570 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: d3b4fe452390
+
+from .basesdk import BaseSDK
+from mistralai.client import errors, models, utils
+from mistralai.client._hooks import HookContext
+from mistralai.client.types import OptionalNullable, UNSET
+from mistralai.client.utils import get_security_from_env
+from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response
+from typing import Any, Mapping, Optional, Union
+
+
+class Schedules(BaseSDK):
+ def get_schedules(
+ self,
+ *,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> models.WorkflowScheduleListResponse:
+ r"""Get Schedules
+
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+ req = self._build_request(
+ method="GET",
+ path="/v1/workflows/schedules",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=None,
+ request_body_required=False,
+ request_has_path_params=False,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = self.do_request(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="get_schedules_v1_workflows_schedules_get",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ if utils.match_response(http_res, "200", "application/json"):
+ return unmarshal_json_response(
+ models.WorkflowScheduleListResponse, http_res
+ )
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
+
+ async def get_schedules_async(
+ self,
+ *,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> models.WorkflowScheduleListResponse:
+ r"""Get Schedules
+
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+ req = self._build_request_async(
+ method="GET",
+ path="/v1/workflows/schedules",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=None,
+ request_body_required=False,
+ request_has_path_params=False,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = await self.do_request_async(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="get_schedules_v1_workflows_schedules_get",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ if utils.match_response(http_res, "200", "application/json"):
+ return unmarshal_json_response(
+ models.WorkflowScheduleListResponse, http_res
+ )
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
+
+ def schedule_workflow(
+ self,
+ *,
+ schedule: Union[models.ScheduleDefinition, models.ScheduleDefinitionTypedDict],
+ workflow_registration_id: OptionalNullable[str] = UNSET,
+ workflow_version_id: OptionalNullable[str] = UNSET,
+ workflow_identifier: OptionalNullable[str] = UNSET,
+ workflow_task_queue: OptionalNullable[str] = UNSET,
+ schedule_id: OptionalNullable[str] = UNSET,
+ deployment_name: OptionalNullable[str] = UNSET,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> models.WorkflowScheduleResponse:
+ r"""Schedule Workflow
+
+ :param schedule: Specification of the times scheduled actions may occur.
+
+ The times are the union of :py:attr:`calendars`, :py:attr:`intervals`, and
+ :py:attr:`cron_expressions` excluding anything in :py:attr:`skip`.
+
+ Used for input where schedule_id is optional (can be provided or auto-generated).
+ :param workflow_registration_id: The ID of the workflow registration to schedule
+ :param workflow_version_id: Deprecated: use workflow_registration_id
+ :param workflow_identifier: The name or ID of the workflow to schedule
+ :param workflow_task_queue: Deprecated. Use deployment_name instead.
+ :param schedule_id: Allows you to specify a custom schedule ID. If not provided, a random ID will be generated.
+ :param deployment_name: Name of the deployment to route this schedule to
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.WorkflowScheduleRequest(
+ schedule=utils.get_pydantic_model(schedule, models.ScheduleDefinition),
+ workflow_registration_id=workflow_registration_id,
+ workflow_version_id=workflow_version_id,
+ workflow_identifier=workflow_identifier,
+ workflow_task_queue=workflow_task_queue,
+ schedule_id=schedule_id,
+ deployment_name=deployment_name,
+ )
+
+ req = self._build_request(
+ method="POST",
+ path="/v1/workflows/schedules",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=True,
+ request_has_path_params=False,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ get_serialized_body=lambda: utils.serialize_request_body(
+ request, False, False, "json", models.WorkflowScheduleRequest
+ ),
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = self.do_request(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="schedule_workflow_v1_workflows_schedules_post",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "201", "application/json"):
+ return unmarshal_json_response(models.WorkflowScheduleResponse, http_res)
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
+
+ async def schedule_workflow_async(
+ self,
+ *,
+ schedule: Union[models.ScheduleDefinition, models.ScheduleDefinitionTypedDict],
+ workflow_registration_id: OptionalNullable[str] = UNSET,
+ workflow_version_id: OptionalNullable[str] = UNSET,
+ workflow_identifier: OptionalNullable[str] = UNSET,
+ workflow_task_queue: OptionalNullable[str] = UNSET,
+ schedule_id: OptionalNullable[str] = UNSET,
+ deployment_name: OptionalNullable[str] = UNSET,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> models.WorkflowScheduleResponse:
+ r"""Schedule Workflow
+
+ :param schedule: Specification of the times scheduled actions may occur.
+
+ The times are the union of :py:attr:`calendars`, :py:attr:`intervals`, and
+ :py:attr:`cron_expressions` excluding anything in :py:attr:`skip`.
+
+ Used for input where schedule_id is optional (can be provided or auto-generated).
+ :param workflow_registration_id: The ID of the workflow registration to schedule
+ :param workflow_version_id: Deprecated: use workflow_registration_id
+ :param workflow_identifier: The name or ID of the workflow to schedule
+ :param workflow_task_queue: Deprecated. Use deployment_name instead.
+ :param schedule_id: Allows you to specify a custom schedule ID. If not provided, a random ID will be generated.
+ :param deployment_name: Name of the deployment to route this schedule to
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.WorkflowScheduleRequest(
+ schedule=utils.get_pydantic_model(schedule, models.ScheduleDefinition),
+ workflow_registration_id=workflow_registration_id,
+ workflow_version_id=workflow_version_id,
+ workflow_identifier=workflow_identifier,
+ workflow_task_queue=workflow_task_queue,
+ schedule_id=schedule_id,
+ deployment_name=deployment_name,
+ )
+
+ req = self._build_request_async(
+ method="POST",
+ path="/v1/workflows/schedules",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=True,
+ request_has_path_params=False,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ get_serialized_body=lambda: utils.serialize_request_body(
+ request, False, False, "json", models.WorkflowScheduleRequest
+ ),
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = await self.do_request_async(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="schedule_workflow_v1_workflows_schedules_post",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "201", "application/json"):
+ return unmarshal_json_response(models.WorkflowScheduleResponse, http_res)
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
+
+ def unschedule_workflow(
+ self,
+ *,
+ schedule_id: str,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ):
+ r"""Unschedule Workflow
+
+ :param schedule_id:
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.UnscheduleWorkflowV1WorkflowsSchedulesScheduleIDDeleteRequest(
+ schedule_id=schedule_id,
+ )
+
+ req = self._build_request(
+ method="DELETE",
+ path="/v1/workflows/schedules/{schedule_id}",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=False,
+ request_has_path_params=True,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = self.do_request(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="unschedule_workflow_v1_workflows_schedules__schedule_id__delete",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "204", "*"):
+ return
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
+
+ async def unschedule_workflow_async(
+ self,
+ *,
+ schedule_id: str,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ):
+ r"""Unschedule Workflow
+
+ :param schedule_id:
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.UnscheduleWorkflowV1WorkflowsSchedulesScheduleIDDeleteRequest(
+ schedule_id=schedule_id,
+ )
+
+ req = self._build_request_async(
+ method="DELETE",
+ path="/v1/workflows/schedules/{schedule_id}",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=False,
+ request_has_path_params=True,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = await self.do_request_async(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="unschedule_workflow_v1_workflows_schedules__schedule_id__delete",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "204", "*"):
+ return
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
diff --git a/src/mistralai/client/sdk.py b/src/mistralai/client/sdk.py
index 52fc5d9a..03338f46 100644
--- a/src/mistralai/client/sdk.py
+++ b/src/mistralai/client/sdk.py
@@ -23,11 +23,13 @@
from mistralai.client.chat import Chat
from mistralai.client.classifiers import Classifiers
from mistralai.client.embeddings import Embeddings
+ from mistralai.client.events import Events
from mistralai.client.files import Files
from mistralai.client.fim import Fim
from mistralai.client.fine_tuning import FineTuning
from mistralai.client.models_ import Models
from mistralai.client.ocr import Ocr
+ from mistralai.client.workflows import Workflows
class Mistral(BaseSDK):
@@ -53,6 +55,8 @@ class Mistral(BaseSDK):
r"""Classifiers API."""
ocr: "Ocr"
r"""OCR API"""
+ workflows: "Workflows"
+ events: "Events"
_sub_sdk_map = {
"audio": ("mistralai.client.audio", "Audio"),
"models": ("mistralai.client.models_", "Models"),
@@ -66,6 +70,8 @@ class Mistral(BaseSDK):
"embeddings": ("mistralai.client.embeddings", "Embeddings"),
"classifiers": ("mistralai.client.classifiers", "Classifiers"),
"ocr": ("mistralai.client.ocr", "Ocr"),
+ "workflows": ("mistralai.client.workflows", "Workflows"),
+ "events": ("mistralai.client.events", "Events"),
}
def __init__(
diff --git a/src/mistralai/client/voices.py b/src/mistralai/client/voices.py
index 68d4bb5b..2d571857 100644
--- a/src/mistralai/client/voices.py
+++ b/src/mistralai/client/voices.py
@@ -16,6 +16,7 @@ def list(
*,
limit: Optional[int] = 10,
offset: Optional[int] = 0,
+ type_: Optional[models.ListVoicesV1AudioVoicesGetType] = "all",
retries: OptionalNullable[utils.RetryConfig] = UNSET,
server_url: Optional[str] = None,
timeout_ms: Optional[int] = None,
@@ -27,6 +28,7 @@ def list(
:param limit: Maximum number of voices to return
:param offset: Offset for pagination
+ :param type: Filter the voices between customs and presets
:param retries: Override the default retry configuration for this method
:param server_url: Override the default server URL for this method
:param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -45,6 +47,7 @@ def list(
request = models.ListVoicesV1AudioVoicesGetRequest(
limit=limit,
offset=offset,
+ type=type_,
)
req = self._build_request(
@@ -109,6 +112,7 @@ async def list_async(
*,
limit: Optional[int] = 10,
offset: Optional[int] = 0,
+ type_: Optional[models.ListVoicesV1AudioVoicesGetType] = "all",
retries: OptionalNullable[utils.RetryConfig] = UNSET,
server_url: Optional[str] = None,
timeout_ms: Optional[int] = None,
@@ -120,6 +124,7 @@ async def list_async(
:param limit: Maximum number of voices to return
:param offset: Offset for pagination
+ :param type: Filter the voices between customs and presets
:param retries: Override the default retry configuration for this method
:param server_url: Override the default server URL for this method
:param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -138,6 +143,7 @@ async def list_async(
request = models.ListVoicesV1AudioVoicesGetRequest(
limit=limit,
offset=offset,
+ type=type_,
)
req = self._build_request_async(
diff --git a/src/mistralai/client/workflows.py b/src/mistralai/client/workflows.py
new file mode 100644
index 00000000..f3150791
--- /dev/null
+++ b/src/mistralai/client/workflows.py
@@ -0,0 +1,2213 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: e2a0381191f6
+
+from .basesdk import BaseSDK
+from .sdkconfiguration import SDKConfiguration
+from jsonpath import JSONPath
+from mistralai.client import errors, models, utils
+from mistralai.client._hooks import HookContext
+from mistralai.client.deployments import Deployments
+from mistralai.client.executions import Executions
+from mistralai.client.metrics import Metrics
+from mistralai.client.runs import Runs
+from mistralai.client.schedules import Schedules
+from mistralai.client.types import OptionalNullable, UNSET
+from mistralai.client.utils import get_security_from_env
+from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response
+from mistralai.client.workflows_events import WorkflowsEvents
+from typing import Any, Awaitable, Dict, List, Mapping, Optional, Union
+from typing_extensions import deprecated
+
+# region imports
+import asyncio
+import time
+# endregion imports
+
+
+class Workflows(BaseSDK):
+ executions: Executions
+ metrics: Metrics
+ runs: Runs
+ schedules: Schedules
+ events: WorkflowsEvents
+ deployments: Deployments
+
+ def __init__(
+ self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None
+ ) -> None:
+ BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref)
+ self.sdk_configuration = sdk_config
+ self._init_sdks()
+
+ def _init_sdks(self):
+ self.executions = Executions(self.sdk_configuration, parent_ref=self.parent_ref)
+ self.metrics = Metrics(self.sdk_configuration, parent_ref=self.parent_ref)
+ self.runs = Runs(self.sdk_configuration, parent_ref=self.parent_ref)
+ self.schedules = Schedules(self.sdk_configuration, parent_ref=self.parent_ref)
+ self.events = WorkflowsEvents(
+ self.sdk_configuration, parent_ref=self.parent_ref
+ )
+ self.deployments = Deployments(
+ self.sdk_configuration, parent_ref=self.parent_ref
+ )
+
+ # region sdk-class-body
+ def execute_workflow_and_wait(
+ self,
+ workflow_identifier: str,
+ input: OptionalNullable[Dict[str, Any]] = UNSET,
+ execution_id: OptionalNullable[str] = UNSET,
+ deployment_name: OptionalNullable[str] = UNSET,
+ custom_tracing_attributes: OptionalNullable[Dict[str, str]] = UNSET,
+ polling_interval: int = 5,
+ max_attempts: Optional[int] = None,
+ use_api_sync: bool = False,
+ timeout_seconds: OptionalNullable[float] = UNSET,
+ ) -> Any:
+ """Execute a workflow and wait for its completion.
+
+ Args:
+ workflow_identifier: The workflow name or ID.
+ input: Input parameters for the workflow
+ execution_id: Optional custom execution ID
+ deployment_name: Name of the deployment to route this execution to
+ custom_tracing_attributes: Custom tracing attributes
+ polling_interval: Seconds between status checks when polling
+ max_attempts: Maximum number of polling attempts when polling (None for unlimited)
+ use_api_sync: Whether to use the API's built-in sync execution capability
+ timeout_seconds: Maximum time to wait in seconds when using API sync
+
+ Returns:
+ The workflow result directly
+
+ Raises:
+ TimeoutError: If max_attempts is reached and workflow is still running
+ RuntimeError: If workflow fails or terminates abnormally
+ """
+ if use_api_sync:
+ # Use the API's built-in synchronous execution
+ response = self.execute_workflow(
+ workflow_identifier=workflow_identifier,
+ input=input,
+ execution_id=execution_id,
+ wait_for_result=True,
+ timeout_seconds=timeout_seconds,
+ custom_tracing_attributes=custom_tracing_attributes,
+ deployment_name=deployment_name,
+ )
+ return response.result
+ # Use polling method
+ execution = self.execute_workflow(
+ workflow_identifier=workflow_identifier,
+ input=input,
+ execution_id=execution_id,
+ custom_tracing_attributes=custom_tracing_attributes,
+ deployment_name=deployment_name,
+ )
+
+ # Wait for completion
+ final_execution = self.wait_for_workflow_completion(
+ execution.execution_id, polling_interval, max_attempts
+ )
+
+ return final_execution.result
+
+ def wait_for_workflow_completion(
+ self,
+ execution_id: str,
+ polling_interval: int = 5,
+ max_attempts: Optional[int] = None,
+ ) -> models.WorkflowExecutionResponse:
+ """Wait for a workflow to complete by polling its status.
+
+ Args:
+ execution_id: Execution ID of the workflow
+ polling_interval: Seconds between status checks
+ max_attempts: Maximum number of polling attempts (None for unlimited)
+
+ Returns:
+ WorkflowExecutionResponse with the final execution details
+
+ Raises:
+ TimeoutError: If max_attempts is reached and workflow is still running
+ RuntimeError: If workflow fails or terminates abnormally
+ """
+ attempts = 0
+ while True:
+ response = self.executions.get_workflow_execution(execution_id=execution_id)
+
+ if response.status != "RUNNING":
+ if response.status == "COMPLETED":
+ return response
+ raise RuntimeError(f"Workflow failed with status: {response.status}")
+
+ attempts += 1
+ if max_attempts is not None and attempts >= max_attempts:
+ raise TimeoutError(
+ f"Workflow is still running after {max_attempts} polling attempts"
+ )
+
+ time.sleep(polling_interval)
+
+ async def execute_workflow_and_wait_async(
+ self,
+ workflow_identifier: str,
+ input: OptionalNullable[Dict[str, Any]] = UNSET,
+ execution_id: OptionalNullable[str] = UNSET,
+ deployment_name: OptionalNullable[str] = UNSET,
+ custom_tracing_attributes: OptionalNullable[Dict[str, str]] = UNSET,
+ polling_interval: int = 5,
+ max_attempts: Optional[int] = None,
+ use_api_sync: bool = False,
+ timeout_seconds: OptionalNullable[float] = UNSET,
+ ) -> Any:
+ """Execute a workflow and wait for its completion (async version).
+
+ Args:
+ workflow_identifier: The workflow name or ID.
+ input: Input parameters for the workflow
+ execution_id: Optional custom execution ID
+ deployment_name: Name of the deployment to route this execution to
+ custom_tracing_attributes: Custom tracing attributes
+ polling_interval: Seconds between status checks when polling
+ max_attempts: Maximum number of polling attempts when polling (None for unlimited)
+ use_api_sync: Whether to use the API's built-in sync execution capability
+ timeout_seconds: Maximum time to wait in seconds when using API sync
+
+ Returns:
+ The workflow result directly
+
+ Raises:
+ TimeoutError: If max_attempts is reached and workflow is still running
+ RuntimeError: If workflow fails or terminates abnormally
+ """
+ if use_api_sync:
+ # Use the API's built-in synchronous execution
+ response = await self.execute_workflow_async(
+ workflow_identifier=workflow_identifier,
+ input=input,
+ execution_id=execution_id,
+ wait_for_result=True,
+ timeout_seconds=timeout_seconds,
+ custom_tracing_attributes=custom_tracing_attributes,
+ deployment_name=deployment_name,
+ )
+ return response.result
+
+ # Use polling method
+ execution = await self.execute_workflow_async(
+ workflow_identifier=workflow_identifier,
+ input=input,
+ execution_id=execution_id,
+ custom_tracing_attributes=custom_tracing_attributes,
+ deployment_name=deployment_name,
+ )
+
+ # Wait for completion
+ final_execution = await self.wait_for_workflow_completion_async(
+ execution.execution_id, polling_interval, max_attempts
+ )
+
+ return final_execution.result
+
+ async def wait_for_workflow_completion_async(
+ self,
+ execution_id: str,
+ polling_interval: int = 5,
+ max_attempts: Optional[int] = None,
+ ) -> models.WorkflowExecutionResponse:
+ """Wait for a workflow to complete by polling its status (async version).
+
+ Args:
+ execution_id: Execution ID of the workflow
+ polling_interval: Seconds between status checks
+ max_attempts: Maximum number of polling attempts (None for unlimited)
+
+ Returns:
+ WorkflowExecutionResponse with the final execution details
+
+ Raises:
+ TimeoutError: If max_attempts is reached and workflow is still running
+ RuntimeError: If workflow fails or terminates abnormally
+ """
+ attempts = 0
+ while True:
+ response = await self.executions.get_workflow_execution_async(
+ execution_id=execution_id
+ )
+
+ if response.status != "RUNNING":
+ if response.status == "COMPLETED":
+ return response
+ raise RuntimeError(f"Workflow failed with status: {response.status}")
+
+ attempts += 1
+ if max_attempts is not None and attempts >= max_attempts:
+ raise TimeoutError(
+ f"Workflow is still running after {max_attempts} polling attempts"
+ )
+
+ await asyncio.sleep(polling_interval)
+
+ # endregion sdk-class-body
+
+ def get_workflows(
+ self,
+ *,
+ active_only: Optional[bool] = False,
+ include_shared: Optional[bool] = True,
+ available_in_chat_assistant: OptionalNullable[bool] = UNSET,
+ archived: OptionalNullable[bool] = UNSET,
+ cursor: OptionalNullable[str] = UNSET,
+ limit: Optional[int] = 50,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> Optional[models.GetWorkflowsV1WorkflowsGetResponse]:
+ r"""Get Workflows
+
+ :param active_only: Whether to only return active workflows
+ :param include_shared: Whether to include shared workflows
+ :param available_in_chat_assistant: Whether to only return workflows compatible with chat assistant
+ :param archived: Filter by archived state. False=exclude archived, True=only archived, None=include all
+ :param cursor: The cursor for pagination
+ :param limit: The maximum number of workflows to return
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.GetWorkflowsV1WorkflowsGetRequest(
+ active_only=active_only,
+ include_shared=include_shared,
+ available_in_chat_assistant=available_in_chat_assistant,
+ archived=archived,
+ cursor=cursor,
+ limit=limit,
+ )
+
+ req = self._build_request(
+ method="GET",
+ path="/v1/workflows",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=False,
+ request_has_path_params=False,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = self.do_request(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="get_workflows_v1_workflows_get",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ def next_func() -> Optional[models.GetWorkflowsV1WorkflowsGetResponse]:
+ body = utils.unmarshal_json(http_res.text, Union[Dict[Any, Any], List[Any]])
+
+ next_cursor = JSONPath("$.next_cursor").parse(body)
+
+ if len(next_cursor) == 0:
+ return None
+
+ next_cursor = next_cursor[0]
+ if next_cursor is None or str(next_cursor).strip() == "":
+ return None
+ results = JSONPath("$.workflows").parse(body)
+ if len(results) == 0 or len(results[0]) == 0:
+ return None
+ limit = request.limit if not request.limit is None else 50
+ if len(results[0]) < limit:
+ return None
+
+ return self.get_workflows(
+ active_only=active_only,
+ include_shared=include_shared,
+ available_in_chat_assistant=available_in_chat_assistant,
+ archived=archived,
+ cursor=next_cursor,
+ limit=limit,
+ retries=retries,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "200", "application/json"):
+ return models.GetWorkflowsV1WorkflowsGetResponse(
+ result=unmarshal_json_response(models.WorkflowListResponse, http_res),
+ next=next_func,
+ )
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
+
+ async def get_workflows_async(
+ self,
+ *,
+ active_only: Optional[bool] = False,
+ include_shared: Optional[bool] = True,
+ available_in_chat_assistant: OptionalNullable[bool] = UNSET,
+ archived: OptionalNullable[bool] = UNSET,
+ cursor: OptionalNullable[str] = UNSET,
+ limit: Optional[int] = 50,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> Optional[models.GetWorkflowsV1WorkflowsGetResponse]:
+ r"""Get Workflows
+
+ :param active_only: Whether to only return active workflows
+ :param include_shared: Whether to include shared workflows
+ :param available_in_chat_assistant: Whether to only return workflows compatible with chat assistant
+ :param archived: Filter by archived state. False=exclude archived, True=only archived, None=include all
+ :param cursor: The cursor for pagination
+ :param limit: The maximum number of workflows to return
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.GetWorkflowsV1WorkflowsGetRequest(
+ active_only=active_only,
+ include_shared=include_shared,
+ available_in_chat_assistant=available_in_chat_assistant,
+ archived=archived,
+ cursor=cursor,
+ limit=limit,
+ )
+
+ req = self._build_request_async(
+ method="GET",
+ path="/v1/workflows",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=False,
+ request_has_path_params=False,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = await self.do_request_async(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="get_workflows_v1_workflows_get",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ def next_func() -> (
+ Awaitable[Optional[models.GetWorkflowsV1WorkflowsGetResponse]]
+ ):
+ body = utils.unmarshal_json(http_res.text, Union[Dict[Any, Any], List[Any]])
+
+ async def empty_result():
+ return None
+
+ next_cursor = JSONPath("$.next_cursor").parse(body)
+
+ if len(next_cursor) == 0:
+ return empty_result()
+
+ next_cursor = next_cursor[0]
+ if next_cursor is None or str(next_cursor).strip() == "":
+ return empty_result()
+ results = JSONPath("$.workflows").parse(body)
+ if len(results) == 0 or len(results[0]) == 0:
+ return empty_result()
+ limit = request.limit if not request.limit is None else 50
+ if len(results[0]) < limit:
+ return empty_result()
+
+ return self.get_workflows_async(
+ active_only=active_only,
+ include_shared=include_shared,
+ available_in_chat_assistant=available_in_chat_assistant,
+ archived=archived,
+ cursor=next_cursor,
+ limit=limit,
+ retries=retries,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "200", "application/json"):
+ return models.GetWorkflowsV1WorkflowsGetResponse(
+ result=unmarshal_json_response(models.WorkflowListResponse, http_res),
+ next=next_func,
+ )
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
+
    def get_workflow_registrations(
        self,
        *,
        workflow_id: OptionalNullable[str] = UNSET,
        task_queue: OptionalNullable[str] = UNSET,
        active_only: Optional[bool] = False,
        include_shared: Optional[bool] = True,
        workflow_search: OptionalNullable[str] = UNSET,
        archived: OptionalNullable[bool] = UNSET,
        with_workflow: Optional[bool] = False,
        available_in_chat_assistant: OptionalNullable[bool] = UNSET,
        limit: Optional[int] = 50,
        cursor: OptionalNullable[str] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.WorkflowRegistrationListResponse:
        r"""Get Workflow Registrations

        List workflow registrations via ``GET /v1/workflows/registrations``,
        filtered by the given query parameters and paginated with ``cursor``.

        :param workflow_id: The workflow ID to filter by
        :param task_queue: The task queue to filter by
        :param active_only: Whether to only return active workflows versions
        :param include_shared: Whether to include shared workflow versions
        :param workflow_search: The workflow name to filter by
        :param archived: Filter by archived state. False=exclude archived, True=only archived, None=include all
        :param with_workflow: Whether to include the workflow definition
        :param available_in_chat_assistant: Whether to only return workflows compatible with chat assistant
        :param limit: The maximum number of workflows versions to return
        :param cursor: The cursor for pagination
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Resolve per-call overrides, falling back to the SDK-wide configuration.
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        # Bundle the query parameters into the generated request model.
        request = models.GetWorkflowRegistrationsV1WorkflowsRegistrationsGetRequest(
            workflow_id=workflow_id,
            task_queue=task_queue,
            active_only=active_only,
            include_shared=include_shared,
            workflow_search=workflow_search,
            archived=archived,
            with_workflow=with_workflow,
            available_in_chat_assistant=available_in_chat_assistant,
            limit=limit,
            cursor=cursor,
        )

        # Build the HTTP request (query serialization, auth, headers).
        req = self._build_request(
            method="GET",
            path="/v1/workflows/registrations",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # An explicit per-call retry config wins; otherwise use the SDK default.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            # Only these transient status codes are retried.
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        # Execute the request through the SDK hook pipeline.
        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="get_workflow_registrations_v1_workflows_registrations_get",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # 200 -> parsed response model; 422 -> validation error;
        # other 4XX/5XX -> generic SDKError carrying the response body text.
        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(
                models.WorkflowRegistrationListResponse, http_res
            )
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)
+
    async def get_workflow_registrations_async(
        self,
        *,
        workflow_id: OptionalNullable[str] = UNSET,
        task_queue: OptionalNullable[str] = UNSET,
        active_only: Optional[bool] = False,
        include_shared: Optional[bool] = True,
        workflow_search: OptionalNullable[str] = UNSET,
        archived: OptionalNullable[bool] = UNSET,
        with_workflow: Optional[bool] = False,
        available_in_chat_assistant: OptionalNullable[bool] = UNSET,
        limit: Optional[int] = 50,
        cursor: OptionalNullable[str] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.WorkflowRegistrationListResponse:
        r"""Get Workflow Registrations

        Async variant: list workflow registrations via
        ``GET /v1/workflows/registrations``, filtered by the given query
        parameters and paginated with ``cursor``.

        :param workflow_id: The workflow ID to filter by
        :param task_queue: The task queue to filter by
        :param active_only: Whether to only return active workflows versions
        :param include_shared: Whether to include shared workflow versions
        :param workflow_search: The workflow name to filter by
        :param archived: Filter by archived state. False=exclude archived, True=only archived, None=include all
        :param with_workflow: Whether to include the workflow definition
        :param available_in_chat_assistant: Whether to only return workflows compatible with chat assistant
        :param limit: The maximum number of workflows versions to return
        :param cursor: The cursor for pagination
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Resolve per-call overrides, falling back to the SDK-wide configuration.
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        # Bundle the query parameters into the generated request model.
        request = models.GetWorkflowRegistrationsV1WorkflowsRegistrationsGetRequest(
            workflow_id=workflow_id,
            task_queue=task_queue,
            active_only=active_only,
            include_shared=include_shared,
            workflow_search=workflow_search,
            archived=archived,
            with_workflow=with_workflow,
            available_in_chat_assistant=available_in_chat_assistant,
            limit=limit,
            cursor=cursor,
        )

        # Build the HTTP request (query serialization, auth, headers).
        req = self._build_request_async(
            method="GET",
            path="/v1/workflows/registrations",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # An explicit per-call retry config wins; otherwise use the SDK default.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            # Only these transient status codes are retried.
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        # Execute the request asynchronously through the SDK hook pipeline.
        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="get_workflow_registrations_v1_workflows_registrations_get",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # 200 -> parsed response model; 422 -> validation error;
        # other 4XX/5XX -> generic SDKError carrying the response body text.
        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(
                models.WorkflowRegistrationListResponse, http_res
            )
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)
+
    def execute_workflow(
        self,
        *,
        workflow_identifier: str,
        execution_id: OptionalNullable[str] = UNSET,
        input: OptionalNullable[Dict[str, Any]] = UNSET,
        encoded_input: OptionalNullable[
            Union[models.NetworkEncodedInput, models.NetworkEncodedInputTypedDict]
        ] = UNSET,
        wait_for_result: Optional[bool] = False,
        timeout_seconds: OptionalNullable[float] = UNSET,
        custom_tracing_attributes: OptionalNullable[Dict[str, str]] = UNSET,
        task_queue: OptionalNullable[str] = UNSET,
        deployment_name: OptionalNullable[str] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.ResponseExecuteWorkflowV1WorkflowsWorkflowIdentifierExecutePost:
        r"""Execute Workflow

        Start a workflow execution via
        ``POST /v1/workflows/{workflow_identifier}/execute``.

        :param workflow_identifier:
        :param execution_id: Allows you to specify a custom execution ID. If not provided, a random ID will be generated.
        :param input: The input to the workflow. This should be a dictionary that matches the workflow's input schema.
        :param encoded_input: Encoded input to the workflow, used when payload encoding is enabled.
        :param wait_for_result: If true, wait for the workflow to complete and return the result directly.
        :param timeout_seconds: Maximum time to wait for completion when wait_for_result is true.
        :param custom_tracing_attributes:
        :param task_queue: Deprecated. Use deployment_name instead.
        :param deployment_name: Name of the deployment to route this execution to
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Resolve per-call overrides, falling back to the SDK-wide configuration.
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        # Path parameter plus the JSON body wrapped in the generated models;
        # encoded_input may be a TypedDict and is coerced to the pydantic model.
        request = models.ExecuteWorkflowV1WorkflowsWorkflowIdentifierExecutePostRequest(
            workflow_identifier=workflow_identifier,
            workflow_execution_request=models.WorkflowExecutionRequest(
                execution_id=execution_id,
                input=input,
                encoded_input=utils.get_pydantic_model(
                    encoded_input, OptionalNullable[models.NetworkEncodedInput]
                ),
                wait_for_result=wait_for_result,
                timeout_seconds=timeout_seconds,
                custom_tracing_attributes=custom_tracing_attributes,
                task_queue=task_queue,
                deployment_name=deployment_name,
            ),
        )

        # Build the HTTP request; the body is serialized lazily as JSON.
        req = self._build_request(
            method="POST",
            path="/v1/workflows/{workflow_identifier}/execute",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request.workflow_execution_request,
                False,
                False,
                "json",
                models.WorkflowExecutionRequest,
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # An explicit per-call retry config wins; otherwise use the SDK default.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            # Only these transient status codes are retried.
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        # Execute the request through the SDK hook pipeline.
        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="execute_workflow_v1_workflows__workflow_identifier__execute_post",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # 200 -> parsed response model; 422 -> validation error;
        # other 4XX/5XX -> generic SDKError carrying the response body text.
        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(
                models.ResponseExecuteWorkflowV1WorkflowsWorkflowIdentifierExecutePost,
                http_res,
            )
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)
+
    async def execute_workflow_async(
        self,
        *,
        workflow_identifier: str,
        execution_id: OptionalNullable[str] = UNSET,
        input: OptionalNullable[Dict[str, Any]] = UNSET,
        encoded_input: OptionalNullable[
            Union[models.NetworkEncodedInput, models.NetworkEncodedInputTypedDict]
        ] = UNSET,
        wait_for_result: Optional[bool] = False,
        timeout_seconds: OptionalNullable[float] = UNSET,
        custom_tracing_attributes: OptionalNullable[Dict[str, str]] = UNSET,
        task_queue: OptionalNullable[str] = UNSET,
        deployment_name: OptionalNullable[str] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.ResponseExecuteWorkflowV1WorkflowsWorkflowIdentifierExecutePost:
        r"""Execute Workflow

        Async variant: start a workflow execution via
        ``POST /v1/workflows/{workflow_identifier}/execute``.

        :param workflow_identifier:
        :param execution_id: Allows you to specify a custom execution ID. If not provided, a random ID will be generated.
        :param input: The input to the workflow. This should be a dictionary that matches the workflow's input schema.
        :param encoded_input: Encoded input to the workflow, used when payload encoding is enabled.
        :param wait_for_result: If true, wait for the workflow to complete and return the result directly.
        :param timeout_seconds: Maximum time to wait for completion when wait_for_result is true.
        :param custom_tracing_attributes:
        :param task_queue: Deprecated. Use deployment_name instead.
        :param deployment_name: Name of the deployment to route this execution to
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Resolve per-call overrides, falling back to the SDK-wide configuration.
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        # Path parameter plus the JSON body wrapped in the generated models;
        # encoded_input may be a TypedDict and is coerced to the pydantic model.
        request = models.ExecuteWorkflowV1WorkflowsWorkflowIdentifierExecutePostRequest(
            workflow_identifier=workflow_identifier,
            workflow_execution_request=models.WorkflowExecutionRequest(
                execution_id=execution_id,
                input=input,
                encoded_input=utils.get_pydantic_model(
                    encoded_input, OptionalNullable[models.NetworkEncodedInput]
                ),
                wait_for_result=wait_for_result,
                timeout_seconds=timeout_seconds,
                custom_tracing_attributes=custom_tracing_attributes,
                task_queue=task_queue,
                deployment_name=deployment_name,
            ),
        )

        # Build the HTTP request; the body is serialized lazily as JSON.
        req = self._build_request_async(
            method="POST",
            path="/v1/workflows/{workflow_identifier}/execute",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request.workflow_execution_request,
                False,
                False,
                "json",
                models.WorkflowExecutionRequest,
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # An explicit per-call retry config wins; otherwise use the SDK default.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            # Only these transient status codes are retried.
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        # Execute the request asynchronously through the SDK hook pipeline.
        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="execute_workflow_v1_workflows__workflow_identifier__execute_post",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # 200 -> parsed response model; 422 -> validation error;
        # other 4XX/5XX -> generic SDKError carrying the response body text.
        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(
                models.ResponseExecuteWorkflowV1WorkflowsWorkflowIdentifierExecutePost,
                http_res,
            )
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)
+
    @deprecated(
        "warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
    )
    def execute_workflow_registration(
        self,
        *,
        workflow_registration_id: str,
        execution_id: OptionalNullable[str] = UNSET,
        input: OptionalNullable[Dict[str, Any]] = UNSET,
        encoded_input: OptionalNullable[
            Union[models.NetworkEncodedInput, models.NetworkEncodedInputTypedDict]
        ] = UNSET,
        wait_for_result: Optional[bool] = False,
        timeout_seconds: OptionalNullable[float] = UNSET,
        custom_tracing_attributes: OptionalNullable[Dict[str, str]] = UNSET,
        task_queue: OptionalNullable[str] = UNSET,
        deployment_name: OptionalNullable[str] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.ResponseExecuteWorkflowRegistrationV1WorkflowsRegistrationsWorkflowRegistrationIDExecutePost:
        r"""Execute Workflow Registration

        Deprecated: start an execution for a specific workflow registration via
        ``POST /v1/workflows/registrations/{workflow_registration_id}/execute``.

        :param workflow_registration_id:
        :param execution_id: Allows you to specify a custom execution ID. If not provided, a random ID will be generated.
        :param input: The input to the workflow. This should be a dictionary that matches the workflow's input schema.
        :param encoded_input: Encoded input to the workflow, used when payload encoding is enabled.
        :param wait_for_result: If true, wait for the workflow to complete and return the result directly.
        :param timeout_seconds: Maximum time to wait for completion when wait_for_result is true.
        :param custom_tracing_attributes:
        :param task_queue: Deprecated. Use deployment_name instead.
        :param deployment_name: Name of the deployment to route this execution to
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Resolve per-call overrides, falling back to the SDK-wide configuration.
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        # Path parameter plus the JSON body wrapped in the generated models;
        # encoded_input may be a TypedDict and is coerced to the pydantic model.
        request = models.ExecuteWorkflowRegistrationV1WorkflowsRegistrationsWorkflowRegistrationIDExecutePostRequest(
            workflow_registration_id=workflow_registration_id,
            workflow_execution_request=models.WorkflowExecutionRequest(
                execution_id=execution_id,
                input=input,
                encoded_input=utils.get_pydantic_model(
                    encoded_input, OptionalNullable[models.NetworkEncodedInput]
                ),
                wait_for_result=wait_for_result,
                timeout_seconds=timeout_seconds,
                custom_tracing_attributes=custom_tracing_attributes,
                task_queue=task_queue,
                deployment_name=deployment_name,
            ),
        )

        # Build the HTTP request; the body is serialized lazily as JSON.
        req = self._build_request(
            method="POST",
            path="/v1/workflows/registrations/{workflow_registration_id}/execute",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request.workflow_execution_request,
                False,
                False,
                "json",
                models.WorkflowExecutionRequest,
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # An explicit per-call retry config wins; otherwise use the SDK default.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            # Only these transient status codes are retried.
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        # Execute the request through the SDK hook pipeline.
        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="execute_workflow_registration_v1_workflows_registrations__workflow_registration_id__execute_post",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # 200 -> parsed response model; 422 -> validation error;
        # other 4XX/5XX -> generic SDKError carrying the response body text.
        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(
                models.ResponseExecuteWorkflowRegistrationV1WorkflowsRegistrationsWorkflowRegistrationIDExecutePost,
                http_res,
            )
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)
+
    @deprecated(
        "warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
    )
    async def execute_workflow_registration_async(
        self,
        *,
        workflow_registration_id: str,
        execution_id: OptionalNullable[str] = UNSET,
        input: OptionalNullable[Dict[str, Any]] = UNSET,
        encoded_input: OptionalNullable[
            Union[models.NetworkEncodedInput, models.NetworkEncodedInputTypedDict]
        ] = UNSET,
        wait_for_result: Optional[bool] = False,
        timeout_seconds: OptionalNullable[float] = UNSET,
        custom_tracing_attributes: OptionalNullable[Dict[str, str]] = UNSET,
        task_queue: OptionalNullable[str] = UNSET,
        deployment_name: OptionalNullable[str] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.ResponseExecuteWorkflowRegistrationV1WorkflowsRegistrationsWorkflowRegistrationIDExecutePost:
        r"""Execute Workflow Registration

        Deprecated async variant: start an execution for a specific workflow
        registration via
        ``POST /v1/workflows/registrations/{workflow_registration_id}/execute``.

        :param workflow_registration_id:
        :param execution_id: Allows you to specify a custom execution ID. If not provided, a random ID will be generated.
        :param input: The input to the workflow. This should be a dictionary that matches the workflow's input schema.
        :param encoded_input: Encoded input to the workflow, used when payload encoding is enabled.
        :param wait_for_result: If true, wait for the workflow to complete and return the result directly.
        :param timeout_seconds: Maximum time to wait for completion when wait_for_result is true.
        :param custom_tracing_attributes:
        :param task_queue: Deprecated. Use deployment_name instead.
        :param deployment_name: Name of the deployment to route this execution to
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Resolve per-call overrides, falling back to the SDK-wide configuration.
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        # Path parameter plus the JSON body wrapped in the generated models;
        # encoded_input may be a TypedDict and is coerced to the pydantic model.
        request = models.ExecuteWorkflowRegistrationV1WorkflowsRegistrationsWorkflowRegistrationIDExecutePostRequest(
            workflow_registration_id=workflow_registration_id,
            workflow_execution_request=models.WorkflowExecutionRequest(
                execution_id=execution_id,
                input=input,
                encoded_input=utils.get_pydantic_model(
                    encoded_input, OptionalNullable[models.NetworkEncodedInput]
                ),
                wait_for_result=wait_for_result,
                timeout_seconds=timeout_seconds,
                custom_tracing_attributes=custom_tracing_attributes,
                task_queue=task_queue,
                deployment_name=deployment_name,
            ),
        )

        # Build the HTTP request; the body is serialized lazily as JSON.
        req = self._build_request_async(
            method="POST",
            path="/v1/workflows/registrations/{workflow_registration_id}/execute",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request.workflow_execution_request,
                False,
                False,
                "json",
                models.WorkflowExecutionRequest,
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # An explicit per-call retry config wins; otherwise use the SDK default.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            # Only these transient status codes are retried.
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        # Execute the request asynchronously through the SDK hook pipeline.
        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="execute_workflow_registration_v1_workflows_registrations__workflow_registration_id__execute_post",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # 200 -> parsed response model; 422 -> validation error;
        # other 4XX/5XX -> generic SDKError carrying the response body text.
        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(
                models.ResponseExecuteWorkflowRegistrationV1WorkflowsRegistrationsWorkflowRegistrationIDExecutePost,
                http_res,
            )
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)
+
+ def get_workflow(
+ self,
+ *,
+ workflow_identifier: str,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> models.WorkflowGetResponse:
+ r"""Get Workflow
+
+ :param workflow_identifier:
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.GetWorkflowV1WorkflowsWorkflowIdentifierGetRequest(
+ workflow_identifier=workflow_identifier,
+ )
+
+ req = self._build_request(
+ method="GET",
+ path="/v1/workflows/{workflow_identifier}",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=False,
+ request_has_path_params=True,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = self.do_request(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="get_workflow_v1_workflows__workflow_identifier__get",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "200", "application/json"):
+ return unmarshal_json_response(models.WorkflowGetResponse, http_res)
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
+
+ async def get_workflow_async(
+ self,
+ *,
+ workflow_identifier: str,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> models.WorkflowGetResponse:
+ r"""Get Workflow
+
+ :param workflow_identifier:
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.GetWorkflowV1WorkflowsWorkflowIdentifierGetRequest(
+ workflow_identifier=workflow_identifier,
+ )
+
+ req = self._build_request_async(
+ method="GET",
+ path="/v1/workflows/{workflow_identifier}",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=False,
+ request_has_path_params=True,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = await self.do_request_async(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="get_workflow_v1_workflows__workflow_identifier__get",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "200", "application/json"):
+ return unmarshal_json_response(models.WorkflowGetResponse, http_res)
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
+
+ def update_workflow(
+ self,
+ *,
+ workflow_identifier: str,
+ display_name: OptionalNullable[str] = UNSET,
+ description: OptionalNullable[str] = UNSET,
+ available_in_chat_assistant: OptionalNullable[bool] = UNSET,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> models.WorkflowUpdateResponse:
+ r"""Update Workflow
+
+        :param workflow_identifier: The ID or name of the workflow to update.
+ :param display_name: New display name value
+ :param description: New description value
+ :param available_in_chat_assistant: Whether to make the workflow available in the chat assistant
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.UpdateWorkflowV1WorkflowsWorkflowIdentifierPutRequest(
+ workflow_identifier=workflow_identifier,
+ workflow_update_request=models.WorkflowUpdateRequest(
+ display_name=display_name,
+ description=description,
+ available_in_chat_assistant=available_in_chat_assistant,
+ ),
+ )
+
+ req = self._build_request(
+ method="PUT",
+ path="/v1/workflows/{workflow_identifier}",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=True,
+ request_has_path_params=True,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ get_serialized_body=lambda: utils.serialize_request_body(
+ request.workflow_update_request,
+ False,
+ False,
+ "json",
+ models.WorkflowUpdateRequest,
+ ),
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = self.do_request(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="update_workflow_v1_workflows__workflow_identifier__put",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "200", "application/json"):
+ return unmarshal_json_response(models.WorkflowUpdateResponse, http_res)
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
+
+ async def update_workflow_async(
+ self,
+ *,
+ workflow_identifier: str,
+ display_name: OptionalNullable[str] = UNSET,
+ description: OptionalNullable[str] = UNSET,
+ available_in_chat_assistant: OptionalNullable[bool] = UNSET,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> models.WorkflowUpdateResponse:
+ r"""Update Workflow
+
+        :param workflow_identifier: The ID or name of the workflow to update.
+ :param display_name: New display name value
+ :param description: New description value
+ :param available_in_chat_assistant: Whether to make the workflow available in the chat assistant
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.UpdateWorkflowV1WorkflowsWorkflowIdentifierPutRequest(
+ workflow_identifier=workflow_identifier,
+ workflow_update_request=models.WorkflowUpdateRequest(
+ display_name=display_name,
+ description=description,
+ available_in_chat_assistant=available_in_chat_assistant,
+ ),
+ )
+
+ req = self._build_request_async(
+ method="PUT",
+ path="/v1/workflows/{workflow_identifier}",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=True,
+ request_has_path_params=True,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ get_serialized_body=lambda: utils.serialize_request_body(
+ request.workflow_update_request,
+ False,
+ False,
+ "json",
+ models.WorkflowUpdateRequest,
+ ),
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = await self.do_request_async(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="update_workflow_v1_workflows__workflow_identifier__put",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "200", "application/json"):
+ return unmarshal_json_response(models.WorkflowUpdateResponse, http_res)
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
+
+ def get_workflow_registration(
+ self,
+ *,
+ workflow_registration_id: str,
+ with_workflow: Optional[bool] = False,
+ include_shared: Optional[bool] = True,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> models.WorkflowRegistrationGetResponse:
+ r"""Get Workflow Registration
+
+        :param workflow_registration_id: The ID of the workflow registration to retrieve.
+ :param with_workflow: Whether to include the workflow definition
+ :param include_shared: Whether to include shared workflow versions
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.GetWorkflowRegistrationV1WorkflowsRegistrationsWorkflowRegistrationIDGetRequest(
+ workflow_registration_id=workflow_registration_id,
+ with_workflow=with_workflow,
+ include_shared=include_shared,
+ )
+
+ req = self._build_request(
+ method="GET",
+ path="/v1/workflows/registrations/{workflow_registration_id}",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=False,
+ request_has_path_params=True,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = self.do_request(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="get_workflow_registration_v1_workflows_registrations__workflow_registration_id__get",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "200", "application/json"):
+ return unmarshal_json_response(
+ models.WorkflowRegistrationGetResponse, http_res
+ )
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
+
+ async def get_workflow_registration_async(
+ self,
+ *,
+ workflow_registration_id: str,
+ with_workflow: Optional[bool] = False,
+ include_shared: Optional[bool] = True,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> models.WorkflowRegistrationGetResponse:
+ r"""Get Workflow Registration
+
+        :param workflow_registration_id: The ID of the workflow registration to retrieve.
+ :param with_workflow: Whether to include the workflow definition
+ :param include_shared: Whether to include shared workflow versions
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.GetWorkflowRegistrationV1WorkflowsRegistrationsWorkflowRegistrationIDGetRequest(
+ workflow_registration_id=workflow_registration_id,
+ with_workflow=with_workflow,
+ include_shared=include_shared,
+ )
+
+ req = self._build_request_async(
+ method="GET",
+ path="/v1/workflows/registrations/{workflow_registration_id}",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=False,
+ request_has_path_params=True,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = await self.do_request_async(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="get_workflow_registration_v1_workflows_registrations__workflow_registration_id__get",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "200", "application/json"):
+ return unmarshal_json_response(
+ models.WorkflowRegistrationGetResponse, http_res
+ )
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
+
+ def archive_workflow(
+ self,
+ *,
+ workflow_identifier: str,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> models.WorkflowArchiveResponse:
+ r"""Archive Workflow
+
+        :param workflow_identifier: The ID or name of the workflow to archive.
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.ArchiveWorkflowV1WorkflowsWorkflowIdentifierArchivePutRequest(
+ workflow_identifier=workflow_identifier,
+ )
+
+ req = self._build_request(
+ method="PUT",
+ path="/v1/workflows/{workflow_identifier}/archive",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=False,
+ request_has_path_params=True,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = self.do_request(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="archive_workflow_v1_workflows__workflow_identifier__archive_put",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "200", "application/json"):
+ return unmarshal_json_response(models.WorkflowArchiveResponse, http_res)
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
+
+ async def archive_workflow_async(
+ self,
+ *,
+ workflow_identifier: str,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> models.WorkflowArchiveResponse:
+ r"""Archive Workflow
+
+        :param workflow_identifier: The ID or name of the workflow to archive.
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = models.ArchiveWorkflowV1WorkflowsWorkflowIdentifierArchivePutRequest(
+ workflow_identifier=workflow_identifier,
+ )
+
+ req = self._build_request_async(
+ method="PUT",
+ path="/v1/workflows/{workflow_identifier}/archive",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=False,
+ request_has_path_params=True,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = await self.do_request_async(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="archive_workflow_v1_workflows__workflow_identifier__archive_put",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "200", "application/json"):
+ return unmarshal_json_response(models.WorkflowArchiveResponse, http_res)
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
+
+ def unarchive_workflow(
+ self,
+ *,
+ workflow_identifier: str,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> models.WorkflowUnarchiveResponse:
+ r"""Unarchive Workflow
+
+        :param workflow_identifier: The ID or name of the workflow to unarchive.
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = (
+ models.UnarchiveWorkflowV1WorkflowsWorkflowIdentifierUnarchivePutRequest(
+ workflow_identifier=workflow_identifier,
+ )
+ )
+
+ req = self._build_request(
+ method="PUT",
+ path="/v1/workflows/{workflow_identifier}/unarchive",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=False,
+ request_has_path_params=True,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = self.do_request(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="unarchive_workflow_v1_workflows__workflow_identifier__unarchive_put",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "200", "application/json"):
+ return unmarshal_json_response(models.WorkflowUnarchiveResponse, http_res)
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = utils.stream_to_text(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
+
+ async def unarchive_workflow_async(
+ self,
+ *,
+ workflow_identifier: str,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> models.WorkflowUnarchiveResponse:
+ r"""Unarchive Workflow
+
+        :param workflow_identifier: The ID or name of the workflow to unarchive.
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
+ """
+ base_url = None
+ url_variables = None
+ if timeout_ms is None:
+ timeout_ms = self.sdk_configuration.timeout_ms
+
+ if server_url is not None:
+ base_url = server_url
+ else:
+ base_url = self._get_url(base_url, url_variables)
+
+ request = (
+ models.UnarchiveWorkflowV1WorkflowsWorkflowIdentifierUnarchivePutRequest(
+ workflow_identifier=workflow_identifier,
+ )
+ )
+
+ req = self._build_request_async(
+ method="PUT",
+ path="/v1/workflows/{workflow_identifier}/unarchive",
+ base_url=base_url,
+ url_variables=url_variables,
+ request=request,
+ request_body_required=False,
+ request_has_path_params=True,
+ request_has_query_params=True,
+ user_agent_header="user-agent",
+ accept_header_value="application/json",
+ http_headers=http_headers,
+ security=self.sdk_configuration.security,
+ allow_empty_value=None,
+ timeout_ms=timeout_ms,
+ )
+
+ if retries == UNSET:
+ if self.sdk_configuration.retry_config is not UNSET:
+ retries = self.sdk_configuration.retry_config
+
+ retry_config = None
+ if isinstance(retries, utils.RetryConfig):
+ retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+ http_res = await self.do_request_async(
+ hook_ctx=HookContext(
+ config=self.sdk_configuration,
+ base_url=base_url or "",
+ operation_id="unarchive_workflow_v1_workflows__workflow_identifier__unarchive_put",
+ oauth2_scopes=None,
+ security_source=get_security_from_env(
+ self.sdk_configuration.security, models.Security
+ ),
+ ),
+ request=req,
+ error_status_codes=["422", "4XX", "5XX"],
+ retry_config=retry_config,
+ )
+
+ response_data: Any = None
+ if utils.match_response(http_res, "200", "application/json"):
+ return unmarshal_json_response(models.WorkflowUnarchiveResponse, http_res)
+ if utils.match_response(http_res, "422", "application/json"):
+ response_data = unmarshal_json_response(
+ errors.HTTPValidationErrorData, http_res
+ )
+ raise errors.HTTPValidationError(response_data, http_res)
+ if utils.match_response(http_res, "4XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+ if utils.match_response(http_res, "5XX", "*"):
+ http_res_text = await utils.stream_to_text_async(http_res)
+ raise errors.SDKError("API error occurred", http_res, http_res_text)
+
+ raise errors.SDKError("Unexpected response received", http_res)
diff --git a/src/mistralai/client/workflows_events.py b/src/mistralai/client/workflows_events.py
new file mode 100644
index 00000000..03df3f78
--- /dev/null
+++ b/src/mistralai/client/workflows_events.py
@@ -0,0 +1,886 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 6d4f674ce8ef
+
+from .basesdk import BaseSDK
+from mistralai.client import errors, models, utils
+from mistralai.client._hooks import HookContext
+from mistralai.client.types import OptionalNullable, UNSET
+from mistralai.client.utils import eventstreaming, get_security_from_env
+from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response
+from typing import Any, Dict, List, Mapping, Optional, Union
+
+
class WorkflowsEvents(BaseSDK):
    r"""Operations on the ``/v1/workflows/events`` endpoints.

    Covers single-event ingestion, batch ingestion, server-sent-event
    streaming, and listing of persisted workflow events, each in sync and
    async variants. Code generated by Speakeasy — do not edit by hand.
    """

    def receive_workflow_event(
        self,
        *,
        event: Union[
            models.WorkflowEventRequestEvent, models.WorkflowEventRequestEventTypedDict
        ],
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.WorkflowEventResponse:
        r"""Receive Workflow Event

        Receive workflow events from workers.

        Events are published to NATS for real-time streaming and persisted in the database.

        For shared workers, the actual execution owner is resolved from the execution record,
        ensuring events are streamed to the correct user's namespace.

        :param event: The workflow event payload.
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Resolve per-call overrides: an explicit server_url wins over the
        # configured base URL; timeout falls back to the SDK-wide default.
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        # Coerce the caller's model-or-TypedDict input into the typed request model.
        request = models.WorkflowEventRequest(
            event=utils.get_pydantic_model(event, models.WorkflowEventRequestEvent),
        )

        req = self._build_request(
            method="POST",
            path="/v1/workflows/events",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            # Serialized lazily so request hooks can run before the body is built.
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.WorkflowEventRequest
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Per-call retry override falls back to the SDK-level retry config.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            # Retry only on rate limiting and transient server errors.
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="receive_workflow_event_v1_workflows_events_post",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # Dispatch on status / content type: 200 -> typed response,
        # 422 -> validation error, other 4XX/5XX -> generic SDKError.
        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.WorkflowEventResponse, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)
+
    async def receive_workflow_event_async(
        self,
        *,
        event: Union[
            models.WorkflowEventRequestEvent, models.WorkflowEventRequestEventTypedDict
        ],
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.WorkflowEventResponse:
        r"""Receive Workflow Event

        Receive workflow events from workers.

        Events are published to NATS for real-time streaming and persisted in the database.

        For shared workers, the actual execution owner is resolved from the execution record,
        ensuring events are streamed to the correct user's namespace.

        :param event: The workflow event payload.
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Async twin of receive_workflow_event: same request construction,
        # but the async build/dispatch pipeline is used throughout.
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.WorkflowEventRequest(
            event=utils.get_pydantic_model(event, models.WorkflowEventRequestEvent),
        )

        req = self._build_request_async(
            method="POST",
            path="/v1/workflows/events",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            # Serialized lazily so request hooks can run before the body is built.
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.WorkflowEventRequest
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Per-call retry override falls back to the SDK-level retry config.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="receive_workflow_event_v1_workflows_events_post",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # Dispatch on status / content type (see sync variant for details).
        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.WorkflowEventResponse, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)
+
    def receive_workflow_events_batch(
        self,
        *,
        events: Union[
            List[models.WorkflowEventBatchRequestEvent],
            List[models.WorkflowEventBatchRequestEventTypedDict],
        ],
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.WorkflowEventBatchResponse:
        r"""Receive Workflow Events Batch

        Receive multiple workflow events from workers in a single batch.

        Events are published to NATS for real-time streaming and persisted in the database.
        This endpoint processes events sequentially to maintain ordering guarantees.

        For shared workers, the actual execution owner is resolved from the execution record,
        ensuring events are streamed to the correct user's namespace.

        :param events: List of workflow events to send.
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Resolve per-call overrides (server URL / timeout), then POST the
        # whole batch as one JSON body.
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        # Coerce each list element (model or TypedDict) into the typed event model.
        request = models.WorkflowEventBatchRequest(
            events=utils.get_pydantic_model(
                events, List[models.WorkflowEventBatchRequestEvent]
            ),
        )

        req = self._build_request(
            method="POST",
            path="/v1/workflows/events/batch",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            # Serialized lazily so request hooks can run before the body is built.
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.WorkflowEventBatchRequest
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Per-call retry override falls back to the SDK-level retry config.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="receive_workflow_events_batch_v1_workflows_events_batch_post",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # Dispatch on status / content type: 200 -> typed response,
        # 422 -> validation error, other 4XX/5XX -> generic SDKError.
        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.WorkflowEventBatchResponse, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)
+
    async def receive_workflow_events_batch_async(
        self,
        *,
        events: Union[
            List[models.WorkflowEventBatchRequestEvent],
            List[models.WorkflowEventBatchRequestEventTypedDict],
        ],
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.WorkflowEventBatchResponse:
        r"""Receive Workflow Events Batch

        Receive multiple workflow events from workers in a single batch.

        Events are published to NATS for real-time streaming and persisted in the database.
        This endpoint processes events sequentially to maintain ordering guarantees.

        For shared workers, the actual execution owner is resolved from the execution record,
        ensuring events are streamed to the correct user's namespace.

        :param events: List of workflow events to send.
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Async twin of receive_workflow_events_batch: same request
        # construction through the async build/dispatch pipeline.
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.WorkflowEventBatchRequest(
            events=utils.get_pydantic_model(
                events, List[models.WorkflowEventBatchRequestEvent]
            ),
        )

        req = self._build_request_async(
            method="POST",
            path="/v1/workflows/events/batch",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            # Serialized lazily so request hooks can run before the body is built.
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.WorkflowEventBatchRequest
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Per-call retry override falls back to the SDK-level retry config.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="receive_workflow_events_batch_v1_workflows_events_batch_post",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # Dispatch on status / content type (see sync variant for details).
        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.WorkflowEventBatchResponse, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)
+
    def get_stream_events(
        self,
        *,
        scope: Optional[models.Scope] = "*",
        activity_name: Optional[str] = "*",
        activity_id: Optional[str] = "*",
        workflow_name: Optional[str] = "*",
        workflow_exec_id: Optional[str] = "*",
        root_workflow_exec_id: Optional[str] = "*",
        parent_workflow_exec_id: Optional[str] = "*",
        stream: Optional[str] = "*",
        start_seq: Optional[int] = 0,
        metadata_filters: OptionalNullable[Dict[str, Any]] = UNSET,
        workflow_event_types: OptionalNullable[List[models.WorkflowEventType]] = UNSET,
        last_event_id: OptionalNullable[str] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> eventstreaming.EventStream[
        models.GetStreamEventsV1WorkflowsEventsStreamGetResponseBody
    ]:
        r"""Get Stream Events

        Open a server-sent-event stream of workflow events matching the given
        filters. Returns an :class:`eventstreaming.EventStream` the caller
        iterates to receive decoded events.

        :param scope: Event scope filter; defaults to ``\"*\"`` (match any).
        :param activity_name: Activity name filter; defaults to ``\"*\"`` (match any).
        :param activity_id: Activity ID filter; defaults to ``\"*\"`` (match any).
        :param workflow_name: Workflow name filter; defaults to ``\"*\"`` (match any).
        :param workflow_exec_id: Workflow execution ID filter; defaults to ``\"*\"`` (match any).
        :param root_workflow_exec_id: Root workflow execution ID filter; defaults to ``\"*\"`` (match any).
        :param parent_workflow_exec_id: Parent workflow execution ID filter; defaults to ``\"*\"`` (match any).
        :param stream: Stream name filter; defaults to ``\"*\"`` (match any).
        :param start_seq: Sequence number to start consuming from (default 0).
        :param metadata_filters: Optional key/value metadata filters.
        :param workflow_event_types: Restrict the stream to these event types.
        :param last_event_id: Resume after this event ID — presumably the SSE ``Last-Event-Id`` mechanism; confirm against server behavior.
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Resolve per-call overrides (server URL / timeout).
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        # All filters travel as query parameters on the GET request.
        request = models.GetStreamEventsV1WorkflowsEventsStreamGetRequest(
            scope=scope,
            activity_name=activity_name,
            activity_id=activity_id,
            workflow_name=workflow_name,
            workflow_exec_id=workflow_exec_id,
            root_workflow_exec_id=root_workflow_exec_id,
            parent_workflow_exec_id=parent_workflow_exec_id,
            stream=stream,
            start_seq=start_seq,
            metadata_filters=metadata_filters,
            workflow_event_types=workflow_event_types,
            last_event_id=last_event_id,
        )

        req = self._build_request(
            method="GET",
            path="/v1/workflows/events/stream",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="text/event-stream",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Per-call retry override falls back to the SDK-level retry config.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        # stream=True keeps the response body open so events can be consumed
        # incrementally rather than buffered in full.
        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="get_stream_events_v1_workflows_events_stream_get",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            stream=True,
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "text/event-stream"):
            # Hand the open response to EventStream; each raw SSE payload is
            # decoded into the typed response-body model on iteration.
            return eventstreaming.EventStream(
                http_res,
                lambda raw: utils.unmarshal_json(
                    raw, models.GetStreamEventsV1WorkflowsEventsStreamGetResponseBody
                ),
                client_ref=self,
                data_required=False,
            )
        if utils.match_response(http_res, "422", "application/json"):
            # Streamed response: the body must be drained to text before the
            # error payload can be unmarshalled.
            http_res_text = utils.stream_to_text(http_res)
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res, http_res_text
            )
            raise errors.HTTPValidationError(response_data, http_res, http_res_text)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        http_res_text = utils.stream_to_text(http_res)
        raise errors.SDKError("Unexpected response received", http_res, http_res_text)
+
    async def get_stream_events_async(
        self,
        *,
        scope: Optional[models.Scope] = "*",
        activity_name: Optional[str] = "*",
        activity_id: Optional[str] = "*",
        workflow_name: Optional[str] = "*",
        workflow_exec_id: Optional[str] = "*",
        root_workflow_exec_id: Optional[str] = "*",
        parent_workflow_exec_id: Optional[str] = "*",
        stream: Optional[str] = "*",
        start_seq: Optional[int] = 0,
        metadata_filters: OptionalNullable[Dict[str, Any]] = UNSET,
        workflow_event_types: OptionalNullable[List[models.WorkflowEventType]] = UNSET,
        last_event_id: OptionalNullable[str] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> eventstreaming.EventStreamAsync[
        models.GetStreamEventsV1WorkflowsEventsStreamGetResponseBody
    ]:
        r"""Get Stream Events

        Open a server-sent-event stream of workflow events matching the given
        filters. Returns an :class:`eventstreaming.EventStreamAsync` the
        caller iterates asynchronously to receive decoded events.

        :param scope: Event scope filter; defaults to ``\"*\"`` (match any).
        :param activity_name: Activity name filter; defaults to ``\"*\"`` (match any).
        :param activity_id: Activity ID filter; defaults to ``\"*\"`` (match any).
        :param workflow_name: Workflow name filter; defaults to ``\"*\"`` (match any).
        :param workflow_exec_id: Workflow execution ID filter; defaults to ``\"*\"`` (match any).
        :param root_workflow_exec_id: Root workflow execution ID filter; defaults to ``\"*\"`` (match any).
        :param parent_workflow_exec_id: Parent workflow execution ID filter; defaults to ``\"*\"`` (match any).
        :param stream: Stream name filter; defaults to ``\"*\"`` (match any).
        :param start_seq: Sequence number to start consuming from (default 0).
        :param metadata_filters: Optional key/value metadata filters.
        :param workflow_event_types: Restrict the stream to these event types.
        :param last_event_id: Resume after this event ID — presumably the SSE ``Last-Event-Id`` mechanism; confirm against server behavior.
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Async twin of get_stream_events: same filters, async pipeline,
        # and an EventStreamAsync return type.
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.GetStreamEventsV1WorkflowsEventsStreamGetRequest(
            scope=scope,
            activity_name=activity_name,
            activity_id=activity_id,
            workflow_name=workflow_name,
            workflow_exec_id=workflow_exec_id,
            root_workflow_exec_id=root_workflow_exec_id,
            parent_workflow_exec_id=parent_workflow_exec_id,
            stream=stream,
            start_seq=start_seq,
            metadata_filters=metadata_filters,
            workflow_event_types=workflow_event_types,
            last_event_id=last_event_id,
        )

        req = self._build_request_async(
            method="GET",
            path="/v1/workflows/events/stream",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="text/event-stream",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Per-call retry override falls back to the SDK-level retry config.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        # stream=True keeps the response body open for incremental consumption.
        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="get_stream_events_v1_workflows_events_stream_get",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            stream=True,
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "text/event-stream"):
            return eventstreaming.EventStreamAsync(
                http_res,
                lambda raw: utils.unmarshal_json(
                    raw, models.GetStreamEventsV1WorkflowsEventsStreamGetResponseBody
                ),
                client_ref=self,
                data_required=False,
            )
        if utils.match_response(http_res, "422", "application/json"):
            # Streamed response: drain the body to text before unmarshalling.
            http_res_text = await utils.stream_to_text_async(http_res)
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res, http_res_text
            )
            raise errors.HTTPValidationError(response_data, http_res, http_res_text)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        http_res_text = await utils.stream_to_text_async(http_res)
        raise errors.SDKError("Unexpected response received", http_res, http_res_text)
+
    def get_workflow_events(
        self,
        *,
        root_workflow_exec_id: OptionalNullable[str] = UNSET,
        workflow_exec_id: OptionalNullable[str] = UNSET,
        workflow_run_id: OptionalNullable[str] = UNSET,
        limit: Optional[int] = 100,
        cursor: OptionalNullable[str] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.ListWorkflowEventResponse:
        r"""Get Workflow Events

        List persisted workflow events, filtered by execution identifiers and
        paginated via an opaque cursor.

        :param root_workflow_exec_id: Execution ID of the root workflow that initiated this execution chain.
        :param workflow_exec_id: Execution ID of the workflow that emitted this event.
        :param workflow_run_id: Run ID of the workflow that emitted this event.
        :param limit: Maximum number of events to return.
        :param cursor: Cursor for pagination.
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Resolve per-call overrides (server URL / timeout).
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        # Filters and pagination travel as query parameters on the GET request.
        request = models.GetWorkflowEventsV1WorkflowsEventsListGetRequest(
            root_workflow_exec_id=root_workflow_exec_id,
            workflow_exec_id=workflow_exec_id,
            workflow_run_id=workflow_run_id,
            limit=limit,
            cursor=cursor,
        )

        req = self._build_request(
            method="GET",
            path="/v1/workflows/events/list",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Per-call retry override falls back to the SDK-level retry config.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="get_workflow_events_v1_workflows_events_list_get",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # Dispatch on status / content type: 200 -> typed response,
        # 422 -> validation error, other 4XX/5XX -> generic SDKError.
        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.ListWorkflowEventResponse, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)
+
    async def get_workflow_events_async(
        self,
        *,
        root_workflow_exec_id: OptionalNullable[str] = UNSET,
        workflow_exec_id: OptionalNullable[str] = UNSET,
        workflow_run_id: OptionalNullable[str] = UNSET,
        limit: Optional[int] = 100,
        cursor: OptionalNullable[str] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.ListWorkflowEventResponse:
        r"""Get Workflow Events

        List persisted workflow events, filtered by execution identifiers and
        paginated via an opaque cursor.

        :param root_workflow_exec_id: Execution ID of the root workflow that initiated this execution chain.
        :param workflow_exec_id: Execution ID of the workflow that emitted this event.
        :param workflow_run_id: Run ID of the workflow that emitted this event.
        :param limit: Maximum number of events to return.
        :param cursor: Cursor for pagination.
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Async twin of get_workflow_events: same query construction through
        # the async build/dispatch pipeline.
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.GetWorkflowEventsV1WorkflowsEventsListGetRequest(
            root_workflow_exec_id=root_workflow_exec_id,
            workflow_exec_id=workflow_exec_id,
            workflow_run_id=workflow_run_id,
            limit=limit,
            cursor=cursor,
        )

        req = self._build_request_async(
            method="GET",
            path="/v1/workflows/events/list",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Per-call retry override falls back to the SDK-level retry config.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="get_workflow_events_v1_workflows_events_list_get",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # Dispatch on status / content type (see sync variant for details).
        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.ListWorkflowEventResponse, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)
diff --git a/uv.lock b/uv.lock
index 3769cd61..9064878e 100644
--- a/uv.lock
+++ b/uv.lock
@@ -560,7 +560,7 @@ wheels = [
[[package]]
name = "mistralai"
-version = "2.1.3"
+version = "2.2.0rc1"
source = { editable = "." }
dependencies = [
{ name = "eval-type-backport" },