From 1107759137d1b9be0a8a828fe7022af0a6d1efdf Mon Sep 17 00:00:00 2001 From: Matt Date: Tue, 17 Mar 2026 14:41:20 +0100 Subject: [PATCH 01/18] feat(workflow-executor): scaffold @forestadmin/workflow-executor package --- .github/workflows/build.yml | 1 + packages/workflow-executor/CHANGELOG.md | 1 + packages/workflow-executor/CLAUDE.md | 34 + packages/workflow-executor/LICENSE | 674 ++++++++++++++++++ packages/workflow-executor/jest.config.ts | 8 + packages/workflow-executor/package.json | 30 + packages/workflow-executor/src/index.ts | 1 + packages/workflow-executor/test/.gitkeep | 0 .../workflow-executor/tsconfig.eslint.json | 3 + packages/workflow-executor/tsconfig.json | 7 + 10 files changed, 759 insertions(+) create mode 100644 packages/workflow-executor/CHANGELOG.md create mode 100644 packages/workflow-executor/CLAUDE.md create mode 100644 packages/workflow-executor/LICENSE create mode 100644 packages/workflow-executor/jest.config.ts create mode 100644 packages/workflow-executor/package.json create mode 100644 packages/workflow-executor/src/index.ts create mode 100644 packages/workflow-executor/test/.gitkeep create mode 100644 packages/workflow-executor/tsconfig.eslint.json create mode 100644 packages/workflow-executor/tsconfig.json diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index d161736ea6..12c03a54e6 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -80,6 +80,7 @@ jobs: - plugin-aws-s3 - plugin-export-advanced - plugin-flattener + - workflow-executor steps: - uses: actions/checkout@v4 - uses: actions/setup-node@v4 diff --git a/packages/workflow-executor/CHANGELOG.md b/packages/workflow-executor/CHANGELOG.md new file mode 100644 index 0000000000..5d9c4c1989 --- /dev/null +++ b/packages/workflow-executor/CHANGELOG.md @@ -0,0 +1 @@ +# @forestadmin/workflow-executor diff --git a/packages/workflow-executor/CLAUDE.md b/packages/workflow-executor/CLAUDE.md new file mode 100644 index 0000000000..65f388ffba --- 
/dev/null +++ b/packages/workflow-executor/CLAUDE.md @@ -0,0 +1,34 @@ +# @forestadmin/workflow-executor + +Framework-agnostic TypeScript library that executes workflow steps on the client side (the client's infrastructure, alongside the Forest Admin agent). + +## Architecture + +- **Pull-based** — The executor polls `WorkflowPort`. `triggerPoll(runId)` to speed up a specific run +- **Atomic** — Each step is executed in isolation. `RunStore` ensures continuity between steps +- **Privacy** — Zero client data in the orchestrator. Data lives in `RunStore` +- **Ports** — All IO goes through an injected interface +- **Integrated AI** — Uses `@forestadmin/ai-proxy` (Router) to create models and load remote tools + +## Commands + +```bash +yarn workspace @forestadmin/workflow-executor build # Build +yarn workspace @forestadmin/workflow-executor test # Run tests +yarn workspace @forestadmin/workflow-executor lint # Lint +``` + +## Testing + +- Prefer integration tests over unit tests +- Use AAA pattern (Arrange, Act, Assert) +- Test behavior, not implementation +- Strong assertions: verify exact arguments, not just that a function was called + +## Changelog + +> **IMPORTANT**: When a new feature, fix, or change is implemented, add an entry below summarizing what was done. + +| Date | Type | Summary | +|------------|---------|---------| +| 2026-03-16 | setup | Initial package scaffolding (package.json, tsconfig, jest, CI integration) | diff --git a/packages/workflow-executor/LICENSE b/packages/workflow-executor/LICENSE new file mode 100644 index 0000000000..e62ec04cde --- /dev/null +++ b/packages/workflow-executor/LICENSE @@ -0,0 +1,674 @@ +GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. 
+ + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. 
For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. 
The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. 
The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/packages/workflow-executor/jest.config.ts b/packages/workflow-executor/jest.config.ts new file mode 100644 index 0000000000..d622773e8a --- /dev/null +++ b/packages/workflow-executor/jest.config.ts @@ -0,0 +1,8 @@ +/* eslint-disable import/no-relative-packages */ +import jestConfig from '../../jest.config'; + +export default { + ...jestConfig, + collectCoverageFrom: ['/src/**/*.ts'], + testMatch: ['/test/**/*.test.ts'], +}; diff --git a/packages/workflow-executor/package.json b/packages/workflow-executor/package.json new file mode 100644 index 0000000000..9ae4eed2b9 --- /dev/null +++ b/packages/workflow-executor/package.json @@ -0,0 +1,30 @@ +{ + "name": "@forestadmin/workflow-executor", + "version": "1.0.0", + "main": "dist/index.js", + "license": "GPL-3.0", + "publishConfig": { + "access": "public" + }, + "repository": { + "type": "git", + "url": "git+https://github.com/ForestAdmin/agent-nodejs.git", + "directory": "packages/workflow-executor" + }, + "dependencies": { + "@forestadmin/ai-proxy": "1.5.0", + "sequelize": "^6.37.5", + "zod": "^4.3.5" + }, + "files": [ + "dist/**/*.js", + "dist/**/*.d.ts" + ], + "scripts": { + "build": "tsc", + "build:watch": "tsc --watch", + "clean": "rm -rf coverage dist", + "lint": "eslint src", + "test": "jest" + } +} diff --git a/packages/workflow-executor/src/index.ts b/packages/workflow-executor/src/index.ts new file mode 100644 index 0000000000..03111ba05a --- /dev/null +++ b/packages/workflow-executor/src/index.ts @@ -0,0 +1 @@ +// @forestadmin/workflow-executor diff --git a/packages/workflow-executor/test/.gitkeep b/packages/workflow-executor/test/.gitkeep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/packages/workflow-executor/tsconfig.eslint.json b/packages/workflow-executor/tsconfig.eslint.json new file mode 100644 index 0000000000..9bdc52705d --- /dev/null +++ b/packages/workflow-executor/tsconfig.eslint.json @@ -0,0 +1,3 @@ +{ + "extends": "../../tsconfig.eslint.json" +} diff --git 
a/packages/workflow-executor/tsconfig.json b/packages/workflow-executor/tsconfig.json new file mode 100644 index 0000000000..e0d66374ae --- /dev/null +++ b/packages/workflow-executor/tsconfig.json @@ -0,0 +1,7 @@ +{ + "extends": "../../tsconfig.json", + "compilerOptions": { + "outDir": "dist" + }, + "include": ["src/**/*"] +} From 4510b7bf1047c91303448b0c319d4eaad58167c5 Mon Sep 17 00:00:00 2001 From: alban bertolini Date: Tue, 17 Mar 2026 14:57:32 +0100 Subject: [PATCH 02/18] =?UTF-8?q?feat(workflow-executor):=20finalize=20sca?= =?UTF-8?q?ffold=20=E2=80=94=20clean=20CLAUDE.md,=20remove=20premature=20d?= =?UTF-8?q?eps,=20add=20smoke=20test?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Rewrite CLAUDE.md with project overview and architecture principles, remove changelog - Remove unused dependencies (ai-proxy, sequelize, zod) per YAGNI - Add smoke test so CI passes Co-Authored-By: Claude Opus 4.6 --- packages/workflow-executor/CLAUDE.md | 30 ++++++++----------- packages/workflow-executor/package.json | 5 ---- packages/workflow-executor/test/index.test.ts | 5 ++++ 3 files changed, 18 insertions(+), 22 deletions(-) create mode 100644 packages/workflow-executor/test/index.test.ts diff --git a/packages/workflow-executor/CLAUDE.md b/packages/workflow-executor/CLAUDE.md index 65f388ffba..26f8674a90 100644 --- a/packages/workflow-executor/CLAUDE.md +++ b/packages/workflow-executor/CLAUDE.md @@ -1,21 +1,25 @@ # @forestadmin/workflow-executor -Bibliothèque TypeScript framework-agnostic qui exécute des steps de workflow côté client (infra du client, à côté de l'agent Forest Admin). +> **Note to Claude**: Keep this file up to date. When adding a new feature, module, architectural pattern, or dependency, update the relevant section below. -## Architecture +## Overview -- **Pull-based** — L'executor poll `WorkflowPort`. `triggerPoll(runId)` pour accélérer un run spécifique -- **Atomic** — Chaque step exécutée en isolation. 
`RunStore` assure la continuité entre steps -- **Privacy** — Zéro donnée client dans l'orchestrateur. Données dans `RunStore` -- **Ports** — Toute IO passe par une interface injectée -- **AI intégré** — Utilise `@forestadmin/ai-proxy` (Router) pour créer les modèles et charger les remote tools +TypeScript library (framework-agnostic) that executes workflow steps on the client's infrastructure, alongside the Forest Admin agent. The orchestrator never sees client data — it only sends step definitions; this package fetches them and runs them locally. + +## Architecture Principles + +- **Pull-based** — The executor polls for pending steps via `WorkflowPort`. `triggerPoll(runId)` fast-tracks a specific run. +- **Atomic** — Each step is executed in isolation. `RunStore` maintains continuity between steps. +- **Privacy** — Zero client data leaves the client's infrastructure. All data lives in `RunStore`. +- **Ports (IO injection)** — Every external IO goes through an injected port interface, making the core pure and testable. +- **AI integration** — Uses `@forestadmin/ai-proxy` (Router) to create models and load remote tools. ## Commands ```bash yarn workspace @forestadmin/workflow-executor build # Build -yarn workspace @forestadmin/workflow-executor test # Run tests -yarn workspace @forestadmin/workflow-executor lint # Lint +yarn workspace @forestadmin/workflow-executor test # Run tests +yarn workspace @forestadmin/workflow-executor lint # Lint ``` ## Testing @@ -24,11 +28,3 @@ yarn workspace @forestadmin/workflow-executor lint # Lint - Use AAA pattern (Arrange, Act, Assert) - Test behavior, not implementation - Strong assertions: verify exact arguments, not just that a function was called - -## Changelog - -> **IMPORTANT**: When a new feature, fix, or change is implemented, add an entry below summarizing what was done. 
- -| Date | Type | Summary | -|------------|---------|---------| -| 2026-03-16 | setup | Initial package scaffolding (package.json, tsconfig, jest, CI integration) | diff --git a/packages/workflow-executor/package.json b/packages/workflow-executor/package.json index 9ae4eed2b9..6a5292f5e7 100644 --- a/packages/workflow-executor/package.json +++ b/packages/workflow-executor/package.json @@ -11,11 +11,6 @@ "url": "git+https://github.com/ForestAdmin/agent-nodejs.git", "directory": "packages/workflow-executor" }, - "dependencies": { - "@forestadmin/ai-proxy": "1.5.0", - "sequelize": "^6.37.5", - "zod": "^4.3.5" - }, "files": [ "dist/**/*.js", "dist/**/*.d.ts" diff --git a/packages/workflow-executor/test/index.test.ts b/packages/workflow-executor/test/index.test.ts new file mode 100644 index 0000000000..202001a921 --- /dev/null +++ b/packages/workflow-executor/test/index.test.ts @@ -0,0 +1,5 @@ +describe('workflow-executor', () => { + it('should be importable', () => { + expect(require('../src/index')).toBeDefined(); + }); +}); From 17f26ca43a113993782baaf218c2179dc4bd8c76 Mon Sep 17 00:00:00 2001 From: alban bertolini Date: Tue, 17 Mar 2026 15:08:29 +0100 Subject: [PATCH 03/18] =?UTF-8?q?fix(workflow-executor):=20address=20revie?= =?UTF-8?q?w=20=E2=80=94=20lint=20test=20dir,=20improve=20test,=20document?= =?UTF-8?q?=20system=20architecture?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Lint now covers src and test directories - Replace require() with import, use stronger assertion (toHaveLength) - Add System Architecture section describing Front/Orchestrator/Executor/Agent - Mark Architecture Principles as planned (not yet implemented) - Remove redundant test/.gitkeep - Make index.ts a valid module with export {} Co-Authored-By: Claude Opus 4.6 --- packages/workflow-executor/CLAUDE.md | 29 +++++++++++++++---- packages/workflow-executor/package.json | 2 +- packages/workflow-executor/src/index.ts | 2 +- 
packages/workflow-executor/test/.gitkeep | 0 packages/workflow-executor/test/index.test.ts | 6 ++-- 5 files changed, 29 insertions(+), 10 deletions(-) delete mode 100644 packages/workflow-executor/test/.gitkeep diff --git a/packages/workflow-executor/CLAUDE.md b/packages/workflow-executor/CLAUDE.md index 26f8674a90..19f7b448c3 100644 --- a/packages/workflow-executor/CLAUDE.md +++ b/packages/workflow-executor/CLAUDE.md @@ -6,13 +6,30 @@ TypeScript library (framework-agnostic) that executes workflow steps on the client's infrastructure, alongside the Forest Admin agent. The orchestrator never sees client data — it only sends step definitions; this package fetches them and runs them locally. -## Architecture Principles +## System Architecture -- **Pull-based** — The executor polls for pending steps via `WorkflowPort`. `triggerPoll(runId)` fast-tracks a specific run. -- **Atomic** — Each step is executed in isolation. `RunStore` maintains continuity between steps. -- **Privacy** — Zero client data leaves the client's infrastructure. All data lives in `RunStore`. -- **Ports (IO injection)** — Every external IO goes through an injected port interface, making the core pure and testable. -- **AI integration** — Uses `@forestadmin/ai-proxy` (Router) to create models and load remote tools. +The workflow system is split into 4 components: + +- **Front** — The Forest Admin UI. Users design workflows (sequence of steps) and trigger runs. Displays run progress and results in real time. +- **Orchestrator** — Forest Admin backend. Stores workflow definitions, manages run state machines, and dispatches steps. Never sees client data — only step metadata. +- **Executor** _(this package)_ — Runs on the client's infrastructure. Polls the orchestrator for pending steps, executes them locally (with access to client data), and reports results back. Privacy boundary lives here. +- **Agent** — The Forest Admin agent (`@forestadmin/agent`). 
Acts as a proxy for the executor — provides access to the datasource layer (collections, actions, fields) so the executor can read/write client data without direct database access. + +``` +Front ──▶ Orchestrator ◀──pull── Executor ──▶ Agent (datasources) + ▲ │ + └──────────── progress/results ────────┘ +``` + +## Architecture Principles (Planned) + +The following principles will guide implementation. None are implemented yet. + +- **Pull-based** — The executor will poll for pending steps via a port interface. A `triggerPoll(runId)` mechanism will fast-track a specific run. +- **Atomic** — Each step will execute in isolation. A run store will maintain continuity between steps. +- **Privacy** — Zero client data leaves the client's infrastructure. +- **Ports (IO injection)** — All external IO will go through injected port interfaces, keeping the core pure and testable. +- **AI integration** — Will use `@forestadmin/ai-proxy` (Router) to create models and load remote tools. ## Commands diff --git a/packages/workflow-executor/package.json b/packages/workflow-executor/package.json index 6a5292f5e7..a3fe055e30 100644 --- a/packages/workflow-executor/package.json +++ b/packages/workflow-executor/package.json @@ -19,7 +19,7 @@ "build": "tsc", "build:watch": "tsc --watch", "clean": "rm -rf coverage dist", - "lint": "eslint src", + "lint": "eslint src test", "test": "jest" } } diff --git a/packages/workflow-executor/src/index.ts b/packages/workflow-executor/src/index.ts index 03111ba05a..cb0ff5c3b5 100644 --- a/packages/workflow-executor/src/index.ts +++ b/packages/workflow-executor/src/index.ts @@ -1 +1 @@ -// @forestadmin/workflow-executor +export {}; diff --git a/packages/workflow-executor/test/.gitkeep b/packages/workflow-executor/test/.gitkeep deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/packages/workflow-executor/test/index.test.ts b/packages/workflow-executor/test/index.test.ts index 202001a921..d80b0c4e5b 100644 --- 
a/packages/workflow-executor/test/index.test.ts +++ b/packages/workflow-executor/test/index.test.ts @@ -1,5 +1,7 @@ +import * as mod from '../src/index'; + describe('workflow-executor', () => { - it('should be importable', () => { - expect(require('../src/index')).toBeDefined(); + it('should export an empty module', () => { + expect(Object.keys(mod)).toHaveLength(0); }); }); From 29f5646ea3a747ea944338af123f9217a3e59096 Mon Sep 17 00:00:00 2001 From: scra Date: Wed, 18 Mar 2026 09:48:22 +0100 Subject: [PATCH 04/18] feat(workflow-executor): define foundational types and port interfaces (#1494) --- packages/workflow-executor/CLAUDE.md | 17 ++++++++ packages/workflow-executor/src/index.ts | 34 ++++++++++++++- .../workflow-executor/src/ports/agent-port.ts | 19 ++++++++ .../workflow-executor/src/ports/run-store.ts | 13 ++++++ .../src/ports/workflow-port.ts | 15 +++++++ .../workflow-executor/src/types/execution.ts | 36 ++++++++++++++++ .../workflow-executor/src/types/record.ts | 20 +++++++++ .../src/types/step-definition.ts | 43 +++++++++++++++++++ .../src/types/step-execution-data.ts | 21 +++++++++ .../src/types/step-history.ts | 23 ++++++++++ packages/workflow-executor/test/index.test.ts | 19 ++++++-- 11 files changed, 255 insertions(+), 5 deletions(-) create mode 100644 packages/workflow-executor/src/ports/agent-port.ts create mode 100644 packages/workflow-executor/src/ports/run-store.ts create mode 100644 packages/workflow-executor/src/ports/workflow-port.ts create mode 100644 packages/workflow-executor/src/types/execution.ts create mode 100644 packages/workflow-executor/src/types/record.ts create mode 100644 packages/workflow-executor/src/types/step-definition.ts create mode 100644 packages/workflow-executor/src/types/step-execution-data.ts create mode 100644 packages/workflow-executor/src/types/step-history.ts diff --git a/packages/workflow-executor/CLAUDE.md b/packages/workflow-executor/CLAUDE.md index 19f7b448c3..085be18e95 100644 --- 
a/packages/workflow-executor/CLAUDE.md +++ b/packages/workflow-executor/CLAUDE.md @@ -21,6 +21,23 @@ Front ──▶ Orchestrator ◀──pull── Executor ──▶ Agent └──────────── progress/results ────────┘ ``` +## Package Structure + +``` +src/ +├── types/ # Core type definitions (@draft) +│ ├── step-definition.ts # StepType enum + step definition interfaces +│ ├── step-history.ts # Step outcome tracking types +│ ├── step-execution-data.ts # Runtime state for in-progress steps +│ ├── record.ts # Record references and data types +│ └── execution.ts # Top-level execution types (context, results) +├── ports/ # IO boundary interfaces (@draft) +│ ├── agent-port.ts # Interface to the Forest Admin agent (datasource) +│ ├── workflow-port.ts # Interface to the orchestrator +│ └── run-store.ts # Interface for persisting run state +└── index.ts # Barrel exports +``` + ## Architecture Principles (Planned) The following principles will guide implementation. None are implemented yet. diff --git a/packages/workflow-executor/src/index.ts b/packages/workflow-executor/src/index.ts index cb0ff5c3b5..1ecbf6fe1d 100644 --- a/packages/workflow-executor/src/index.ts +++ b/packages/workflow-executor/src/index.ts @@ -1 +1,33 @@ -export {}; +export { StepType } from './types/step-definition'; +export type { + StepCategory, + ConditionStepDefinition, + AiTaskStepDefinition, + StepDefinition, +} from './types/step-definition'; + +export type { + StepStatus, + ConditionStepHistory, + AiTaskStepHistory, + StepHistory, +} from './types/step-history'; + +export type { + ConditionStepExecutionData, + AiTaskStepExecutionData, + StepExecutionData, +} from './types/step-execution-data'; + +export type { RecordFieldRef, RecordRef, RecordData } from './types/record'; + +export type { + UserInput, + PendingStepExecution, + StepExecutionResult, + ExecutionContext, +} from './types/execution'; + +export type { AgentPort } from './ports/agent-port'; +export type { McpConfiguration, WorkflowPort } from 
'./ports/workflow-port'; +export type { RunStore } from './ports/run-store'; diff --git a/packages/workflow-executor/src/ports/agent-port.ts b/packages/workflow-executor/src/ports/agent-port.ts new file mode 100644 index 0000000000..5d4f431c7e --- /dev/null +++ b/packages/workflow-executor/src/ports/agent-port.ts @@ -0,0 +1,19 @@ +/** @draft Types derived from the workflow-executor spec -- subject to change. */ + +import type { RecordData } from '../types/record'; + +export interface AgentPort { + getRecord(collectionName: string, recordId: string): Promise; + updateRecord( + collectionName: string, + recordId: string, + values: Record, + ): Promise; + getRelatedData( + collectionName: string, + recordId: string, + relationName: string, + ): Promise; + getActions(collectionName: string): Promise; + executeAction(collectionName: string, actionName: string, recordIds: string[]): Promise; +} diff --git a/packages/workflow-executor/src/ports/run-store.ts b/packages/workflow-executor/src/ports/run-store.ts new file mode 100644 index 0000000000..88063dc4a0 --- /dev/null +++ b/packages/workflow-executor/src/ports/run-store.ts @@ -0,0 +1,13 @@ +/** @draft Types derived from the workflow-executor spec -- subject to change. 
*/ + +import type { RecordData } from '../types/record'; +import type { StepExecutionData } from '../types/step-execution-data'; + +export interface RunStore { + getRecords(runId: string): Promise; + getRecord(runId: string, collectionName: string, recordId: string): Promise; + saveRecord(runId: string, record: RecordData): Promise; + getStepExecutions(runId: string): Promise; + getStepExecution(runId: string, stepIndex: number): Promise; + saveStepExecution(runId: string, stepExecution: StepExecutionData): Promise; +} diff --git a/packages/workflow-executor/src/ports/workflow-port.ts b/packages/workflow-executor/src/ports/workflow-port.ts new file mode 100644 index 0000000000..806bb33980 --- /dev/null +++ b/packages/workflow-executor/src/ports/workflow-port.ts @@ -0,0 +1,15 @@ +/** @draft Types derived from the workflow-executor spec -- subject to change. */ + +import type { PendingStepExecution } from '../types/execution'; +import type { RecordRef } from '../types/record'; +import type { StepHistory } from '../types/step-history'; + +/** Placeholder -- will be typed as McpConfiguration from @forestadmin/ai-proxy/mcp-client once added as dependency. */ +export type McpConfiguration = unknown; + +export interface WorkflowPort { + getPendingStepExecutions(): Promise; + completeStepExecution(runId: string, stepHistory: StepHistory): Promise; + getCollectionRef(collectionName: string): Promise; + getMcpServerConfigs(): Promise; +} diff --git a/packages/workflow-executor/src/types/execution.ts b/packages/workflow-executor/src/types/execution.ts new file mode 100644 index 0000000000..953810f89e --- /dev/null +++ b/packages/workflow-executor/src/types/execution.ts @@ -0,0 +1,36 @@ +/** @draft Types derived from the workflow-executor spec -- subject to change. 
*/ + +import type { RecordData } from './record'; +import type { StepDefinition } from './step-definition'; +import type { StepHistory } from './step-history'; +import type { AgentPort } from '../ports/agent-port'; +import type { RunStore } from '../ports/run-store'; +import type { WorkflowPort } from '../ports/workflow-port'; + +export type UserInput = { type: 'confirmation'; confirmed: boolean }; + +export interface PendingStepExecution { + runId: string; + step: StepDefinition; + stepHistory: StepHistory; + previousSteps: StepHistory[]; + availableRecords: RecordData[]; + userInput?: UserInput; +} + +export interface StepExecutionResult { + stepHistory: StepHistory; + newAvailableRecord?: RecordData; +} + +export interface ExecutionContext { + runId: string; + /** Placeholder -- will be typed as AiConfiguration from @forestadmin/ai-proxy once added as dependency. */ + model: unknown; + agentPort: AgentPort; + workflowPort: WorkflowPort; + runStore: RunStore; + history: StepHistory[]; + /** Placeholder -- will be typed as RemoteTool[] (to be re-exported from @forestadmin/ai-proxy). */ + remoteTools: unknown[]; +} diff --git a/packages/workflow-executor/src/types/record.ts b/packages/workflow-executor/src/types/record.ts new file mode 100644 index 0000000000..9610da056b --- /dev/null +++ b/packages/workflow-executor/src/types/record.ts @@ -0,0 +1,20 @@ +/** @draft Types derived from the workflow-executor spec -- subject to change. 
*/ + +export interface RecordFieldRef { + fieldName: string; + displayName: string; + type: string; + isRelationship: boolean; + referencedCollectionName?: string; +} + +export interface RecordRef { + recordId: string; + collectionName: string; + collectionDisplayName: string; + fields: RecordFieldRef[]; +} + +export interface RecordData extends RecordRef { + values: Record; +} diff --git a/packages/workflow-executor/src/types/step-definition.ts b/packages/workflow-executor/src/types/step-definition.ts new file mode 100644 index 0000000000..4471887fb1 --- /dev/null +++ b/packages/workflow-executor/src/types/step-definition.ts @@ -0,0 +1,43 @@ +/** @draft Types derived from the workflow-executor spec -- subject to change. */ + +export enum StepType { + Condition = 'condition', + ReadRecord = 'read-record', + UpdateRecord = 'update-record', + TriggerAction = 'trigger-action', + LoadRelatedRecord = 'load-related-record', +} + +interface BaseStepDefinition { + id: string; + type: StepType; + aiConfigName?: string; +} + +export interface ConditionStepDefinition extends BaseStepDefinition { + type: StepType.Condition; + options: string[]; + prompt?: string; +} + +export interface AiTaskStepDefinition extends BaseStepDefinition { + type: + | StepType.ReadRecord + | StepType.UpdateRecord + | StepType.TriggerAction + | StepType.LoadRelatedRecord; + recordSourceStepId?: string; + prompt?: string; + automaticCompletion?: boolean; + allowedTools?: string[]; + remoteToolsSourceId?: string; +} + +export type StepDefinition = ConditionStepDefinition | AiTaskStepDefinition; + +/** + * Coarse categorization of steps. StepType has 5 fine-grained values; + * StepCategory collapses the 4 non-condition types into 'ai-task'. + * Used as discriminant in StepHistory and StepExecutionData. 
+ */ +export type StepCategory = 'condition' | 'ai-task'; diff --git a/packages/workflow-executor/src/types/step-execution-data.ts b/packages/workflow-executor/src/types/step-execution-data.ts new file mode 100644 index 0000000000..bc95c5b28e --- /dev/null +++ b/packages/workflow-executor/src/types/step-execution-data.ts @@ -0,0 +1,21 @@ +/** @draft Types derived from the workflow-executor spec -- subject to change. */ + +import type { RecordRef } from './record'; + +interface BaseStepExecutionData { + stepIndex: number; + executionParams?: Record; + executionResult?: Record; +} + +export interface ConditionStepExecutionData extends BaseStepExecutionData { + type: 'condition'; +} + +export interface AiTaskStepExecutionData extends BaseStepExecutionData { + type: 'ai-task'; + toolConfirmationInterruption?: Record; + selectedRecordRef?: RecordRef; +} + +export type StepExecutionData = ConditionStepExecutionData | AiTaskStepExecutionData; diff --git a/packages/workflow-executor/src/types/step-history.ts b/packages/workflow-executor/src/types/step-history.ts new file mode 100644 index 0000000000..35dd1e7fb2 --- /dev/null +++ b/packages/workflow-executor/src/types/step-history.ts @@ -0,0 +1,23 @@ +/** @draft Types derived from the workflow-executor spec -- subject to change. */ + +export type StepStatus = 'success' | 'error' | 'awaiting-input'; + +interface BaseStepHistory { + stepId: string; + stepIndex: number; + status: StepStatus; + /** Present when status is 'error'. */ + error?: string; +} + +export interface ConditionStepHistory extends BaseStepHistory { + type: 'condition'; + /** Present when status is 'success'. 
*/ + selectedOption?: string; +} + +export interface AiTaskStepHistory extends BaseStepHistory { + type: 'ai-task'; +} + +export type StepHistory = ConditionStepHistory | AiTaskStepHistory; diff --git a/packages/workflow-executor/test/index.test.ts b/packages/workflow-executor/test/index.test.ts index d80b0c4e5b..05affa035c 100644 --- a/packages/workflow-executor/test/index.test.ts +++ b/packages/workflow-executor/test/index.test.ts @@ -1,7 +1,18 @@ -import * as mod from '../src/index'; +import { StepType } from '../src/index'; -describe('workflow-executor', () => { - it('should export an empty module', () => { - expect(Object.keys(mod)).toHaveLength(0); +describe('StepType', () => { + it('should expose exactly 5 step types', () => { + const values = Object.values(StepType); + expect(values).toHaveLength(5); + }); + + it.each([ + ['Condition', 'condition'], + ['ReadRecord', 'read-record'], + ['UpdateRecord', 'update-record'], + ['TriggerAction', 'trigger-action'], + ['LoadRelatedRecord', 'load-related-record'], + ] as const)('should have %s = "%s"', (key, value) => { + expect(StepType[key]).toBe(value); }); }); From 127b579824258aed1cacd7ffb72024d7e31ebf06 Mon Sep 17 00:00:00 2001 From: scra Date: Wed, 18 Mar 2026 14:00:03 +0100 Subject: [PATCH 05/18] feat(workflow-executor): implement condition step executor (AI-only) (#1495) --- packages/workflow-executor/CLAUDE.md | 44 +- packages/workflow-executor/package.json | 4 + packages/workflow-executor/src/errors.ts | 23 + .../src/executors/base-step-executor.ts | 106 +++++ .../src/executors/condition-step-executor.ts | 101 +++++ packages/workflow-executor/src/index.ts | 5 + .../workflow-executor/src/ports/run-store.ts | 12 +- .../workflow-executor/src/types/execution.ts | 37 +- .../src/types/step-definition.ts | 3 +- .../src/types/step-execution-data.ts | 6 +- .../src/types/step-history.ts | 19 +- .../test/executors/base-step-executor.test.ts | 400 ++++++++++++++++++ .../executors/condition-step-executor.test.ts | 310 
++++++++++++++ yarn.lock | 44 ++ 14 files changed, 1073 insertions(+), 41 deletions(-) create mode 100644 packages/workflow-executor/src/errors.ts create mode 100644 packages/workflow-executor/src/executors/base-step-executor.ts create mode 100644 packages/workflow-executor/src/executors/condition-step-executor.ts create mode 100644 packages/workflow-executor/test/executors/base-step-executor.test.ts create mode 100644 packages/workflow-executor/test/executors/condition-step-executor.test.ts diff --git a/packages/workflow-executor/CLAUDE.md b/packages/workflow-executor/CLAUDE.md index 085be18e95..2be0522bca 100644 --- a/packages/workflow-executor/CLAUDE.md +++ b/packages/workflow-executor/CLAUDE.md @@ -6,6 +6,25 @@ TypeScript library (framework-agnostic) that executes workflow steps on the client's infrastructure, alongside the Forest Admin agent. The orchestrator never sees client data — it only sends step definitions; this package fetches them and runs them locally. +## Why this package exists — Frontend → Backend migration + +Workflows currently run entirely in the **frontend** (`forestadmin/frontend`). The front parses BPMN, manages the run state machine, calls the AI, executes tools, and handles user interactions — all in the browser. + +This works for interactive use cases but blocks **automation**: scheduled workflows, API-triggered runs, and headless execution all require a human with a browser open. The goal of this migration is to move workflow execution to the **backend** (client-side agent infrastructure) so workflows can run without a frontend and without human intervention. 
+ +### What stays on the front +- Workflow designer (BPMN editor) +- Run monitoring / progress display +- Manual decisions when the AI can't decide (`manual-decision` status) + +### What moves to the backend (this package) +- Step execution (condition decisions, AI tasks, record operations) +- AI calls (gateway option selection, tool selection, tool execution) +- Record selection and data access (via AgentPort) + +### Constraint: must be ISO with front +The executor must produce the same behavior as the frontend implementation (`forestadmin/frontend`, `app/features/workflow/`). Same tool schemas, same AI interactions, same fallback logic. + ## System Architecture The workflow system is split into 4 components: @@ -16,15 +35,14 @@ The workflow system is split into 4 components: - **Agent** — The Forest Admin agent (`@forestadmin/agent`). Acts as a proxy for the executor — provides access to the datasource layer (collections, actions, fields) so the executor can read/write client data without direct database access. 
``` -Front ──▶ Orchestrator ◀──pull── Executor ──▶ Agent (datasources) - ▲ │ - └──────────── progress/results ────────┘ +Front ◀──▶ Orchestrator ◀──pull/push──▶ Executor ──▶ Agent (datasources) ``` ## Package Structure ``` src/ +├── errors.ts # WorkflowExecutorError, MissingToolCallError, MalformedToolCallError ├── types/ # Core type definitions (@draft) │ ├── step-definition.ts # StepType enum + step definition interfaces │ ├── step-history.ts # Step outcome tracking types @@ -34,19 +52,21 @@ src/ ├── ports/ # IO boundary interfaces (@draft) │ ├── agent-port.ts # Interface to the Forest Admin agent (datasource) │ ├── workflow-port.ts # Interface to the orchestrator -│ └── run-store.ts # Interface for persisting run state +│ └── run-store.ts # Interface for persisting run state (scoped to a run) +├── executors/ # Step executor implementations +│ ├── base-step-executor.ts # Abstract base class (context injection + shared helpers) +│ └── condition-step-executor.ts # AI-powered condition step (chooses among options) └── index.ts # Barrel exports ``` -## Architecture Principles (Planned) - -The following principles will guide implementation. None are implemented yet. +## Architecture Principles -- **Pull-based** — The executor will poll for pending steps via a port interface. A `triggerPoll(runId)` mechanism will fast-track a specific run. -- **Atomic** — Each step will execute in isolation. A run store will maintain continuity between steps. -- **Privacy** — Zero client data leaves the client's infrastructure. -- **Ports (IO injection)** — All external IO will go through injected port interfaces, keeping the core pure and testable. -- **AI integration** — Will use `@forestadmin/ai-proxy` (Router) to create models and load remote tools. +- **Pull-based** — The executor polls for pending steps via a port interface (`WorkflowPort.getPendingStepExecutions`; polling loop not yet implemented). +- **Atomic** — Each step executes in isolation. 
A run store (scoped per run) maintains continuity between steps. +- **Privacy** — Zero client data leaves the client's infrastructure. `StepHistory` is sent to the orchestrator and must NEVER contain client data. Privacy-sensitive information (e.g. AI reasoning) must stay in `StepExecutionData` (persisted in the RunStore, client-side only). +- **Ports (IO injection)** — All external IO goes through injected port interfaces, keeping the core pure and testable. +- **AI integration** — Uses `@langchain/core` (`BaseChatModel`, `DynamicStructuredTool`) for AI-powered steps. `ExecutionContext.model` is a `BaseChatModel`. +- **No recovery/retry** — Once the executor returns a step result to the orchestrator, the step is considered executed. There is no mechanism to re-dispatch a step, so executors must NOT include recovery checks (e.g. checking the RunStore for cached results before executing). Each step executes exactly once. ## Commands diff --git a/packages/workflow-executor/package.json b/packages/workflow-executor/package.json index a3fe055e30..3c838da931 100644 --- a/packages/workflow-executor/package.json +++ b/packages/workflow-executor/package.json @@ -21,5 +21,9 @@ "clean": "rm -rf coverage dist", "lint": "eslint src test", "test": "jest" + }, + "dependencies": { + "@langchain/core": "1.1.33", + "zod": "4.3.6" } } diff --git a/packages/workflow-executor/src/errors.ts b/packages/workflow-executor/src/errors.ts new file mode 100644 index 0000000000..3a853e949c --- /dev/null +++ b/packages/workflow-executor/src/errors.ts @@ -0,0 +1,23 @@ +/* eslint-disable max-classes-per-file */ + +export class WorkflowExecutorError extends Error { + constructor(message: string) { + super(message); + this.name = this.constructor.name; + } +} + +export class MissingToolCallError extends WorkflowExecutorError { + constructor() { + super('AI did not return a tool call'); + } +} + +export class MalformedToolCallError extends WorkflowExecutorError { + readonly toolName: string; + + 
constructor(toolName: string, details: string) { + super(`AI returned a malformed tool call for "${toolName}": ${details}`); + this.toolName = toolName; + } +} diff --git a/packages/workflow-executor/src/executors/base-step-executor.ts b/packages/workflow-executor/src/executors/base-step-executor.ts new file mode 100644 index 0000000000..6c4fc76944 --- /dev/null +++ b/packages/workflow-executor/src/executors/base-step-executor.ts @@ -0,0 +1,106 @@ +import type { ExecutionContext, StepExecutionResult } from '../types/execution'; +import type { StepDefinition } from '../types/step-definition'; +import type { StepExecutionData } from '../types/step-execution-data'; +import type { StepHistory } from '../types/step-history'; +import type { AIMessage, BaseMessage } from '@langchain/core/messages'; +import type { DynamicStructuredTool } from '@langchain/core/tools'; + +import { SystemMessage } from '@langchain/core/messages'; + +import { MalformedToolCallError, MissingToolCallError } from '../errors'; + +export default abstract class BaseStepExecutor< + TStep extends StepDefinition = StepDefinition, + THistory extends StepHistory = StepHistory, +> { + protected readonly context: ExecutionContext; + + constructor(context: ExecutionContext) { + this.context = context; + } + + abstract execute(step: TStep, stepHistory: THistory): Promise; + + /** + * Returns a SystemMessage array summarizing previously executed steps. + * Empty array when there is no history. Ready to spread into a messages array. + */ + protected async buildPreviousStepsMessages(): Promise { + if (!this.context.history.length) return []; + + const summary = await this.summarizePreviousSteps(); + + return [new SystemMessage(summary)]; + } + + /** + * Builds a text summary of previously executed steps for AI prompts. + * Correlates history entries (step + stepHistory pairs) with executionParams + * from the RunStore (matched by stepHistory.stepIndex). 
+ * When no executionParams is available, falls back to StepHistory details. + */ + private async summarizePreviousSteps(): Promise { + const allStepExecutions = await this.context.runStore.getStepExecutions(); + + return this.context.history + .map(({ step, stepHistory }) => { + const execution = allStepExecutions.find(e => e.stepIndex === stepHistory.stepIndex); + + return this.buildStepSummary(step, stepHistory, execution); + }) + .join('\n\n'); + } + + private buildStepSummary( + step: StepDefinition, + stepHistory: StepHistory, + execution: StepExecutionData | undefined, + ): string { + const prompt = step.prompt ?? '(no prompt)'; + const header = `Step "${step.id}" (index ${stepHistory.stepIndex}):`; + const lines = [header, ` Prompt: ${prompt}`]; + + if (execution?.executionParams) { + lines.push(` Result: ${JSON.stringify(execution.executionParams)}`); + } else { + const { stepId, stepIndex, type, ...historyDetails } = stepHistory; + lines.push(` History: ${JSON.stringify(historyDetails)}`); + } + + return lines.join('\n'); + } + + /** + * Binds a single tool to the model, invokes it, and extracts the tool call args. + * Throws MalformedToolCallError or MissingToolCallError on invalid AI responses. + */ + protected async invokeWithTool>( + messages: BaseMessage[], + tool: DynamicStructuredTool, + ): Promise { + const modelWithTool = this.context.model.bindTools([tool], { tool_choice: 'any' }); + const response = await modelWithTool.invoke(messages); + + return this.extractToolCallArgs(response); + } + + /** + * Extracts the first tool call's args from an AI response. + * Throws if the AI returned a malformed tool call (invalid_tool_calls) or no tool call at all. + */ + private extractToolCallArgs>(response: AIMessage): T { + const toolCall = response.tool_calls?.[0]; + if (toolCall?.args) return toolCall.args as T; + + const invalidCall = response.invalid_tool_calls?.[0]; + + if (invalidCall) { + throw new MalformedToolCallError( + invalidCall.name ?? 
'unknown', + invalidCall.error ?? 'no details available', + ); + } + + throw new MissingToolCallError(); + } +} diff --git a/packages/workflow-executor/src/executors/condition-step-executor.ts b/packages/workflow-executor/src/executors/condition-step-executor.ts new file mode 100644 index 0000000000..b90d47ad81 --- /dev/null +++ b/packages/workflow-executor/src/executors/condition-step-executor.ts @@ -0,0 +1,101 @@ +import type { StepExecutionResult } from '../types/execution'; +import type { ConditionStepDefinition } from '../types/step-definition'; +import type { ConditionStepHistory } from '../types/step-history'; + +import { HumanMessage, SystemMessage } from '@langchain/core/messages'; +import { DynamicStructuredTool } from '@langchain/core/tools'; +import { z } from 'zod'; + +import BaseStepExecutor from './base-step-executor'; + +interface GatewayToolArgs { + option: string | null; + reasoning: string; + question: string; +} + +const GATEWAY_SYSTEM_PROMPT = `You are an AI agent selecting the correct option for a workflow gateway decision. + +**Task**: Analyze the question and available options, then select the option that DIRECTLY answers the question. Options must be literal answers, not interpretations. + +**Critical Rule**: Options must semantically match possible answers to the question. +- Question "Does X contain Y?" expects options like "yes"/"no", NOT colors or unrelated values +- Question "What is the status?" 
expects options like "active"/"inactive", NOT arbitrary words +- If options don't match expected answer types, select null + +**NEVER invent mappings** between options and answers (e.g., never assume "purple"="no" or "red"="yes") + +**When to select null**: +- Options are semantically unrelated to the question type (colors for yes/no questions) +- None of the options literally match the expected answer +- The question is ambiguous or lacks necessary context +- You are less than 80% confident in any option + +**Reasoning format**: +- State which option you selected and why +- If selecting null: explain why options don't match the question +- Do not refer to yourself as "I" in the response, use a passive formulation instead.`; + +export default class ConditionStepExecutor extends BaseStepExecutor< + ConditionStepDefinition, + ConditionStepHistory +> { + async execute( + step: ConditionStepDefinition, + stepHistory: ConditionStepHistory, + ): Promise { + const tool = new DynamicStructuredTool({ + name: 'choose-gateway-option', + description: + 'Select the option that answers the question. ' + + 'Use null if no option matches or you are uncertain. ' + + 'Explain your reasoning.', + schema: z.object({ + reasoning: z.string().describe('The reasoning behind the choice'), + question: z.string().describe('The question to answer by choosing an option'), + option: z + .enum(step.options) + .nullable() + .describe('The chosen option, or null if no option clearly answers the question.'), + }), + func: async input => JSON.stringify(input), + }); + + const messages = [ + ...(await this.buildPreviousStepsMessages()), + new SystemMessage(GATEWAY_SYSTEM_PROMPT), + new HumanMessage(`**Question**: ${step.prompt ?? 
'Choose the most appropriate option.'}`), + ]; + + let args: GatewayToolArgs; + + try { + args = await this.invokeWithTool(messages, tool); + } catch (error: unknown) { + return { + stepHistory: { + ...stepHistory, + status: 'error', + error: (error as Error).message, + }, + }; + } + + const { option: selectedOption, reasoning } = args; + + await this.context.runStore.saveStepExecution({ + type: 'condition', + stepIndex: stepHistory.stepIndex, + executionParams: { answer: selectedOption, reasoning }, + executionResult: selectedOption ? { answer: selectedOption } : undefined, + }); + + if (!selectedOption) { + return { stepHistory: { ...stepHistory, status: 'manual-decision' } }; + } + + return { + stepHistory: { ...stepHistory, status: 'success', selectedOption }, + }; + } +} diff --git a/packages/workflow-executor/src/index.ts b/packages/workflow-executor/src/index.ts index 1ecbf6fe1d..9d570f5729 100644 --- a/packages/workflow-executor/src/index.ts +++ b/packages/workflow-executor/src/index.ts @@ -22,6 +22,7 @@ export type { export type { RecordFieldRef, RecordRef, RecordData } from './types/record'; export type { + StepRecord, UserInput, PendingStepExecution, StepExecutionResult, @@ -31,3 +32,7 @@ export type { export type { AgentPort } from './ports/agent-port'; export type { McpConfiguration, WorkflowPort } from './ports/workflow-port'; export type { RunStore } from './ports/run-store'; + +export { WorkflowExecutorError, MissingToolCallError, MalformedToolCallError } from './errors'; +export { default as BaseStepExecutor } from './executors/base-step-executor'; +export { default as ConditionStepExecutor } from './executors/condition-step-executor'; diff --git a/packages/workflow-executor/src/ports/run-store.ts b/packages/workflow-executor/src/ports/run-store.ts index 88063dc4a0..212ab14088 100644 --- a/packages/workflow-executor/src/ports/run-store.ts +++ b/packages/workflow-executor/src/ports/run-store.ts @@ -4,10 +4,10 @@ import type { RecordData } from 
'../types/record'; import type { StepExecutionData } from '../types/step-execution-data'; export interface RunStore { - getRecords(runId: string): Promise; - getRecord(runId: string, collectionName: string, recordId: string): Promise; - saveRecord(runId: string, record: RecordData): Promise; - getStepExecutions(runId: string): Promise; - getStepExecution(runId: string, stepIndex: number): Promise; - saveStepExecution(runId: string, stepExecution: StepExecutionData): Promise; + getRecords(): Promise; + getRecord(collectionName: string, recordId: string): Promise; + saveRecord(record: RecordData): Promise; + getStepExecutions(): Promise; + getStepExecution(stepIndex: number): Promise; + saveStepExecution(stepExecution: StepExecutionData): Promise; } diff --git a/packages/workflow-executor/src/types/execution.ts b/packages/workflow-executor/src/types/execution.ts index 953810f89e..e983aad4b3 100644 --- a/packages/workflow-executor/src/types/execution.ts +++ b/packages/workflow-executor/src/types/execution.ts @@ -1,36 +1,39 @@ /** @draft Types derived from the workflow-executor spec -- subject to change. 
*/ -import type { RecordData } from './record'; +import type { RecordRef } from './record'; import type { StepDefinition } from './step-definition'; import type { StepHistory } from './step-history'; import type { AgentPort } from '../ports/agent-port'; import type { RunStore } from '../ports/run-store'; import type { WorkflowPort } from '../ports/workflow-port'; +import type { BaseChatModel } from '@langchain/core/language_models/chat_models'; + +export interface StepRecord { + step: StepDefinition; + stepHistory: StepHistory; +} export type UserInput = { type: 'confirmation'; confirmed: boolean }; export interface PendingStepExecution { - runId: string; - step: StepDefinition; - stepHistory: StepHistory; - previousSteps: StepHistory[]; - availableRecords: RecordData[]; - userInput?: UserInput; + readonly runId: string; + readonly step: StepDefinition; + readonly stepHistory: StepHistory; + readonly previousSteps: ReadonlyArray; + readonly availableRecords: ReadonlyArray; + readonly userInput?: UserInput; } export interface StepExecutionResult { stepHistory: StepHistory; - newAvailableRecord?: RecordData; } export interface ExecutionContext { - runId: string; - /** Placeholder -- will be typed as AiConfiguration from @forestadmin/ai-proxy once added as dependency. */ - model: unknown; - agentPort: AgentPort; - workflowPort: WorkflowPort; - runStore: RunStore; - history: StepHistory[]; - /** Placeholder -- will be typed as RemoteTool[] (to be re-exported from @forestadmin/ai-proxy). 
*/ - remoteTools: unknown[]; + readonly runId: string; + readonly model: BaseChatModel; + readonly agentPort: AgentPort; + readonly workflowPort: WorkflowPort; + readonly runStore: RunStore; + readonly history: ReadonlyArray>; + readonly remoteTools: readonly unknown[]; } diff --git a/packages/workflow-executor/src/types/step-definition.ts b/packages/workflow-executor/src/types/step-definition.ts index 4471887fb1..dffae8c312 100644 --- a/packages/workflow-executor/src/types/step-definition.ts +++ b/packages/workflow-executor/src/types/step-definition.ts @@ -16,7 +16,7 @@ interface BaseStepDefinition { export interface ConditionStepDefinition extends BaseStepDefinition { type: StepType.Condition; - options: string[]; + options: [string, ...string[]]; prompt?: string; } @@ -38,6 +38,5 @@ export type StepDefinition = ConditionStepDefinition | AiTaskStepDefinition; /** * Coarse categorization of steps. StepType has 5 fine-grained values; * StepCategory collapses the 4 non-condition types into 'ai-task'. - * Used as discriminant in StepHistory and StepExecutionData. 
*/ export type StepCategory = 'condition' | 'ai-task'; diff --git a/packages/workflow-executor/src/types/step-execution-data.ts b/packages/workflow-executor/src/types/step-execution-data.ts index bc95c5b28e..5b5549c875 100644 --- a/packages/workflow-executor/src/types/step-execution-data.ts +++ b/packages/workflow-executor/src/types/step-execution-data.ts @@ -4,16 +4,18 @@ import type { RecordRef } from './record'; interface BaseStepExecutionData { stepIndex: number; - executionParams?: Record; - executionResult?: Record; } export interface ConditionStepExecutionData extends BaseStepExecutionData { type: 'condition'; + executionParams?: { answer: string | null; reasoning?: string }; + executionResult?: { answer: string }; } export interface AiTaskStepExecutionData extends BaseStepExecutionData { type: 'ai-task'; + executionParams?: Record; + executionResult?: Record; toolConfirmationInterruption?: Record; selectedRecordRef?: RecordRef; } diff --git a/packages/workflow-executor/src/types/step-history.ts b/packages/workflow-executor/src/types/step-history.ts index 35dd1e7fb2..bf9b66b61a 100644 --- a/packages/workflow-executor/src/types/step-history.ts +++ b/packages/workflow-executor/src/types/step-history.ts @@ -1,23 +1,38 @@ /** @draft Types derived from the workflow-executor spec -- subject to change. */ -export type StepStatus = 'success' | 'error' | 'awaiting-input'; +type BaseStepStatus = 'success' | 'error'; +/** Condition steps can fall back to human decision when the AI is uncertain. */ +export type ConditionStepStatus = BaseStepStatus | 'manual-decision'; + +/** AI task steps can pause mid-execution to await user input (e.g. tool confirmation). */ +export type AiTaskStepStatus = BaseStepStatus | 'awaiting-input'; + +/** Union of all step statuses. */ +export type StepStatus = ConditionStepStatus | AiTaskStepStatus; + +/** + * StepHistory is sent to the orchestrator — it must NEVER contain client data. + * Any privacy-sensitive information (e.g. 
AI reasoning) must stay in + * StepExecutionData (persisted in the RunStore, client-side only). + */ interface BaseStepHistory { stepId: string; stepIndex: number; - status: StepStatus; /** Present when status is 'error'. */ error?: string; } export interface ConditionStepHistory extends BaseStepHistory { type: 'condition'; + status: ConditionStepStatus; /** Present when status is 'success'. */ selectedOption?: string; } export interface AiTaskStepHistory extends BaseStepHistory { type: 'ai-task'; + status: AiTaskStepStatus; } export type StepHistory = ConditionStepHistory | AiTaskStepHistory; diff --git a/packages/workflow-executor/test/executors/base-step-executor.test.ts b/packages/workflow-executor/test/executors/base-step-executor.test.ts new file mode 100644 index 0000000000..73f5e716b1 --- /dev/null +++ b/packages/workflow-executor/test/executors/base-step-executor.test.ts @@ -0,0 +1,400 @@ +import type { RunStore } from '../../src/ports/run-store'; +import type { ExecutionContext, StepExecutionResult } from '../../src/types/execution'; +import type { StepDefinition } from '../../src/types/step-definition'; +import type { StepExecutionData } from '../../src/types/step-execution-data'; +import type { StepHistory } from '../../src/types/step-history'; +import type { BaseMessage, SystemMessage } from '@langchain/core/messages'; +import type { DynamicStructuredTool } from '@langchain/core/tools'; + +import { MalformedToolCallError, MissingToolCallError } from '../../src/errors'; +import BaseStepExecutor from '../../src/executors/base-step-executor'; +import { StepType } from '../../src/types/step-definition'; + +/** Concrete subclass that exposes protected methods for testing. 
*/ +class TestableExecutor extends BaseStepExecutor { + async execute(): Promise { + throw new Error('not used'); + } + + override buildPreviousStepsMessages(): Promise { + return super.buildPreviousStepsMessages(); + } + + override invokeWithTool>( + messages: BaseMessage[], + tool: DynamicStructuredTool, + ): Promise { + return super.invokeWithTool(messages, tool); + } +} + +function makeHistoryEntry( + overrides: { stepId?: string; stepIndex?: number; prompt?: string } = {}, +): { step: StepDefinition; stepHistory: StepHistory } { + return { + step: { + id: overrides.stepId ?? 'step-1', + type: StepType.Condition, + options: ['A', 'B'], + prompt: overrides.prompt ?? 'Pick one', + }, + stepHistory: { + type: 'condition', + stepId: overrides.stepId ?? 'step-1', + stepIndex: overrides.stepIndex ?? 0, + status: 'success', + }, + }; +} + +function makeMockRunStore(stepExecutions: StepExecutionData[] = []): RunStore { + return { + getRecords: jest.fn().mockResolvedValue([]), + getRecord: jest.fn().mockResolvedValue(null), + saveRecord: jest.fn().mockResolvedValue(undefined), + getStepExecutions: jest.fn().mockResolvedValue(stepExecutions), + getStepExecution: jest.fn().mockResolvedValue(null), + saveStepExecution: jest.fn().mockResolvedValue(undefined), + }; +} + +function makeContext(overrides: Partial = {}): ExecutionContext { + return { + runId: 'run-1', + model: {} as ExecutionContext['model'], + agentPort: {} as ExecutionContext['agentPort'], + workflowPort: {} as ExecutionContext['workflowPort'], + runStore: makeMockRunStore(), + history: [], + remoteTools: [], + ...overrides, + }; +} + +describe('BaseStepExecutor', () => { + describe('buildPreviousStepsMessages', () => { + it('returns empty array for empty history', async () => { + const executor = new TestableExecutor(makeContext()); + + expect(await executor.buildPreviousStepsMessages()).toEqual([]); + }); + + it('includes prompt and executionParams from previous steps', async () => { + const executor = new 
TestableExecutor( + makeContext({ + history: [makeHistoryEntry({ stepId: 'cond-1', stepIndex: 0, prompt: 'Approve?' })], + runStore: makeMockRunStore([ + { + type: 'condition', + stepIndex: 0, + executionParams: { answer: 'Yes', reasoning: 'Order is valid' }, + executionResult: { answer: 'Yes' }, + }, + ]), + }), + ); + + const result = await executor + .buildPreviousStepsMessages() + .then(msgs => msgs[0]?.content ?? ''); + + expect(result).toContain('Step "cond-1"'); + expect(result).toContain('Prompt: Approve?'); + expect(result).toContain('Result: {"answer":"Yes","reasoning":"Order is valid"}'); + }); + + it('falls back to History when step has no executionParams in RunStore', async () => { + const executor = new TestableExecutor( + makeContext({ + history: [ + makeHistoryEntry({ stepId: 'cond-1', stepIndex: 0 }), + makeHistoryEntry({ stepId: 'cond-2', stepIndex: 1, prompt: 'Second?' }), + ], + runStore: makeMockRunStore([ + { type: 'condition', stepIndex: 0 }, + { + type: 'condition', + stepIndex: 1, + executionParams: { answer: 'No', reasoning: 'Clearly no' }, + }, + ]), + }), + ); + + const result = await executor + .buildPreviousStepsMessages() + .then(msgs => msgs[0]?.content ?? 
''); + + expect(result).toContain('Step "cond-1"'); + expect(result).toContain('History: {"status":"success"}'); + expect(result).toContain('Step "cond-2"'); + expect(result).toContain('Result: {"answer":"No","reasoning":"Clearly no"}'); + }); + + it('falls back to History when no matching step execution in RunStore', async () => { + const executor = new TestableExecutor( + makeContext({ + history: [ + makeHistoryEntry({ stepId: 'orphan', stepIndex: 5, prompt: 'Orphan step' }), + makeHistoryEntry({ stepId: 'matched', stepIndex: 1, prompt: 'Matched step' }), + ], + runStore: makeMockRunStore([ + { + type: 'condition', + stepIndex: 1, + executionParams: { answer: 'B', reasoning: 'Option B fits' }, + }, + ]), + }), + ); + + const result = await executor + .buildPreviousStepsMessages() + .then(msgs => msgs[0]?.content ?? ''); + + expect(result).toContain('Step "orphan"'); + expect(result).toContain('History: {"status":"success"}'); + expect(result).toContain('Step "matched"'); + expect(result).toContain('Result: {"answer":"B","reasoning":"Option B fits"}'); + }); + + it('includes selectedOption in History for condition steps', async () => { + const entry = makeHistoryEntry({ + stepId: 'cond-approval', + stepIndex: 0, + prompt: 'Approved?', + }); + (entry.stepHistory as { selectedOption?: string }).selectedOption = 'Yes'; + + const executor = new TestableExecutor( + makeContext({ + history: [entry], + runStore: makeMockRunStore([]), + }), + ); + + const result = await executor + .buildPreviousStepsMessages() + .then(msgs => msgs[0]?.content ?? 
''); + + expect(result).toContain('Step "cond-approval"'); + expect(result).toContain('"selectedOption":"Yes"'); + }); + + it('includes error in History for failed steps', async () => { + const entry = makeHistoryEntry({ + stepId: 'failing-step', + stepIndex: 0, + prompt: 'Do something', + }); + entry.stepHistory.status = 'error'; + (entry.stepHistory as { error?: string }).error = 'AI could not match an option'; + + const executor = new TestableExecutor( + makeContext({ + history: [entry], + runStore: makeMockRunStore([]), + }), + ); + + const result = await executor + .buildPreviousStepsMessages() + .then(msgs => msgs[0]?.content ?? ''); + + expect(result).toContain('"status":"error"'); + expect(result).toContain('"error":"AI could not match an option"'); + }); + + it('includes status in History for ai-task steps without RunStore data', async () => { + const entry: { step: StepDefinition; stepHistory: StepHistory } = { + step: { id: 'ai-step', type: StepType.ReadRecord, prompt: 'Run task' }, + stepHistory: { type: 'ai-task', stepId: 'ai-step', stepIndex: 0, status: 'awaiting-input' }, + }; + + const executor = new TestableExecutor( + makeContext({ + history: [entry], + runStore: makeMockRunStore([]), + }), + ); + + const result = await executor + .buildPreviousStepsMessages() + .then(msgs => msgs[0]?.content ?? 
''); + + expect(result).toContain('Step "ai-step"'); + expect(result).toContain('History: {"status":"awaiting-input"}'); + }); + + it('uses Result when RunStore has executionParams, History otherwise', async () => { + const condEntry = makeHistoryEntry({ + stepId: 'cond-1', + stepIndex: 0, + prompt: 'Approved?', + }); + (condEntry.stepHistory as { selectedOption?: string }).selectedOption = 'Yes'; + + const aiEntry: { step: StepDefinition; stepHistory: StepHistory } = { + step: { id: 'read-customer', type: StepType.ReadRecord, prompt: 'Read name' }, + stepHistory: { type: 'ai-task', stepId: 'read-customer', stepIndex: 1, status: 'success' }, + }; + + const executor = new TestableExecutor( + makeContext({ + history: [condEntry, aiEntry], + runStore: makeMockRunStore([ + { + type: 'ai-task', + stepIndex: 1, + executionParams: { answer: 'John Doe' }, + }, + ]), + }), + ); + + const result = await executor + .buildPreviousStepsMessages() + .then(msgs => msgs[0]?.content ?? ''); + + expect(result).toContain('Step "cond-1"'); + expect(result).toContain('History: {"status":"success","selectedOption":"Yes"}'); + expect(result).toContain('Step "read-customer"'); + expect(result).toContain('Result: {"answer":"John Doe"}'); + }); + + it('prefers RunStore executionParams over History fallback', async () => { + const entry = makeHistoryEntry({ stepId: 'cond-1', stepIndex: 0, prompt: 'Pick one' }); + (entry.stepHistory as { selectedOption?: string }).selectedOption = 'A'; + + const executor = new TestableExecutor( + makeContext({ + history: [entry], + runStore: makeMockRunStore([ + { + type: 'condition', + stepIndex: 0, + executionParams: { answer: 'A', reasoning: 'Best fit' }, + }, + ]), + }), + ); + + const result = await executor + .buildPreviousStepsMessages() + .then(msgs => msgs[0]?.content ?? 
''); + + expect(result).toContain('Result: {"answer":"A","reasoning":"Best fit"}'); + expect(result).not.toContain('History:'); + }); + + it('shows "(no prompt)" when step has no prompt', async () => { + const entry = makeHistoryEntry({ stepIndex: 0 }); + entry.step.prompt = undefined; + + const executor = new TestableExecutor( + makeContext({ + history: [entry], + runStore: makeMockRunStore([ + { + type: 'condition', + stepIndex: 0, + executionParams: { answer: 'A', reasoning: 'Only option' }, + }, + ]), + }), + ); + + const result = await executor + .buildPreviousStepsMessages() + .then(msgs => msgs[0]?.content ?? ''); + + expect(result).toContain('Prompt: (no prompt)'); + }); + }); + + describe('invokeWithTool', () => { + function makeMockModel(response: unknown) { + const invoke = jest.fn().mockResolvedValue(response); + + return { + model: { + bindTools: jest.fn().mockReturnValue({ invoke }), + } as unknown as ExecutionContext['model'], + invoke, + }; + } + + const dummyTool = {} as DynamicStructuredTool; + const dummyMessages = [] as BaseMessage[]; + + it('returns args from the first tool call', async () => { + const { model } = makeMockModel({ + tool_calls: [{ name: 'tool', args: { key: 'value' }, id: 'c1' }], + }); + const executor = new TestableExecutor(makeContext({ model })); + + const result = await executor.invokeWithTool(dummyMessages, dummyTool); + + expect(result).toEqual({ key: 'value' }); + }); + + it('binds tool with tool_choice "any"', async () => { + const { model } = makeMockModel({ + tool_calls: [{ name: 'tool', args: {}, id: 'c1' }], + }); + const executor = new TestableExecutor(makeContext({ model })); + + await executor.invokeWithTool(dummyMessages, dummyTool); + + expect(model.bindTools).toHaveBeenCalledWith([dummyTool], { tool_choice: 'any' }); + }); + + it('throws MissingToolCallError when tool_calls is undefined', async () => { + const { model } = makeMockModel({}); + const executor = new TestableExecutor(makeContext({ model })); + + 
await expect(executor.invokeWithTool(dummyMessages, dummyTool)).rejects.toThrow( + MissingToolCallError, + ); + }); + + it('throws MissingToolCallError when tool_calls is empty', async () => { + const { model } = makeMockModel({ tool_calls: [] }); + const executor = new TestableExecutor(makeContext({ model })); + + await expect(executor.invokeWithTool(dummyMessages, dummyTool)).rejects.toThrow( + MissingToolCallError, + ); + }); + + it('throws MalformedToolCallError when invalid_tool_calls is present', async () => { + const { model } = makeMockModel({ + tool_calls: [], + invalid_tool_calls: [{ name: 'my-tool', args: '{bad', error: 'Parse error' }], + }); + const executor = new TestableExecutor(makeContext({ model })); + + await expect(executor.invokeWithTool(dummyMessages, dummyTool)).rejects.toThrow( + MalformedToolCallError, + ); + await expect(executor.invokeWithTool(dummyMessages, dummyTool)).rejects.toThrow( + 'AI returned a malformed tool call for "my-tool": Parse error', + ); + }); + + it('throws MalformedToolCallError with "unknown" when invalid_tool_call has no name', async () => { + const { model } = makeMockModel({ + tool_calls: [], + invalid_tool_calls: [{ error: 'Something broke' }], + }); + const executor = new TestableExecutor(makeContext({ model })); + + await expect(executor.invokeWithTool(dummyMessages, dummyTool)).rejects.toThrow( + MalformedToolCallError, + ); + await expect(executor.invokeWithTool(dummyMessages, dummyTool)).rejects.toThrow( + 'AI returned a malformed tool call for "unknown": Something broke', + ); + }); + }); +}); diff --git a/packages/workflow-executor/test/executors/condition-step-executor.test.ts b/packages/workflow-executor/test/executors/condition-step-executor.test.ts new file mode 100644 index 0000000000..ba7fe7f34d --- /dev/null +++ b/packages/workflow-executor/test/executors/condition-step-executor.test.ts @@ -0,0 +1,310 @@ +import type { RunStore } from '../../src/ports/run-store'; +import type { ExecutionContext } 
from '../../src/types/execution'; +import type { ConditionStepDefinition } from '../../src/types/step-definition'; +import type { ConditionStepHistory } from '../../src/types/step-history'; + +import ConditionStepExecutor from '../../src/executors/condition-step-executor'; +import { StepType } from '../../src/types/step-definition'; + +function makeStep(overrides: Partial = {}): ConditionStepDefinition { + return { + id: 'cond-1', + type: StepType.Condition, + options: ['Approve', 'Reject'], + prompt: 'Should we approve this?', + ...overrides, + }; +} + +function makeStepHistory(overrides: Partial = {}): ConditionStepHistory { + return { + type: 'condition', + stepId: 'cond-1', + stepIndex: 0, + status: 'success', + ...overrides, + }; +} + +function makeMockRunStore(overrides: Partial = {}): RunStore { + return { + getRecords: jest.fn().mockResolvedValue([]), + getRecord: jest.fn().mockResolvedValue(null), + saveRecord: jest.fn().mockResolvedValue(undefined), + getStepExecutions: jest.fn().mockResolvedValue([]), + getStepExecution: jest.fn().mockResolvedValue(null), + saveStepExecution: jest.fn().mockResolvedValue(undefined), + ...overrides, + }; +} + +function makeMockModel(toolCallArgs?: Record) { + const invoke = jest.fn().mockResolvedValue({ + tool_calls: toolCallArgs + ? 
[{ name: 'choose-gateway-option', args: toolCallArgs, id: 'call_1' }] + : undefined, + }); + const bindTools = jest.fn().mockReturnValue({ invoke }); + const model = { bindTools } as unknown as ExecutionContext['model']; + + return { model, bindTools, invoke }; +} + +function makeContext(overrides: Partial = {}): ExecutionContext { + return { + runId: 'run-1', + model: makeMockModel().model, + agentPort: {} as ExecutionContext['agentPort'], + workflowPort: {} as ExecutionContext['workflowPort'], + runStore: makeMockRunStore(), + history: [], + remoteTools: [], + ...overrides, + }; +} + +describe('ConditionStepExecutor', () => { + describe('immutability', () => { + it('does not mutate the input stepHistory', async () => { + const mockModel = makeMockModel({ + option: 'Reject', + reasoning: 'Incomplete', + question: 'Approve?', + }); + const stepHistory = makeStepHistory(); + const executor = new ConditionStepExecutor(makeContext({ model: mockModel.model })); + + const result = await executor.execute(makeStep(), stepHistory); + + expect(result.stepHistory).not.toBe(stepHistory); + expect(stepHistory.status).toBe('success'); + expect(stepHistory.selectedOption).toBeUndefined(); + }); + }); + + describe('AI decision', () => { + it('calls AI and returns selected option on success', async () => { + const mockModel = makeMockModel({ + option: 'Reject', + reasoning: 'The request is incomplete', + question: 'Should we approve?', + }); + const runStore = makeMockRunStore(); + const context = makeContext({ + model: mockModel.model, + runStore, + }); + const executor = new ConditionStepExecutor(context); + + const result = await executor.execute(makeStep(), makeStepHistory()); + + expect(result.stepHistory.status).toBe('success'); + expect((result.stepHistory as ConditionStepHistory).selectedOption).toBe('Reject'); + + expect(mockModel.bindTools).toHaveBeenCalledWith( + [expect.objectContaining({ name: 'choose-gateway-option' })], + { tool_choice: 'any' }, + ); + + 
expect(runStore.saveStepExecution).toHaveBeenCalledWith({ + type: 'condition', + stepIndex: 0, + executionParams: { answer: 'Reject', reasoning: 'The request is incomplete' }, + executionResult: { answer: 'Reject' }, + }); + }); + + it('binds a tool with all step options and nullable for no-match', async () => { + const mockModel = makeMockModel({ + option: 'Approve', + reasoning: 'Looks good', + question: 'Should we?', + }); + const executor = new ConditionStepExecutor(makeContext({ model: mockModel.model })); + + await executor.execute( + makeStep({ options: ['Approve', 'Reject', 'Defer'] }), + makeStepHistory(), + ); + + const tool = mockModel.bindTools.mock.calls[0][0][0]; + expect(tool.name).toBe('choose-gateway-option'); + expect(tool.schema.parse({ option: 'Approve', reasoning: 'r', question: 'q' })).toBeTruthy(); + expect(tool.schema.parse({ option: 'Defer', reasoning: 'r', question: 'q' })).toBeTruthy(); + expect(tool.schema.parse({ option: null, reasoning: 'r', question: 'q' })).toBeTruthy(); + expect(() => + tool.schema.parse({ option: 'InvalidOption', reasoning: 'r', question: 'q' }), + ).toThrow(); + }); + + it('sends system prompt + user question as separate messages', async () => { + const mockModel = makeMockModel({ + option: 'Approve', + reasoning: 'Looks good', + question: 'Should we approve?', + }); + const context = makeContext({ model: mockModel.model }); + const executor = new ConditionStepExecutor(context); + + await executor.execute( + makeStep({ prompt: 'Custom prompt for this step' }), + makeStepHistory(), + ); + + const messages = mockModel.invoke.mock.calls[0][0]; + expect(messages).toHaveLength(2); + expect(messages[0].content).toContain('workflow gateway decision'); + expect(messages[0].content).toContain('80% confident'); + expect(messages[1].content).toBe('**Question**: Custom prompt for this step'); + }); + + it('uses default question when step.prompt is undefined', async () => { + const mockModel = makeMockModel({ + option: 
'Approve', + reasoning: 'Default', + question: 'Approve?', + }); + const context = makeContext({ model: mockModel.model }); + const executor = new ConditionStepExecutor(context); + + await executor.execute(makeStep({ prompt: undefined }), makeStepHistory()); + + const messages = mockModel.invoke.mock.calls[0][0]; + const humanMessage = messages[messages.length - 1]; + expect(humanMessage.content).toBe('**Question**: Choose the most appropriate option.'); + }); + + it('prepends previous steps summary as separate SystemMessage', async () => { + const mockModel = makeMockModel({ + option: 'Approve', + reasoning: 'Based on previous decision', + question: 'Final approval?', + }); + const runStore = makeMockRunStore({ + getStepExecution: jest.fn().mockResolvedValue(null), + getStepExecutions: jest.fn().mockResolvedValue([ + { + type: 'condition', + stepIndex: 0, + executionParams: { answer: 'Yes', reasoning: 'Validated by manager' }, + }, + ]), + }); + const context = makeContext({ + model: mockModel.model, + runStore, + history: [ + { + step: { + id: 'prev-step', + type: StepType.Condition, + options: ['Yes', 'No'], + prompt: 'Previous question', + }, + stepHistory: { + type: 'condition', + stepId: 'prev-step', + stepIndex: 0, + status: 'success', + }, + }, + ], + }); + const executor = new ConditionStepExecutor(context); + + await executor.execute( + makeStep({ id: 'cond-2' }), + makeStepHistory({ stepId: 'cond-2', stepIndex: 1 }), + ); + + const messages = mockModel.invoke.mock.calls[0][0]; + expect(messages).toHaveLength(3); + expect(messages[0].content).toContain('Previous question'); + expect(messages[0].content).toContain('"answer":"Yes"'); + expect(messages[1].content).toContain('workflow gateway decision'); + expect(messages[2].content).toContain('**Question**'); + }); + }); + + describe('no-match fallback', () => { + it('returns manual-decision when AI selects null', async () => { + const mockModel = makeMockModel({ + option: null, + reasoning: 'None apply', + 
question: 'N/A', + }); + const runStore = makeMockRunStore(); + const context = makeContext({ + model: mockModel.model, + runStore, + }); + const executor = new ConditionStepExecutor(context); + + const result = await executor.execute(makeStep(), makeStepHistory()); + + expect(result.stepHistory.status).toBe('manual-decision'); + expect(result.stepHistory.error).toBeUndefined(); + expect((result.stepHistory as ConditionStepHistory).selectedOption).toBeUndefined(); + expect(runStore.saveStepExecution).toHaveBeenCalledWith({ + type: 'condition', + stepIndex: 0, + executionParams: { answer: null, reasoning: 'None apply' }, + executionResult: undefined, + }); + }); + + it('returns error when AI returns an invalid (malformed) tool call', async () => { + const invoke = jest.fn().mockResolvedValue({ + tool_calls: [], + invalid_tool_calls: [ + { name: 'choose-gateway-option', args: '{bad json', error: 'JSON parse error' }, + ], + }); + const bindTools = jest.fn().mockReturnValue({ invoke }); + const runStore = makeMockRunStore(); + const context = makeContext({ + model: { bindTools } as unknown as ExecutionContext['model'], + runStore, + }); + const executor = new ConditionStepExecutor(context); + + const result = await executor.execute(makeStep(), makeStepHistory()); + + expect(result.stepHistory.status).toBe('error'); + expect(result.stepHistory.error).toBe( + 'AI returned a malformed tool call for "choose-gateway-option": JSON parse error', + ); + expect(runStore.saveStepExecution).not.toHaveBeenCalled(); + }); + }); + + describe('error propagation', () => { + it('returns error status when model invocation fails', async () => { + const invoke = jest.fn().mockRejectedValue(new Error('API timeout')); + const bindTools = jest.fn().mockReturnValue({ invoke }); + const context = makeContext({ + model: { bindTools } as unknown as ExecutionContext['model'], + }); + const executor = new ConditionStepExecutor(context); + + const result = await executor.execute(makeStep(), 
makeStepHistory()); + + expect(result.stepHistory.status).toBe('error'); + expect(result.stepHistory.error).toBe('API timeout'); + }); + + it('lets run store errors propagate', async () => { + const mockModel = makeMockModel({ + option: 'Approve', + reasoning: 'OK', + question: 'Approve?', + }); + const runStore = makeMockRunStore({ + saveStepExecution: jest.fn().mockRejectedValue(new Error('Storage full')), + }); + const executor = new ConditionStepExecutor(makeContext({ model: mockModel.model, runStore })); + + await expect(executor.execute(makeStep(), makeStepHistory())).rejects.toThrow('Storage full'); + }); + }); +}); diff --git a/yarn.lock b/yarn.lock index 023ddb64f2..c7c7add7ea 100644 --- a/yarn.lock +++ b/yarn.lock @@ -2314,6 +2314,23 @@ uuid "^10.0.0" zod "^3.25.76 || ^4" +"@langchain/core@1.1.33": + version "1.1.33" + resolved "https://registry.yarnpkg.com/@langchain/core/-/core-1.1.33.tgz#414536e9d0a6f90576502e532336104360ed4392" + integrity sha512-At1ooBmPlHMkhTkG6NqeOVjNscuJwneBB8F88rFRvBvIfhTACVLzEwMiZFWNTM8DzUXUOcxxqS7xKRyr6JBbOQ== + dependencies: + "@cfworker/json-schema" "^4.0.2" + "@standard-schema/spec" "^1.1.0" + ansi-styles "^5.0.0" + camelcase "6" + decamelize "1.2.0" + js-tiktoken "^1.0.12" + langsmith ">=0.5.0 <1.0.0" + mustache "^4.2.0" + p-queue "^6.6.2" + uuid "^11.1.0" + zod "^3.25.76 || ^4" + "@langchain/langgraph-checkpoint@^1.0.0": version "1.0.0" resolved "https://registry.yarnpkg.com/@langchain/langgraph-checkpoint/-/langgraph-checkpoint-1.0.0.tgz#ece2ede439d0d0b0b532c4be7817fd5029afe4f8" @@ -4169,6 +4186,11 @@ dependencies: tslib "^2.6.2" +"@standard-schema/spec@^1.1.0": + version "1.1.0" + resolved "https://registry.yarnpkg.com/@standard-schema/spec/-/spec-1.1.0.tgz#a79b55dbaf8604812f52d140b2c9ab41bc150bb8" + integrity sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w== + "@tokenizer/token@^0.3.0": version "0.3.0" resolved 
"https://registry.yarnpkg.com/@tokenizer/token/-/token-0.3.0.tgz#fe98a93fe789247e998c75e74e9c7c63217aa276" @@ -11297,6 +11319,18 @@ koa@^3.0.1: semver "^7.6.3" uuid "^10.0.0" +"langsmith@>=0.5.0 <1.0.0": + version "0.5.10" + resolved "https://registry.yarnpkg.com/langsmith/-/langsmith-0.5.10.tgz#f0df23538e6a7c2928787030cedfb4be9d5b3db6" + integrity sha512-unBdaaD/CqAOLIYjd9kT33FgHUMvHSsyBIPbQa+p/rE/Sv/l4pAC5ISEE79zphxi+vV4qxHqEgqahVXj2Xvz7A== + dependencies: + "@types/uuid" "^10.0.0" + chalk "^5.6.2" + console-table-printer "^2.12.1" + p-queue "^6.6.2" + semver "^7.6.3" + uuid "^10.0.0" + lerna@^8.2.3: version "8.2.3" resolved "https://registry.yarnpkg.com/lerna/-/lerna-8.2.3.tgz#0a9c07eda4cfac84a480b3e66915189ccfb5bd2c" @@ -17288,6 +17322,11 @@ uuid@^10.0.0: resolved "https://registry.yarnpkg.com/uuid/-/uuid-10.0.0.tgz#5a95aa454e6e002725c79055fd42aaba30ca6294" integrity sha512-8XkAphELsDnEGrDxUOHB3RGvXz6TeuYSGEZBOjtTtPm2lwhGBjLgOzLHB63IUWfBpNucQjND6d3AOudO+H3RWQ== +uuid@^11.1.0: + version "11.1.0" + resolved "https://registry.yarnpkg.com/uuid/-/uuid-11.1.0.tgz#9549028be1753bb934fc96e2bca09bb4105ae912" + integrity sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A== + uuid@^13.0.0: version "13.0.0" resolved "https://registry.yarnpkg.com/uuid/-/uuid-13.0.0.tgz#263dc341b19b4d755eb8fe36b78d95a6b65707e8" @@ -17825,6 +17864,11 @@ zod-to-json-schema@^3.25.1: resolved "https://registry.yarnpkg.com/zod-to-json-schema/-/zod-to-json-schema-3.25.1.tgz#7f24962101a439ddade2bf1aeab3c3bfec7d84ba" integrity sha512-pM/SU9d3YAggzi6MtR4h7ruuQlqKtad8e9S0fmxcMi+ueAK5Korys/aWcV9LIIHTVbj01NdzxcnXSN+O74ZIVA== +zod@4.3.6: + version "4.3.6" + resolved "https://registry.yarnpkg.com/zod/-/zod-4.3.6.tgz#89c56e0aa7d2b05107d894412227087885ab112a" + integrity sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg== + "zod@^3.25 || ^4.0", "zod@^3.25.76 || ^4", zod@^4.3.5: version "4.3.5" resolved 
"https://registry.yarnpkg.com/zod/-/zod-4.3.5.tgz#aeb269a6f9fc259b1212c348c7c5432aaa474d2a" From cb8036b9d5cfdc6dcb88580ecdd7f2c60e099005 Mon Sep 17 00:00:00 2001 From: Matthieu Date: Wed, 18 Mar 2026 15:29:40 +0100 Subject: [PATCH 06/18] feat(workflow-executor): implement AgentPort adapter using agent-client (#1496) --- packages/workflow-executor/package.json | 1 + .../src/adapters/agent-client-agent-port.ts | 129 ++++++++++ packages/workflow-executor/src/errors.ts | 6 + packages/workflow-executor/src/index.ts | 10 +- .../workflow-executor/src/ports/agent-port.ts | 16 +- .../src/ports/workflow-port.ts | 4 +- .../workflow-executor/src/types/execution.ts | 4 +- .../workflow-executor/src/types/record.ts | 13 +- .../src/types/step-execution-data.ts | 4 +- .../adapters/agent-client-agent-port.test.ts | 241 ++++++++++++++++++ 10 files changed, 411 insertions(+), 17 deletions(-) create mode 100644 packages/workflow-executor/src/adapters/agent-client-agent-port.ts create mode 100644 packages/workflow-executor/test/adapters/agent-client-agent-port.test.ts diff --git a/packages/workflow-executor/package.json b/packages/workflow-executor/package.json index 3c838da931..fafc832c59 100644 --- a/packages/workflow-executor/package.json +++ b/packages/workflow-executor/package.json @@ -23,6 +23,7 @@ "test": "jest" }, "dependencies": { + "@forestadmin/agent-client": "1.4.13", "@langchain/core": "1.1.33", "zod": "4.3.6" } diff --git a/packages/workflow-executor/src/adapters/agent-client-agent-port.ts b/packages/workflow-executor/src/adapters/agent-client-agent-port.ts new file mode 100644 index 0000000000..23015e8bcd --- /dev/null +++ b/packages/workflow-executor/src/adapters/agent-client-agent-port.ts @@ -0,0 +1,129 @@ +import type { AgentPort } from '../ports/agent-port'; +import type { ActionRef, CollectionRef, RecordData } from '../types/record'; +import type { RemoteAgentClient, SelectOptions } from '@forestadmin/agent-client'; + +import { RecordNotFoundError } from 
'../errors'; + +function buildPkFilter( + primaryKeyFields: string[], + recordId: Array, +): SelectOptions['filters'] { + if (primaryKeyFields.length === 1) { + return { field: primaryKeyFields[0], operator: 'Equal', value: recordId[0] }; + } + + return { + aggregator: 'And', + conditions: primaryKeyFields.map((field, i) => ({ + field, + operator: 'Equal', + value: recordId[i], + })), + }; +} + +// agent-client methods (update, relation, action) still expect the pipe-encoded string format +function encodePk(recordId: Array): string { + return recordId.map(v => String(v)).join('|'); +} + +function extractRecordId( + primaryKeyFields: string[], + record: Record, +): Array { + return primaryKeyFields.map(field => record[field] as string | number); +} + +export default class AgentClientAgentPort implements AgentPort { + private readonly client: RemoteAgentClient; + private readonly collectionRefs: Record; + + constructor(params: { + client: RemoteAgentClient; + collectionRefs: Record; + }) { + this.client = params.client; + this.collectionRefs = params.collectionRefs; + } + + async getRecord(collectionName: string, recordId: Array): Promise { + const ref = this.getCollectionRef(collectionName); + const records = await this.client.collection(collectionName).list>({ + filters: buildPkFilter(ref.primaryKeyFields, recordId), + pagination: { size: 1, number: 1 }, + }); + + if (records.length === 0) { + throw new RecordNotFoundError(collectionName, encodePk(recordId)); + } + + return { ...ref, recordId, values: records[0] }; + } + + async updateRecord( + collectionName: string, + recordId: Array, + values: Record, + ): Promise { + const ref = this.getCollectionRef(collectionName); + const updatedRecord = await this.client + .collection(collectionName) + .update>(encodePk(recordId), values); + + return { ...ref, recordId, values: updatedRecord }; + } + + async getRelatedData( + collectionName: string, + recordId: Array, + relationName: string, + ): Promise { + const 
relatedRef = this.getCollectionRef(relationName); + + const records = await this.client + .collection(collectionName) + .relation(relationName, encodePk(recordId)) + .list>(); + + return records.map(record => ({ + ...relatedRef, + recordId: extractRecordId(relatedRef.primaryKeyFields, record), + values: record, + })); + } + + async getActions(collectionName: string): Promise { + const ref = this.collectionRefs[collectionName]; + + return ref ? ref.actions : []; + } + + async executeAction( + collectionName: string, + actionName: string, + recordIds: Array[], + ): Promise { + const encodedIds = recordIds.map(id => encodePk(id)); + const action = await this.client + .collection(collectionName) + .action(actionName, { recordIds: encodedIds }); + + return action.execute(); + } + + private getCollectionRef(collectionName: string): CollectionRef { + const ref = this.collectionRefs[collectionName]; + + if (!ref) { + return { + collectionName, + collectionDisplayName: collectionName, + primaryKeyFields: ['id'], + fields: [], + actions: [], + }; + } + + return ref; + } +} diff --git a/packages/workflow-executor/src/errors.ts b/packages/workflow-executor/src/errors.ts index 3a853e949c..d735977d4f 100644 --- a/packages/workflow-executor/src/errors.ts +++ b/packages/workflow-executor/src/errors.ts @@ -21,3 +21,9 @@ export class MalformedToolCallError extends WorkflowExecutorError { this.toolName = toolName; } } + +export class RecordNotFoundError extends WorkflowExecutorError { + constructor(collectionName: string, recordId: string) { + super(`Record not found: collection "${collectionName}", id "${recordId}"`); + } +} diff --git a/packages/workflow-executor/src/index.ts b/packages/workflow-executor/src/index.ts index 9d570f5729..c434071d83 100644 --- a/packages/workflow-executor/src/index.ts +++ b/packages/workflow-executor/src/index.ts @@ -19,7 +19,7 @@ export type { StepExecutionData, } from './types/step-execution-data'; -export type { RecordFieldRef, RecordRef, RecordData 
} from './types/record'; +export type { RecordFieldRef, ActionRef, CollectionRef, RecordData } from './types/record'; export type { StepRecord, @@ -33,6 +33,12 @@ export type { AgentPort } from './ports/agent-port'; export type { McpConfiguration, WorkflowPort } from './ports/workflow-port'; export type { RunStore } from './ports/run-store'; -export { WorkflowExecutorError, MissingToolCallError, MalformedToolCallError } from './errors'; +export { + WorkflowExecutorError, + MissingToolCallError, + MalformedToolCallError, + RecordNotFoundError, +} from './errors'; export { default as BaseStepExecutor } from './executors/base-step-executor'; export { default as ConditionStepExecutor } from './executors/condition-step-executor'; +export { default as AgentClientAgentPort } from './adapters/agent-client-agent-port'; diff --git a/packages/workflow-executor/src/ports/agent-port.ts b/packages/workflow-executor/src/ports/agent-port.ts index 5d4f431c7e..6a588f1f23 100644 --- a/packages/workflow-executor/src/ports/agent-port.ts +++ b/packages/workflow-executor/src/ports/agent-port.ts @@ -1,19 +1,23 @@ /** @draft Types derived from the workflow-executor spec -- subject to change. 
*/ -import type { RecordData } from '../types/record'; +import type { ActionRef, RecordData } from '../types/record'; export interface AgentPort { - getRecord(collectionName: string, recordId: string): Promise; + getRecord(collectionName: string, recordId: Array): Promise; updateRecord( collectionName: string, - recordId: string, + recordId: Array, values: Record, ): Promise; getRelatedData( collectionName: string, - recordId: string, + recordId: Array, relationName: string, ): Promise; - getActions(collectionName: string): Promise; - executeAction(collectionName: string, actionName: string, recordIds: string[]): Promise; + getActions(collectionName: string): Promise; + executeAction( + collectionName: string, + actionName: string, + recordIds: Array[], + ): Promise; } diff --git a/packages/workflow-executor/src/ports/workflow-port.ts b/packages/workflow-executor/src/ports/workflow-port.ts index 806bb33980..c36ea41d8e 100644 --- a/packages/workflow-executor/src/ports/workflow-port.ts +++ b/packages/workflow-executor/src/ports/workflow-port.ts @@ -1,7 +1,7 @@ /** @draft Types derived from the workflow-executor spec -- subject to change. */ import type { PendingStepExecution } from '../types/execution'; -import type { RecordRef } from '../types/record'; +import type { CollectionRef } from '../types/record'; import type { StepHistory } from '../types/step-history'; /** Placeholder -- will be typed as McpConfiguration from @forestadmin/ai-proxy/mcp-client once added as dependency. 
*/ @@ -10,6 +10,6 @@ export type McpConfiguration = unknown; export interface WorkflowPort { getPendingStepExecutions(): Promise; completeStepExecution(runId: string, stepHistory: StepHistory): Promise; - getCollectionRef(collectionName: string): Promise; + getCollectionRef(collectionName: string): Promise; getMcpServerConfigs(): Promise; } diff --git a/packages/workflow-executor/src/types/execution.ts b/packages/workflow-executor/src/types/execution.ts index e983aad4b3..d2524403cf 100644 --- a/packages/workflow-executor/src/types/execution.ts +++ b/packages/workflow-executor/src/types/execution.ts @@ -1,6 +1,6 @@ /** @draft Types derived from the workflow-executor spec -- subject to change. */ -import type { RecordRef } from './record'; +import type { CollectionRef } from './record'; import type { StepDefinition } from './step-definition'; import type { StepHistory } from './step-history'; import type { AgentPort } from '../ports/agent-port'; @@ -20,7 +20,7 @@ export interface PendingStepExecution { readonly step: StepDefinition; readonly stepHistory: StepHistory; readonly previousSteps: ReadonlyArray; - readonly availableRecords: ReadonlyArray; + readonly availableRecords: ReadonlyArray; readonly userInput?: UserInput; } diff --git a/packages/workflow-executor/src/types/record.ts b/packages/workflow-executor/src/types/record.ts index 9610da056b..14064fcb1f 100644 --- a/packages/workflow-executor/src/types/record.ts +++ b/packages/workflow-executor/src/types/record.ts @@ -8,13 +8,20 @@ export interface RecordFieldRef { referencedCollectionName?: string; } -export interface RecordRef { - recordId: string; +export interface ActionRef { + name: string; + displayName: string; +} + +export interface CollectionRef { collectionName: string; collectionDisplayName: string; + primaryKeyFields: string[]; fields: RecordFieldRef[]; + actions: ActionRef[]; } -export interface RecordData extends RecordRef { +export interface RecordData extends CollectionRef { + recordId: Array; 
values: Record; } diff --git a/packages/workflow-executor/src/types/step-execution-data.ts b/packages/workflow-executor/src/types/step-execution-data.ts index 5b5549c875..e2d46eaf47 100644 --- a/packages/workflow-executor/src/types/step-execution-data.ts +++ b/packages/workflow-executor/src/types/step-execution-data.ts @@ -1,6 +1,6 @@ /** @draft Types derived from the workflow-executor spec -- subject to change. */ -import type { RecordRef } from './record'; +import type { CollectionRef } from './record'; interface BaseStepExecutionData { stepIndex: number; @@ -17,7 +17,7 @@ export interface AiTaskStepExecutionData extends BaseStepExecutionData { executionParams?: Record; executionResult?: Record; toolConfirmationInterruption?: Record; - selectedRecordRef?: RecordRef; + selectedRecord?: CollectionRef; } export type StepExecutionData = ConditionStepExecutionData | AiTaskStepExecutionData; diff --git a/packages/workflow-executor/test/adapters/agent-client-agent-port.test.ts b/packages/workflow-executor/test/adapters/agent-client-agent-port.test.ts new file mode 100644 index 0000000000..8789907875 --- /dev/null +++ b/packages/workflow-executor/test/adapters/agent-client-agent-port.test.ts @@ -0,0 +1,241 @@ +import type { CollectionRef } from '../../src/types/record'; +import type { RemoteAgentClient } from '@forestadmin/agent-client'; + +import AgentClientAgentPort from '../../src/adapters/agent-client-agent-port'; +import { RecordNotFoundError } from '../../src/errors'; + +function createMockClient() { + const mockAction = { execute: jest.fn() }; + const mockRelation = { list: jest.fn() }; + const mockCollection = { + list: jest.fn(), + update: jest.fn(), + relation: jest.fn().mockReturnValue(mockRelation), + action: jest.fn().mockResolvedValue(mockAction), + }; + + const client = { + collection: jest.fn().mockReturnValue(mockCollection), + } as unknown as jest.Mocked; + + return { client, mockCollection, mockRelation, mockAction }; +} + 
+describe('AgentClientAgentPort', () => { + let client: jest.Mocked; + let mockCollection: ReturnType['mockCollection']; + let mockRelation: ReturnType['mockRelation']; + let mockAction: ReturnType['mockAction']; + let collectionRefs: Record; + let port: AgentClientAgentPort; + + beforeEach(() => { + jest.clearAllMocks(); + + ({ client, mockCollection, mockRelation, mockAction } = createMockClient()); + + collectionRefs = { + users: { + collectionName: 'users', + collectionDisplayName: 'Users', + primaryKeyFields: ['id'], + fields: [ + { fieldName: 'id', displayName: 'id', type: 'Number', isRelationship: false }, + { fieldName: 'name', displayName: 'name', type: 'String', isRelationship: false }, + ], + actions: [ + { name: 'sendEmail', displayName: 'Send Email' }, + { name: 'archive', displayName: 'Archive' }, + ], + }, + orders: { + collectionName: 'orders', + collectionDisplayName: 'Orders', + primaryKeyFields: ['tenantId', 'orderId'], + fields: [ + { fieldName: 'tenantId', displayName: 'Tenant', type: 'Number', isRelationship: false }, + { fieldName: 'orderId', displayName: 'Order', type: 'Number', isRelationship: false }, + ], + actions: [], + }, + posts: { + collectionName: 'posts', + collectionDisplayName: 'Posts', + primaryKeyFields: ['id'], + fields: [ + { fieldName: 'id', displayName: 'id', type: 'Number', isRelationship: false }, + { fieldName: 'title', displayName: 'title', type: 'String', isRelationship: false }, + ], + actions: [], + }, + }; + + port = new AgentClientAgentPort({ client, collectionRefs }); + }); + + describe('getRecord', () => { + it('should return a RecordData for a simple PK', async () => { + mockCollection.list.mockResolvedValue([{ id: 42, name: 'Alice' }]); + + const result = await port.getRecord('users', [42]); + + expect(mockCollection.list).toHaveBeenCalledWith({ + filters: { field: 'id', operator: 'Equal', value: 42 }, + pagination: { size: 1, number: 1 }, + }); + expect(result).toEqual({ + recordId: [42], + collectionName: 
'users', + collectionDisplayName: 'Users', + primaryKeyFields: ['id'], + fields: collectionRefs.users.fields, + actions: collectionRefs.users.actions, + values: { id: 42, name: 'Alice' }, + }); + }); + + it('should build a composite And filter for composite PKs', async () => { + mockCollection.list.mockResolvedValue([{ tenantId: 1, orderId: 2 }]); + + await port.getRecord('orders', [1, 2]); + + expect(mockCollection.list).toHaveBeenCalledWith({ + filters: { + aggregator: 'And', + conditions: [ + { field: 'tenantId', operator: 'Equal', value: 1 }, + { field: 'orderId', operator: 'Equal', value: 2 }, + ], + }, + pagination: { size: 1, number: 1 }, + }); + }); + + it('should throw a RecordNotFoundError when no record is found', async () => { + mockCollection.list.mockResolvedValue([]); + + await expect(port.getRecord('users', [999])).rejects.toThrow(RecordNotFoundError); + }); + + it('should fallback to pk field "id" when collection is unknown', async () => { + mockCollection.list.mockResolvedValue([{ id: 1 }]); + + const result = await port.getRecord('unknown', [1]); + + expect(mockCollection.list).toHaveBeenCalledWith( + expect.objectContaining({ + filters: { field: 'id', operator: 'Equal', value: 1 }, + }), + ); + expect(result.collectionName).toBe('unknown'); + expect(result.fields).toEqual([]); + }); + }); + + describe('updateRecord', () => { + it('should call update with pipe-encoded id and return a RecordData', async () => { + mockCollection.update.mockResolvedValue({ id: 42, name: 'Bob' }); + + const result = await port.updateRecord('users', [42], { name: 'Bob' }); + + expect(mockCollection.update).toHaveBeenCalledWith('42', { name: 'Bob' }); + expect(result).toEqual({ + recordId: [42], + collectionName: 'users', + collectionDisplayName: 'Users', + primaryKeyFields: ['id'], + fields: collectionRefs.users.fields, + actions: collectionRefs.users.actions, + values: { id: 42, name: 'Bob' }, + }); + }); + + it('should encode composite PK to pipe format for update', 
async () => { + mockCollection.update.mockResolvedValue({ tenantId: 1, orderId: 2 }); + + await port.updateRecord('orders', [1, 2], { status: 'done' }); + + expect(mockCollection.update).toHaveBeenCalledWith('1|2', { status: 'done' }); + }); + }); + + describe('getRelatedData', () => { + it('should return RecordData[] with recordId extracted from PK fields', async () => { + mockRelation.list.mockResolvedValue([ + { id: 10, title: 'Post A' }, + { id: 11, title: 'Post B' }, + ]); + + const result = await port.getRelatedData('users', [42], 'posts'); + + expect(mockCollection.relation).toHaveBeenCalledWith('posts', '42'); + expect(result).toEqual([ + { + recordId: [10], + collectionName: 'posts', + collectionDisplayName: 'Posts', + primaryKeyFields: ['id'], + fields: collectionRefs.posts.fields, + actions: collectionRefs.posts.actions, + values: { id: 10, title: 'Post A' }, + }, + { + recordId: [11], + collectionName: 'posts', + collectionDisplayName: 'Posts', + primaryKeyFields: ['id'], + fields: collectionRefs.posts.fields, + actions: collectionRefs.posts.actions, + values: { id: 11, title: 'Post B' }, + }, + ]); + }); + + it('should fallback to relationName when no CollectionRef exists', async () => { + mockRelation.list.mockResolvedValue([{ id: 1 }]); + + const result = await port.getRelatedData('users', [42], 'unknownRelation'); + + expect(result[0].collectionName).toBe('unknownRelation'); + expect(result[0].recordId).toEqual([1]); + }); + + it('should return an empty array when no related data exists', async () => { + mockRelation.list.mockResolvedValue([]); + + expect(await port.getRelatedData('users', [42], 'posts')).toEqual([]); + }); + }); + + describe('getActions', () => { + it('should return ActionRef[] from CollectionRef', async () => { + expect(await port.getActions('users')).toEqual([ + { name: 'sendEmail', displayName: 'Send Email' }, + { name: 'archive', displayName: 'Archive' }, + ]); + }); + + it('should return an empty array for an unknown 
collection', async () => { + expect(await port.getActions('unknown')).toEqual([]); + }); + }); + + describe('executeAction', () => { + it('should encode recordIds to pipe format and call execute', async () => { + mockAction.execute.mockResolvedValue({ success: 'done' }); + + const result = await port.executeAction('users', 'sendEmail', [[1], [2]]); + + expect(mockCollection.action).toHaveBeenCalledWith('sendEmail', { recordIds: ['1', '2'] }); + expect(result).toEqual({ success: 'done' }); + }); + + it('should propagate errors from action execution', async () => { + mockAction.execute.mockRejectedValue(new Error('Action failed')); + + await expect(port.executeAction('users', 'sendEmail', [[1]])).rejects.toThrow( + 'Action failed', + ); + }); + }); +}); From 0ebae51e820be356f7e16a01a8bcb929799c8ff8 Mon Sep 17 00:00:00 2001 From: scra Date: Wed, 18 Mar 2026 16:38:22 +0100 Subject: [PATCH 07/18] feat(ai-proxy): add programmatic API to aiClient (#1492) --- packages/ai-proxy/src/ai-client.ts | 65 +++ .../ai-proxy/src/create-base-chat-model.ts | 26 ++ packages/ai-proxy/src/get-ai-configuration.ts | 28 ++ packages/ai-proxy/src/index.ts | 2 + packages/ai-proxy/src/router.ts | 43 +- .../src/validate-ai-configurations.ts | 12 + packages/ai-proxy/test/ai-client.test.ts | 413 ++++++++++++++++++ .../test/create-base-chat-model.test.ts | 118 +++++ .../test/get-ai-configuration.test.ts | 57 +++ 9 files changed, 730 insertions(+), 34 deletions(-) create mode 100644 packages/ai-proxy/src/ai-client.ts create mode 100644 packages/ai-proxy/src/create-base-chat-model.ts create mode 100644 packages/ai-proxy/src/get-ai-configuration.ts create mode 100644 packages/ai-proxy/src/validate-ai-configurations.ts create mode 100644 packages/ai-proxy/test/ai-client.test.ts create mode 100644 packages/ai-proxy/test/create-base-chat-model.test.ts create mode 100644 packages/ai-proxy/test/get-ai-configuration.test.ts diff --git a/packages/ai-proxy/src/ai-client.ts b/packages/ai-proxy/src/ai-client.ts 
new file mode 100644 index 0000000000..4a3beeda53 --- /dev/null +++ b/packages/ai-proxy/src/ai-client.ts @@ -0,0 +1,65 @@ +import type { McpConfiguration } from './mcp-client'; +import type { AiConfiguration } from './provider'; +import type { Logger } from '@forestadmin/datasource-toolkit'; +import type { BaseChatModel } from '@langchain/core/language_models/chat_models'; + +import { createBaseChatModel } from './create-base-chat-model'; +import { AINotConfiguredError } from './errors'; +import getAiConfiguration from './get-ai-configuration'; +import McpClient from './mcp-client'; +import validateAiConfigurations from './validate-ai-configurations'; + +// eslint-disable-next-line import/prefer-default-export +export class AiClient { + private readonly aiConfigurations: AiConfiguration[]; + private readonly logger?: Logger; + private readonly modelCache = new Map(); + private mcpClient?: McpClient; + + constructor(params?: { aiConfigurations?: AiConfiguration[]; logger?: Logger }) { + this.aiConfigurations = params?.aiConfigurations ?? 
[]; + this.logger = params?.logger; + + validateAiConfigurations(this.aiConfigurations); + } + + getModel(aiName?: string): BaseChatModel { + const config = getAiConfiguration(this.aiConfigurations, aiName, this.logger); + if (!config) throw new AINotConfiguredError(); + + const cached = this.modelCache.get(config.name); + if (cached) return cached; + + const model = createBaseChatModel(config); + this.modelCache.set(config.name, model); + + return model; + } + + async loadRemoteTools(mcpConfig: McpConfiguration): Promise { + await this.closeMcpClient('Error closing previous MCP connection'); + + const newClient = new McpClient(mcpConfig, this.logger); + const tools = await newClient.loadTools(); + this.mcpClient = newClient; + + return tools; + } + + async closeConnections(): Promise { + await this.closeMcpClient('Error during MCP connection cleanup'); + } + + private async closeMcpClient(errorMessage: string): Promise { + if (!this.mcpClient) return; + + try { + await this.mcpClient.closeConnections(); + } catch (error) { + const err = error instanceof Error ? 
error : new Error(String(error)); + this.logger?.('Error', errorMessage, err); + } finally { + this.mcpClient = undefined; + } + } +} diff --git a/packages/ai-proxy/src/create-base-chat-model.ts b/packages/ai-proxy/src/create-base-chat-model.ts new file mode 100644 index 0000000000..78d97ea5d1 --- /dev/null +++ b/packages/ai-proxy/src/create-base-chat-model.ts @@ -0,0 +1,26 @@ +import type { AiConfiguration } from './provider'; +import type { BaseChatModel } from '@langchain/core/language_models/chat_models'; + +import { ChatAnthropic } from '@langchain/anthropic'; +import { ChatOpenAI } from '@langchain/openai'; + +import { AIBadRequestError } from './errors'; + +// eslint-disable-next-line import/prefer-default-export +export function createBaseChatModel(config: AiConfiguration): BaseChatModel { + if (config.provider === 'openai') { + const { provider, name, ...opts } = config; + + return new ChatOpenAI({ maxRetries: 0, ...opts }); + } + + if (config.provider === 'anthropic') { + const { provider, name, model, ...opts } = config; + + return new ChatAnthropic({ maxRetries: 0, ...opts, model }); + } + + throw new AIBadRequestError( + `Unsupported AI provider '${(config as { provider: string }).provider}'.`, + ); +} diff --git a/packages/ai-proxy/src/get-ai-configuration.ts b/packages/ai-proxy/src/get-ai-configuration.ts new file mode 100644 index 0000000000..ee43acb5b6 --- /dev/null +++ b/packages/ai-proxy/src/get-ai-configuration.ts @@ -0,0 +1,28 @@ +import type { AiConfiguration } from './provider'; +import type { Logger } from '@forestadmin/datasource-toolkit'; + +export default function getAiConfiguration( + aiConfigurations: AiConfiguration[], + aiName?: string, + logger?: Logger, +): AiConfiguration | null { + if (aiConfigurations.length === 0) return null; + + if (aiName) { + const config = aiConfigurations.find(c => c.name === aiName); + + if (!config) { + const fallback = aiConfigurations[0]; + logger?.( + 'Warn', + `AI configuration '${aiName}' not found. 
Falling back to '${fallback.name}' (provider: ${fallback.provider}, model: ${fallback.model})`, + ); + + return fallback; + } + + return config; + } + + return aiConfigurations[0]; +} diff --git a/packages/ai-proxy/src/index.ts b/packages/ai-proxy/src/index.ts index 5dd913d5d4..c355e0f9bb 100644 --- a/packages/ai-proxy/src/index.ts +++ b/packages/ai-proxy/src/index.ts @@ -3,8 +3,10 @@ import type { McpConfiguration } from './mcp-client'; import McpConfigChecker from './mcp-config-checker'; export { createAiProvider } from './create-ai-provider'; +export { createBaseChatModel } from './create-base-chat-model'; export { default as ProviderDispatcher } from './provider-dispatcher'; export * from './provider-dispatcher'; +export * from './ai-client'; export * from './remote-tools'; export * from './router'; export * from './mcp-client'; diff --git a/packages/ai-proxy/src/router.ts b/packages/ai-proxy/src/router.ts index 8424a35bcb..807885ab76 100644 --- a/packages/ai-proxy/src/router.ts +++ b/packages/ai-proxy/src/router.ts @@ -5,12 +5,13 @@ import type { RouteArgs } from './schemas/route'; import type { Logger } from '@forestadmin/datasource-toolkit'; import type { z } from 'zod'; -import { AIBadRequestError, AIModelNotSupportedError } from './errors'; +import { AIBadRequestError } from './errors'; +import getAiConfiguration from './get-ai-configuration'; import McpClient from './mcp-client'; import ProviderDispatcher from './provider-dispatcher'; import { RemoteTools } from './remote-tools'; import { routeArgsSchema } from './schemas/route'; -import isModelSupportingTools from './supported-models'; +import validateAiConfigurations from './validate-ai-configurations'; export type { AiQueryArgs, @@ -40,15 +41,7 @@ export class Router { this.localToolsApiKeys = params?.localToolsApiKeys; this.logger = params?.logger; - this.validateConfigurations(); - } - - private validateConfigurations(): void { - for (const config of this.aiConfigurations) { - if 
(!isModelSupportingTools(config.model, config.provider)) { - throw new AIModelNotSupportedError(config.model); - } - } + validateAiConfigurations(this.aiConfigurations); } /** @@ -82,7 +75,11 @@ export class Router { switch (validatedArgs.route) { case 'ai-query': { - const aiConfiguration = this.getAiConfiguration(validatedArgs.query?.['ai-name']); + const aiConfiguration = getAiConfiguration( + this.aiConfigurations, + validatedArgs.query?.['ai-name'], + this.logger, + ); return await new ProviderDispatcher(aiConfiguration, remoteTools).dispatch( validatedArgs.body, @@ -141,26 +138,4 @@ export class Router { }) .join('; '); } - - private getAiConfiguration(aiName?: string): AiConfiguration | null { - if (this.aiConfigurations.length === 0) return null; - - if (aiName) { - const config = this.aiConfigurations.find(c => c.name === aiName); - - if (!config) { - const fallback = this.aiConfigurations[0]; - this.logger?.( - 'Warn', - `AI configuration '${aiName}' not found. Falling back to '${fallback.name}' (provider: ${fallback.provider}, model: ${fallback.model})`, - ); - - return fallback; - } - - return config; - } - - return this.aiConfigurations[0]; - } } diff --git a/packages/ai-proxy/src/validate-ai-configurations.ts b/packages/ai-proxy/src/validate-ai-configurations.ts new file mode 100644 index 0000000000..150234207e --- /dev/null +++ b/packages/ai-proxy/src/validate-ai-configurations.ts @@ -0,0 +1,12 @@ +import type { AiConfiguration } from './provider'; + +import { AIModelNotSupportedError } from './errors'; +import isModelSupportingTools from './supported-models'; + +export default function validateAiConfigurations(aiConfigurations: AiConfiguration[]): void { + for (const config of aiConfigurations) { + if (!isModelSupportingTools(config.model, config.provider)) { + throw new AIModelNotSupportedError(config.model); + } + } +} diff --git a/packages/ai-proxy/test/ai-client.test.ts b/packages/ai-proxy/test/ai-client.test.ts new file mode 100644 index 
0000000000..6c6929dd2c --- /dev/null +++ b/packages/ai-proxy/test/ai-client.test.ts @@ -0,0 +1,413 @@ +import type { Logger } from '@forestadmin/datasource-toolkit'; +import type { BaseChatModel } from '@langchain/core/language_models/chat_models'; + +import { AIModelNotSupportedError, AINotConfiguredError, AiClient } from '../src'; +import McpClient from '../src/mcp-client'; + +jest.mock('../src/mcp-client', () => { + return jest.fn().mockImplementation(() => ({ + loadTools: jest.fn().mockResolvedValue([]), + closeConnections: jest.fn(), + })); +}); + +const MockedMcpClient = McpClient as jest.MockedClass; + +const createBaseChatModelMock = jest.fn().mockReturnValue({} as BaseChatModel); +jest.mock('../src/create-base-chat-model', () => ({ + createBaseChatModel: (...args: unknown[]) => createBaseChatModelMock(...args), +})); + +describe('Model validation', () => { + it('throws AIModelNotSupportedError for unsupported models', () => { + expect( + () => + new AiClient({ + aiConfigurations: [ + { name: 'test', provider: 'openai', apiKey: 'dev', model: 'gpt-4' }, + ], + }), + ).toThrow(AIModelNotSupportedError); + }); + + it('accepts supported models', () => { + expect( + () => + new AiClient({ + aiConfigurations: [ + { name: 'test', provider: 'openai', apiKey: 'dev', model: 'gpt-4o' }, + ], + }), + ).not.toThrow(); + }); +}); + +describe('getModel', () => { + beforeEach(() => { + jest.clearAllMocks(); + }); + + it('returns a BaseChatModel by calling createBaseChatModel', () => { + const fakeModel = { fake: true } as unknown as BaseChatModel; + createBaseChatModelMock.mockReturnValue(fakeModel); + + const client = new AiClient({ + aiConfigurations: [{ name: 'gpt4', provider: 'openai', apiKey: 'dev', model: 'gpt-4o' }], + }); + + const result = client.getModel('gpt4'); + + expect(createBaseChatModelMock).toHaveBeenCalledWith( + expect.objectContaining({ name: 'gpt4', provider: 'openai', model: 'gpt-4o' }), + ); + expect(result).toBe(fakeModel); + }); + + it('returns 
cached instance on second call with same name', () => { + const fakeModel = { fake: true } as unknown as BaseChatModel; + createBaseChatModelMock.mockReturnValue(fakeModel); + + const client = new AiClient({ + aiConfigurations: [{ name: 'gpt4', provider: 'openai', apiKey: 'dev', model: 'gpt-4o' }], + }); + + const first = client.getModel('gpt4'); + const second = client.getModel('gpt4'); + + expect(first).toBe(second); + expect(createBaseChatModelMock).toHaveBeenCalledTimes(1); + }); + + it('uses first configuration when aiName is not provided', () => { + const fakeModel = { fake: true } as unknown as BaseChatModel; + createBaseChatModelMock.mockReturnValue(fakeModel); + + const client = new AiClient({ + aiConfigurations: [ + { name: 'gpt4', provider: 'openai', apiKey: 'dev', model: 'gpt-4o' }, + { name: 'gpt4mini', provider: 'openai', apiKey: 'dev', model: 'gpt-4o-mini' }, + ], + }); + + client.getModel(); + + expect(createBaseChatModelMock).toHaveBeenCalledWith( + expect.objectContaining({ name: 'gpt4', model: 'gpt-4o' }), + ); + }); + + it('throws AINotConfiguredError when aiConfigurations is empty', () => { + const client = new AiClient({}); + + expect(() => client.getModel()).toThrow(AINotConfiguredError); + }); + + it('throws AINotConfiguredError when constructed with no arguments', () => { + const client = new AiClient(); + + expect(() => client.getModel()).toThrow(AINotConfiguredError); + }); + + it('creates separate cached instances for different AI names', () => { + const fakeModel1 = { id: 1 } as unknown as BaseChatModel; + const fakeModel2 = { id: 2 } as unknown as BaseChatModel; + createBaseChatModelMock.mockReturnValueOnce(fakeModel1).mockReturnValueOnce(fakeModel2); + + const client = new AiClient({ + aiConfigurations: [ + { name: 'gpt4', provider: 'openai', apiKey: 'dev', model: 'gpt-4o' }, + { name: 'gpt4mini', provider: 'openai', apiKey: 'dev', model: 'gpt-4o-mini' }, + ], + }); + + const result1 = client.getModel('gpt4'); + const result2 = 
client.getModel('gpt4mini'); + + expect(result1).not.toBe(result2); + expect(createBaseChatModelMock).toHaveBeenCalledTimes(2); + }); + + it('falls back to first config and caches by resolved name', () => { + const fakeModel = { fake: true } as unknown as BaseChatModel; + createBaseChatModelMock.mockReturnValue(fakeModel); + const mockLogger = jest.fn(); + + const client = new AiClient({ + aiConfigurations: [{ name: 'gpt4', provider: 'openai', apiKey: 'dev', model: 'gpt-4o' }], + logger: mockLogger, + }); + + const result = client.getModel('non-existent'); + + expect(mockLogger).toHaveBeenCalledWith( + 'Warn', + expect.stringContaining("AI configuration 'non-existent' not found"), + ); + expect(createBaseChatModelMock).toHaveBeenCalledWith( + expect.objectContaining({ name: 'gpt4' }), + ); + expect(result).toBe(fakeModel); + }); +}); + +describe('loadRemoteTools', () => { + beforeEach(() => { + jest.clearAllMocks(); + }); + + it('creates an McpClient and returns loaded tools', async () => { + const fakeTools = [{ name: 'tool1' }]; + jest.mocked(McpClient).mockImplementation( + () => + ({ + loadTools: jest.fn().mockResolvedValue(fakeTools), + closeConnections: jest.fn(), + } as unknown as McpClient), + ); + + const client = new AiClient({}); + const mcpConfig = { configs: { server1: { command: 'test', args: [] } } }; + + const result = await client.loadRemoteTools(mcpConfig); + + expect(MockedMcpClient).toHaveBeenCalledWith(mcpConfig, undefined); + expect(result).toBe(fakeTools); + }); + + it('closes previous client before creating a new one', async () => { + const closeConnectionsMock1 = jest.fn(); + const closeConnectionsMock2 = jest.fn(); + + jest + .mocked(McpClient) + .mockImplementationOnce( + () => + ({ + loadTools: jest.fn().mockResolvedValue([]), + closeConnections: closeConnectionsMock1, + } as unknown as McpClient), + ) + .mockImplementationOnce( + () => + ({ + loadTools: jest.fn().mockResolvedValue([]), + closeConnections: closeConnectionsMock2, + } as 
unknown as McpClient), + ); + + const client = new AiClient({}); + const mcpConfig = { configs: { server1: { command: 'test', args: [] } } }; + + await client.loadRemoteTools(mcpConfig); + await client.loadRemoteTools(mcpConfig); + + expect(closeConnectionsMock1).toHaveBeenCalledTimes(1); + expect(MockedMcpClient).toHaveBeenCalledTimes(2); + }); + + it('passes the logger to McpClient', async () => { + const customLogger: Logger = jest.fn(); + jest.mocked(McpClient).mockImplementation( + () => + ({ + loadTools: jest.fn().mockResolvedValue([]), + closeConnections: jest.fn(), + } as unknown as McpClient), + ); + + const client = new AiClient({ logger: customLogger }); + const mcpConfig = { configs: { server1: { command: 'test', args: [] } } }; + + await client.loadRemoteTools(mcpConfig); + + expect(MockedMcpClient).toHaveBeenCalledWith(mcpConfig, customLogger); + }); + + it('still creates a new client when closing the previous one fails', async () => { + const mockLogger = jest.fn(); + const closeError = new Error('Close failed'); + const fakeTools = [{ name: 'tool1' }]; + + jest + .mocked(McpClient) + .mockImplementationOnce( + () => + ({ + loadTools: jest.fn().mockResolvedValue([]), + closeConnections: jest.fn().mockRejectedValue(closeError), + } as unknown as McpClient), + ) + .mockImplementationOnce( + () => + ({ + loadTools: jest.fn().mockResolvedValue(fakeTools), + closeConnections: jest.fn(), + } as unknown as McpClient), + ); + + const client = new AiClient({ logger: mockLogger }); + const mcpConfig = { configs: { server1: { command: 'test', args: [] } } }; + + await client.loadRemoteTools(mcpConfig); + const result = await client.loadRemoteTools(mcpConfig); + + expect(result).toBe(fakeTools); + expect(MockedMcpClient).toHaveBeenCalledTimes(2); + expect(mockLogger).toHaveBeenCalledWith( + 'Error', + 'Error closing previous MCP connection', + closeError, + ); + }); + + it('wraps non-Error thrown values when closing previous client fails', async () => { + const 
mockLogger = jest.fn(); + + jest + .mocked(McpClient) + .mockImplementationOnce( + () => + ({ + loadTools: jest.fn().mockResolvedValue([]), + closeConnections: jest.fn().mockRejectedValue('string error'), + } as unknown as McpClient), + ) + .mockImplementationOnce( + () => + ({ + loadTools: jest.fn().mockResolvedValue([]), + closeConnections: jest.fn(), + } as unknown as McpClient), + ); + + const client = new AiClient({ logger: mockLogger }); + const mcpConfig = { configs: { server1: { command: 'test', args: [] } } }; + + await client.loadRemoteTools(mcpConfig); + await client.loadRemoteTools(mcpConfig); + + expect(mockLogger).toHaveBeenCalledWith( + 'Error', + 'Error closing previous MCP connection', + expect.objectContaining({ message: 'string error' }), + ); + }); + + it('does not store mcpClient reference when loadTools fails', async () => { + const loadToolsError = new Error('loadTools failed'); + + jest.mocked(McpClient).mockImplementation( + () => + ({ + loadTools: jest.fn().mockRejectedValue(loadToolsError), + closeConnections: jest.fn(), + } as unknown as McpClient), + ); + + const client = new AiClient({}); + const mcpConfig = { configs: { server1: { command: 'test', args: [] } } }; + + await expect(client.loadRemoteTools(mcpConfig)).rejects.toThrow(loadToolsError); + + // closeConnections should be a no-op since mcpClient was never stored + await expect(client.closeConnections()).resolves.toBeUndefined(); + }); +}); + +describe('closeConnections', () => { + beforeEach(() => { + jest.clearAllMocks(); + }); + + it('closes the McpClient', async () => { + const closeConnectionsMock = jest.fn(); + jest.mocked(McpClient).mockImplementation( + () => + ({ + loadTools: jest.fn().mockResolvedValue([]), + closeConnections: closeConnectionsMock, + } as unknown as McpClient), + ); + + const client = new AiClient({}); + await client.loadRemoteTools({ configs: { server1: { command: 'test', args: [] } } }); + + await client.closeConnections(); + + 
expect(closeConnectionsMock).toHaveBeenCalledTimes(1); + }); + + it('is a no-op when no McpClient exists', async () => { + const client = new AiClient({}); + + await expect(client.closeConnections()).resolves.toBeUndefined(); + }); + + it('logs error and clears reference when closeConnections throws', async () => { + const mockLogger = jest.fn(); + const closeError = new Error('close failed'); + jest.mocked(McpClient).mockImplementation( + () => + ({ + loadTools: jest.fn().mockResolvedValue([]), + closeConnections: jest.fn().mockRejectedValue(closeError), + } as unknown as McpClient), + ); + + const client = new AiClient({ logger: mockLogger }); + await client.loadRemoteTools({ configs: { server1: { command: 'test', args: [] } } }); + + // Should not throw — error is caught and logged + await client.closeConnections(); + + expect(mockLogger).toHaveBeenCalledWith( + 'Error', + 'Error during MCP connection cleanup', + closeError, + ); + + // Second call should be a no-op (reference cleared in finally block) + await expect(client.closeConnections()).resolves.toBeUndefined(); + }); + + it('wraps non-Error thrown values during cleanup', async () => { + const mockLogger = jest.fn(); + jest.mocked(McpClient).mockImplementation( + () => + ({ + loadTools: jest.fn().mockResolvedValue([]), + closeConnections: jest.fn().mockRejectedValue('string error'), + } as unknown as McpClient), + ); + + const client = new AiClient({ logger: mockLogger }); + await client.loadRemoteTools({ configs: { server1: { command: 'test', args: [] } } }); + + await client.closeConnections(); + + expect(mockLogger).toHaveBeenCalledWith( + 'Error', + 'Error during MCP connection cleanup', + expect.objectContaining({ message: 'string error' }), + ); + }); + + it('is safe to call twice', async () => { + const closeConnectionsMock = jest.fn(); + jest.mocked(McpClient).mockImplementation( + () => + ({ + loadTools: jest.fn().mockResolvedValue([]), + closeConnections: closeConnectionsMock, + } as unknown as 
McpClient), + ); + + const client = new AiClient({}); + await client.loadRemoteTools({ configs: { server1: { command: 'test', args: [] } } }); + + await client.closeConnections(); + await client.closeConnections(); + + expect(closeConnectionsMock).toHaveBeenCalledTimes(1); + }); +}); diff --git a/packages/ai-proxy/test/create-base-chat-model.test.ts b/packages/ai-proxy/test/create-base-chat-model.test.ts new file mode 100644 index 0000000000..6da3e41af3 --- /dev/null +++ b/packages/ai-proxy/test/create-base-chat-model.test.ts @@ -0,0 +1,118 @@ +import { ChatAnthropic } from '@langchain/anthropic'; +import { ChatOpenAI } from '@langchain/openai'; + +import { createBaseChatModel } from '../src/create-base-chat-model'; +import { AIBadRequestError } from '../src/errors'; + +jest.mock('@langchain/openai', () => ({ + ChatOpenAI: jest.fn(), +})); + +jest.mock('@langchain/anthropic', () => ({ + ChatAnthropic: jest.fn(), +})); + +describe('createBaseChatModel', () => { + beforeEach(() => { + jest.clearAllMocks(); + }); + + it('creates a ChatOpenAI for openai provider with maxRetries: 0', () => { + const config = { + name: 'gpt4', + provider: 'openai' as const, + apiKey: 'test-key', + model: 'gpt-4o', + }; + + createBaseChatModel(config); + + expect(ChatOpenAI).toHaveBeenCalledWith({ + maxRetries: 0, + apiKey: 'test-key', + model: 'gpt-4o', + }); + }); + + it('forwards extra options like temperature to ChatOpenAI', () => { + const config = { + name: 'gpt4', + provider: 'openai' as const, + apiKey: 'test-key', + model: 'gpt-4o', + temperature: 0.7, + }; + + createBaseChatModel(config); + + expect(ChatOpenAI).toHaveBeenCalledWith({ + maxRetries: 0, + apiKey: 'test-key', + model: 'gpt-4o', + temperature: 0.7, + }); + }); + + it('forwards extra options like temperature to ChatAnthropic', () => { + const config = { + name: 'claude', + provider: 'anthropic' as const, + apiKey: 'test-key', + model: 'claude-3-5-sonnet-latest' as const, + temperature: 0.5, + }; + + 
createBaseChatModel(config); + + expect(ChatAnthropic).toHaveBeenCalledWith({ + maxRetries: 0, + apiKey: 'test-key', + model: 'claude-3-5-sonnet-latest', + temperature: 0.5, + }); + }); + + it('does not pass __includeRawResponse for openai provider', () => { + const config = { + name: 'gpt4', + provider: 'openai' as const, + apiKey: 'test-key', + model: 'gpt-4o', + }; + + createBaseChatModel(config); + + const passedArgs = (ChatOpenAI as unknown as jest.Mock).mock.calls[0][0]; + expect(passedArgs).not.toHaveProperty('__includeRawResponse'); + }); + + it('creates a ChatAnthropic for anthropic provider with maxRetries: 0', () => { + const config = { + name: 'claude', + provider: 'anthropic' as const, + apiKey: 'test-key', + model: 'claude-3-5-sonnet-latest' as const, + }; + + createBaseChatModel(config); + + expect(ChatAnthropic).toHaveBeenCalledWith({ + maxRetries: 0, + apiKey: 'test-key', + model: 'claude-3-5-sonnet-latest', + }); + }); + + it('throws AIBadRequestError for unsupported provider', () => { + const config = { + name: 'unknown', + provider: 'unknown-provider' as any, + model: 'some-model', + }; + + expect(() => createBaseChatModel(config)).toThrow(AIBadRequestError); + expect(() => createBaseChatModel(config)).toThrow( + "Unsupported AI provider 'unknown-provider'.", + ); + }); +}); diff --git a/packages/ai-proxy/test/get-ai-configuration.test.ts b/packages/ai-proxy/test/get-ai-configuration.test.ts new file mode 100644 index 0000000000..aee9bf2d39 --- /dev/null +++ b/packages/ai-proxy/test/get-ai-configuration.test.ts @@ -0,0 +1,57 @@ +import type { AiConfiguration } from '../src/provider'; + +import getAiConfiguration from '../src/get-ai-configuration'; + +const gpt4Config: AiConfiguration = { + name: 'gpt4', + provider: 'openai', + apiKey: 'dev', + model: 'gpt-4o', +}; + +const claudeConfig: AiConfiguration = { + name: 'claude', + provider: 'anthropic', + apiKey: 'dev', + model: 'claude-3-5-sonnet-latest', +}; + +describe('getAiConfiguration', () => 
{ + it('returns null when aiConfigurations is empty', () => { + expect(getAiConfiguration([], 'gpt4')).toBeNull(); + }); + + it('returns null when aiConfigurations is empty and no name provided', () => { + expect(getAiConfiguration([])).toBeNull(); + }); + + it('returns the matching config when aiName matches', () => { + expect(getAiConfiguration([gpt4Config, claudeConfig], 'claude')).toBe(claudeConfig); + }); + + it('returns first config when aiName is not provided', () => { + expect(getAiConfiguration([gpt4Config, claudeConfig])).toBe(gpt4Config); + }); + + it('returns first config when aiName is undefined', () => { + expect(getAiConfiguration([gpt4Config, claudeConfig], undefined)).toBe(gpt4Config); + }); + + it('falls back to first config and logs warning when aiName not found', () => { + const logger = jest.fn(); + + const result = getAiConfiguration([gpt4Config, claudeConfig], 'non-existent', logger); + + expect(result).toBe(gpt4Config); + expect(logger).toHaveBeenCalledWith( + 'Warn', + "AI configuration 'non-existent' not found. 
Falling back to 'gpt4' (provider: openai, model: gpt-4o)", + ); + }); + + it('does not crash when logger is undefined and aiName not found', () => { + const result = getAiConfiguration([gpt4Config], 'non-existent'); + + expect(result).toBe(gpt4Config); + }); +}); From c25a953b1bd9ad54656b3f0cbf7fe09f66027abe Mon Sep 17 00:00:00 2001 From: Matthieu Date: Wed, 18 Mar 2026 17:52:00 +0100 Subject: [PATCH 08/18] feat(workflow-executor): implement WorkflowPort adapter using forestadmin-client (#1498) --- packages/forestadmin-client/src/index.ts | 1 + packages/workflow-executor/package.json | 1 + .../adapters/forest-server-workflow-port.ts | 53 +++++++++ packages/workflow-executor/src/index.ts | 1 + .../src/ports/workflow-port.ts | 2 +- .../forest-server-workflow-port.test.ts | 105 ++++++++++++++++++ 6 files changed, 162 insertions(+), 1 deletion(-) create mode 100644 packages/workflow-executor/src/adapters/forest-server-workflow-port.ts create mode 100644 packages/workflow-executor/test/adapters/forest-server-workflow-port.test.ts diff --git a/packages/forestadmin-client/src/index.ts b/packages/forestadmin-client/src/index.ts index 4957d2c573..1d50616c52 100644 --- a/packages/forestadmin-client/src/index.ts +++ b/packages/forestadmin-client/src/index.ts @@ -90,6 +90,7 @@ export { default as ForestAdminClientWithCache } from './forest-admin-client-wit export { default as buildApplicationServices } from './build-application-services'; export { HttpOptions } from './utils/http-options'; export { default as ForestHttpApi } from './permissions/forest-http-api'; +export { default as ServerUtils } from './utils/server'; // export is necessary for the agent-generator package export { default as SchemaService, SchemaServiceOptions } from './schema'; export { default as ActivityLogsService, ActivityLogsOptions } from './activity-logs'; diff --git a/packages/workflow-executor/package.json b/packages/workflow-executor/package.json index fafc832c59..3138b9a5d9 100644 --- 
a/packages/workflow-executor/package.json +++ b/packages/workflow-executor/package.json @@ -24,6 +24,7 @@ }, "dependencies": { "@forestadmin/agent-client": "1.4.13", + "@forestadmin/forestadmin-client": "1.37.17", "@langchain/core": "1.1.33", "zod": "4.3.6" } diff --git a/packages/workflow-executor/src/adapters/forest-server-workflow-port.ts b/packages/workflow-executor/src/adapters/forest-server-workflow-port.ts new file mode 100644 index 0000000000..e804e01cfa --- /dev/null +++ b/packages/workflow-executor/src/adapters/forest-server-workflow-port.ts @@ -0,0 +1,53 @@ +import type { McpConfiguration, WorkflowPort } from '../ports/workflow-port'; +import type { PendingStepExecution } from '../types/execution'; +import type { CollectionRef } from '../types/record'; +import type { StepHistory } from '../types/step-history'; +import type { HttpOptions } from '@forestadmin/forestadmin-client'; + +import { ServerUtils } from '@forestadmin/forestadmin-client'; + +// TODO: finalize route paths with the team — these are placeholders +const ROUTES = { + pendingStepExecutions: '/liana/v1/workflow-step-executions/pending', + updateStepExecution: (runId: string) => `/liana/v1/workflow-step-executions/${runId}/complete`, + collectionRef: (collectionName: string) => `/liana/v1/collections/${collectionName}`, + mcpServerConfigs: '/liana/mcp-server-configs-with-details', +}; + +export default class ForestServerWorkflowPort implements WorkflowPort { + private readonly options: HttpOptions; + + constructor(params: { envSecret: string; forestServerUrl: string }) { + this.options = { envSecret: params.envSecret, forestServerUrl: params.forestServerUrl }; + } + + async getPendingStepExecutions(): Promise { + return ServerUtils.query( + this.options, + 'get', + ROUTES.pendingStepExecutions, + ); + } + + async updateStepExecution(runId: string, stepHistory: StepHistory): Promise { + await ServerUtils.query( + this.options, + 'post', + ROUTES.updateStepExecution(runId), + {}, + 
stepHistory, + ); + } + + async getCollectionRef(collectionName: string): Promise { + return ServerUtils.query( + this.options, + 'get', + ROUTES.collectionRef(collectionName), + ); + } + + async getMcpServerConfigs(): Promise { + return ServerUtils.query(this.options, 'get', ROUTES.mcpServerConfigs); + } +} diff --git a/packages/workflow-executor/src/index.ts b/packages/workflow-executor/src/index.ts index c434071d83..2918b36c45 100644 --- a/packages/workflow-executor/src/index.ts +++ b/packages/workflow-executor/src/index.ts @@ -42,3 +42,4 @@ export { export { default as BaseStepExecutor } from './executors/base-step-executor'; export { default as ConditionStepExecutor } from './executors/condition-step-executor'; export { default as AgentClientAgentPort } from './adapters/agent-client-agent-port'; +export { default as ForestServerWorkflowPort } from './adapters/forest-server-workflow-port'; diff --git a/packages/workflow-executor/src/ports/workflow-port.ts b/packages/workflow-executor/src/ports/workflow-port.ts index c36ea41d8e..93951f6f02 100644 --- a/packages/workflow-executor/src/ports/workflow-port.ts +++ b/packages/workflow-executor/src/ports/workflow-port.ts @@ -9,7 +9,7 @@ export type McpConfiguration = unknown; export interface WorkflowPort { getPendingStepExecutions(): Promise; - completeStepExecution(runId: string, stepHistory: StepHistory): Promise; + updateStepExecution(runId: string, stepHistory: StepHistory): Promise; getCollectionRef(collectionName: string): Promise; getMcpServerConfigs(): Promise; } diff --git a/packages/workflow-executor/test/adapters/forest-server-workflow-port.test.ts b/packages/workflow-executor/test/adapters/forest-server-workflow-port.test.ts new file mode 100644 index 0000000000..ff37147e74 --- /dev/null +++ b/packages/workflow-executor/test/adapters/forest-server-workflow-port.test.ts @@ -0,0 +1,105 @@ +import type { PendingStepExecution } from '../../src/types/execution'; +import type { CollectionRef } from 
'../../src/types/record'; +import type { StepHistory } from '../../src/types/step-history'; + +import { ServerUtils } from '@forestadmin/forestadmin-client'; + +import ForestServerWorkflowPort from '../../src/adapters/forest-server-workflow-port'; + +jest.mock('@forestadmin/forestadmin-client', () => ({ + ServerUtils: { query: jest.fn() }, +})); + +const mockQuery = ServerUtils.query as jest.Mock; + +const options = { envSecret: 'env-secret-123', forestServerUrl: 'https://api.forestadmin.com' }; + +describe('ForestServerWorkflowPort', () => { + let port: ForestServerWorkflowPort; + + beforeEach(() => { + jest.clearAllMocks(); + port = new ForestServerWorkflowPort(options); + }); + + describe('getPendingStepExecutions', () => { + it('should call the pending step executions route', async () => { + const pending: PendingStepExecution[] = []; + mockQuery.mockResolvedValue(pending); + + const result = await port.getPendingStepExecutions(); + + expect(mockQuery).toHaveBeenCalledWith( + options, + 'get', + '/liana/v1/workflow-step-executions/pending', + ); + expect(result).toBe(pending); + }); + }); + + describe('updateStepExecution', () => { + it('should post step history to the complete route', async () => { + mockQuery.mockResolvedValue(undefined); + const stepHistory: StepHistory = { + type: 'condition', + stepId: 'step-1', + stepIndex: 0, + status: 'success', + selectedOption: 'optionA', + }; + + await port.updateStepExecution('run-42', stepHistory); + + expect(mockQuery).toHaveBeenCalledWith( + options, + 'post', + '/liana/v1/workflow-step-executions/run-42/complete', + {}, + stepHistory, + ); + }); + }); + + describe('getCollectionRef', () => { + it('should fetch the collection ref by name', async () => { + const collectionRef: CollectionRef = { + collectionName: 'users', + collectionDisplayName: 'Users', + primaryKeyFields: ['id'], + fields: [], + actions: [], + }; + mockQuery.mockResolvedValue(collectionRef); + + const result = await 
port.getCollectionRef('users'); + + expect(mockQuery).toHaveBeenCalledWith(options, 'get', '/liana/v1/collections/users'); + expect(result).toEqual(collectionRef); + }); + }); + + describe('getMcpServerConfigs', () => { + it('should fetch mcp server configs', async () => { + const configs = [{ name: 'mcp-1' }]; + mockQuery.mockResolvedValue(configs); + + const result = await port.getMcpServerConfigs(); + + expect(mockQuery).toHaveBeenCalledWith( + options, + 'get', + '/liana/mcp-server-configs-with-details', + ); + expect(result).toEqual(configs); + }); + }); + + describe('error propagation', () => { + it('should propagate errors from ServerUtils.query', async () => { + mockQuery.mockRejectedValue(new Error('Network error')); + + await expect(port.getPendingStepExecutions()).rejects.toThrow('Network error'); + }); + }); +}); From c9877fe6bb0f4a3ad44a0561ea8be240ef0f7fce Mon Sep 17 00:00:00 2001 From: scra Date: Thu, 19 Mar 2026 15:45:17 +0100 Subject: [PATCH 09/18] feat(workflow-executor): add ReadRecordStepExecutor (#1497) --- CLAUDE.md | 5 + packages/workflow-executor/CLAUDE.md | 9 +- .../src/adapters/agent-client-agent-port.ts | 44 +- .../adapters/forest-server-workflow-port.ts | 16 +- packages/workflow-executor/src/errors.ts | 18 + .../src/executors/base-step-executor.ts | 42 +- .../src/executors/condition-step-executor.ts | 40 +- .../executors/read-record-step-executor.ts | 229 ++++++ packages/workflow-executor/src/index.ts | 31 +- .../workflow-executor/src/ports/agent-port.ts | 9 +- .../workflow-executor/src/ports/run-store.ts | 5 - .../src/ports/workflow-port.ts | 8 +- .../workflow-executor/src/types/execution.ts | 29 +- .../workflow-executor/src/types/record.ts | 26 +- .../src/types/step-definition.ts | 16 +- .../src/types/step-execution-data.ts | 65 +- .../{step-history.ts => step-outcome.ts} | 10 +- .../adapters/agent-client-agent-port.test.ts | 94 ++- .../forest-server-workflow-port.test.ts | 24 +- .../test/executors/base-step-executor.test.ts | 129 ++- 
.../executors/condition-step-executor.test.ts | 122 ++- .../read-record-step-executor.test.ts | 778 ++++++++++++++++++ 22 files changed, 1456 insertions(+), 293 deletions(-) create mode 100644 packages/workflow-executor/src/executors/read-record-step-executor.ts rename packages/workflow-executor/src/types/{step-history.ts => step-outcome.ts} (77%) create mode 100644 packages/workflow-executor/test/executors/read-record-step-executor.test.ts diff --git a/CLAUDE.md b/CLAUDE.md index 238598e1f2..138e9daafe 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -104,6 +104,11 @@ yarn workspace @forestadmin/agent test 5. Are edge cases handled? 6. Is the naming clear and consistent? +## Git Workflow + +The **main working branch** for workflow-executor development is `feat/prd-214-setup-workflow-executor-package`. +All feature branches for this area should be based on and PRs targeted at this branch (not `main`). + ## Linear Tickets ### MCP Setup diff --git a/packages/workflow-executor/CLAUDE.md b/packages/workflow-executor/CLAUDE.md index 2be0522bca..bff9a84141 100644 --- a/packages/workflow-executor/CLAUDE.md +++ b/packages/workflow-executor/CLAUDE.md @@ -42,10 +42,10 @@ Front ◀──▶ Orchestrator ◀──pull/push──▶ Executor ── ``` src/ -├── errors.ts # WorkflowExecutorError, MissingToolCallError, MalformedToolCallError +├── errors.ts # WorkflowExecutorError, MissingToolCallError, MalformedToolCallError, NoRecordsError, NoReadableFieldsError ├── types/ # Core type definitions (@draft) │ ├── step-definition.ts # StepType enum + step definition interfaces -│ ├── step-history.ts # Step outcome tracking types +│ ├── step-outcome.ts # Step outcome tracking types (StepOutcome, sent to orchestrator) │ ├── step-execution-data.ts # Runtime state for in-progress steps │ ├── record.ts # Record references and data types │ └── execution.ts # Top-level execution types (context, results) @@ -55,7 +55,8 @@ src/ │ └── run-store.ts # Interface for persisting run state (scoped to a run) ├── executors/ # 
Step executor implementations │ ├── base-step-executor.ts # Abstract base class (context injection + shared helpers) -│ └── condition-step-executor.ts # AI-powered condition step (chooses among options) +│ ├── condition-step-executor.ts # AI-powered condition step (chooses among options) +│ └── read-record-step-executor.ts # AI-powered record field reading step └── index.ts # Barrel exports ``` @@ -63,7 +64,7 @@ src/ - **Pull-based** — The executor polls for pending steps via a port interface (`WorkflowPort.getPendingStepExecutions`; polling loop not yet implemented). - **Atomic** — Each step executes in isolation. A run store (scoped per run) maintains continuity between steps. -- **Privacy** — Zero client data leaves the client's infrastructure. `StepHistory` is sent to the orchestrator and must NEVER contain client data. Privacy-sensitive information (e.g. AI reasoning) must stay in `StepExecutionData` (persisted in the RunStore, client-side only). +- **Privacy** — Zero client data leaves the client's infrastructure. `StepOutcome` is sent to the orchestrator and must NEVER contain client data. Privacy-sensitive information (e.g. AI reasoning) must stay in `StepExecutionData` (persisted in the RunStore, client-side only). - **Ports (IO injection)** — All external IO goes through injected port interfaces, keeping the core pure and testable. - **AI integration** — Uses `@langchain/core` (`BaseChatModel`, `DynamicStructuredTool`) for AI-powered steps. `ExecutionContext.model` is a `BaseChatModel`. - **No recovery/retry** — Once the executor returns a step result to the orchestrator, the step is considered executed. There is no mechanism to re-dispatch a step, so executors must NOT include recovery checks (e.g. checking the RunStore for cached results before executing). Each step executes exactly once. 
diff --git a/packages/workflow-executor/src/adapters/agent-client-agent-port.ts b/packages/workflow-executor/src/adapters/agent-client-agent-port.ts index 23015e8bcd..cf8949a1a6 100644 --- a/packages/workflow-executor/src/adapters/agent-client-agent-port.ts +++ b/packages/workflow-executor/src/adapters/agent-client-agent-port.ts @@ -1,5 +1,5 @@ import type { AgentPort } from '../ports/agent-port'; -import type { ActionRef, CollectionRef, RecordData } from '../types/record'; +import type { CollectionSchema } from '../types/record'; import type { RemoteAgentClient, SelectOptions } from '@forestadmin/agent-client'; import { RecordNotFoundError } from '../errors'; @@ -36,49 +36,49 @@ function extractRecordId( export default class AgentClientAgentPort implements AgentPort { private readonly client: RemoteAgentClient; - private readonly collectionRefs: Record; + private readonly collectionSchemas: Record; constructor(params: { client: RemoteAgentClient; - collectionRefs: Record; + collectionSchemas: Record; }) { this.client = params.client; - this.collectionRefs = params.collectionRefs; + this.collectionSchemas = params.collectionSchemas; } - async getRecord(collectionName: string, recordId: Array): Promise { - const ref = this.getCollectionRef(collectionName); + async getRecord(collectionName: string, recordId: Array, fieldNames?: string[]) { + const schema = this.resolveSchema(collectionName); const records = await this.client.collection(collectionName).list>({ - filters: buildPkFilter(ref.primaryKeyFields, recordId), + filters: buildPkFilter(schema.primaryKeyFields, recordId), pagination: { size: 1, number: 1 }, + ...(fieldNames?.length && { fields: fieldNames }), }); if (records.length === 0) { throw new RecordNotFoundError(collectionName, encodePk(recordId)); } - return { ...ref, recordId, values: records[0] }; + return { collectionName, recordId, values: records[0] }; } async updateRecord( collectionName: string, recordId: Array, values: Record, - ): Promise { - 
const ref = this.getCollectionRef(collectionName); + ) { const updatedRecord = await this.client .collection(collectionName) .update>(encodePk(recordId), values); - return { ...ref, recordId, values: updatedRecord }; + return { collectionName, recordId, values: updatedRecord }; } async getRelatedData( collectionName: string, recordId: Array, relationName: string, - ): Promise { - const relatedRef = this.getCollectionRef(relationName); + ) { + const relatedSchema = this.resolveSchema(relationName); const records = await this.client .collection(collectionName) @@ -86,18 +86,12 @@ export default class AgentClientAgentPort implements AgentPort { .list>(); return records.map(record => ({ - ...relatedRef, - recordId: extractRecordId(relatedRef.primaryKeyFields, record), + collectionName: relatedSchema.collectionName, + recordId: extractRecordId(relatedSchema.primaryKeyFields, record), values: record, })); } - async getActions(collectionName: string): Promise { - const ref = this.collectionRefs[collectionName]; - - return ref ? 
ref.actions : []; - } - async executeAction( collectionName: string, actionName: string, @@ -111,10 +105,10 @@ export default class AgentClientAgentPort implements AgentPort { return action.execute(); } - private getCollectionRef(collectionName: string): CollectionRef { - const ref = this.collectionRefs[collectionName]; + private resolveSchema(collectionName: string): CollectionSchema { + const schema = this.collectionSchemas[collectionName]; - if (!ref) { + if (!schema) { return { collectionName, collectionDisplayName: collectionName, @@ -124,6 +118,6 @@ export default class AgentClientAgentPort implements AgentPort { }; } - return ref; + return schema; } } diff --git a/packages/workflow-executor/src/adapters/forest-server-workflow-port.ts b/packages/workflow-executor/src/adapters/forest-server-workflow-port.ts index e804e01cfa..16037570bd 100644 --- a/packages/workflow-executor/src/adapters/forest-server-workflow-port.ts +++ b/packages/workflow-executor/src/adapters/forest-server-workflow-port.ts @@ -1,7 +1,7 @@ import type { McpConfiguration, WorkflowPort } from '../ports/workflow-port'; import type { PendingStepExecution } from '../types/execution'; -import type { CollectionRef } from '../types/record'; -import type { StepHistory } from '../types/step-history'; +import type { CollectionSchema } from '../types/record'; +import type { StepOutcome } from '../types/step-outcome'; import type { HttpOptions } from '@forestadmin/forestadmin-client'; import { ServerUtils } from '@forestadmin/forestadmin-client'; @@ -10,7 +10,7 @@ import { ServerUtils } from '@forestadmin/forestadmin-client'; const ROUTES = { pendingStepExecutions: '/liana/v1/workflow-step-executions/pending', updateStepExecution: (runId: string) => `/liana/v1/workflow-step-executions/${runId}/complete`, - collectionRef: (collectionName: string) => `/liana/v1/collections/${collectionName}`, + collectionSchema: (collectionName: string) => `/liana/v1/collections/${collectionName}`, mcpServerConfigs: 
'/liana/mcp-server-configs-with-details', }; @@ -29,21 +29,21 @@ export default class ForestServerWorkflowPort implements WorkflowPort { ); } - async updateStepExecution(runId: string, stepHistory: StepHistory): Promise { + async updateStepExecution(runId: string, stepOutcome: StepOutcome): Promise { await ServerUtils.query( this.options, 'post', ROUTES.updateStepExecution(runId), {}, - stepHistory, + stepOutcome, ); } - async getCollectionRef(collectionName: string): Promise { - return ServerUtils.query( + async getCollectionSchema(collectionName: string): Promise { + return ServerUtils.query( this.options, 'get', - ROUTES.collectionRef(collectionName), + ROUTES.collectionSchema(collectionName), ); } diff --git a/packages/workflow-executor/src/errors.ts b/packages/workflow-executor/src/errors.ts index d735977d4f..b835c391fa 100644 --- a/packages/workflow-executor/src/errors.ts +++ b/packages/workflow-executor/src/errors.ts @@ -27,3 +27,21 @@ export class RecordNotFoundError extends WorkflowExecutorError { super(`Record not found: collection "${collectionName}", id "${recordId}"`); } } + +export class NoRecordsError extends WorkflowExecutorError { + constructor() { + super('No records available'); + } +} + +export class NoReadableFieldsError extends WorkflowExecutorError { + constructor(collectionName: string) { + super(`No readable fields on record from collection "${collectionName}"`); + } +} + +export class NoResolvedFieldsError extends WorkflowExecutorError { + constructor(fieldNames: string[]) { + super(`None of the requested fields could be resolved: ${fieldNames.join(', ')}`); + } +} diff --git a/packages/workflow-executor/src/executors/base-step-executor.ts b/packages/workflow-executor/src/executors/base-step-executor.ts index 6c4fc76944..ed9de5cb39 100644 --- a/packages/workflow-executor/src/executors/base-step-executor.ts +++ b/packages/workflow-executor/src/executors/base-step-executor.ts @@ -1,25 +1,23 @@ import type { ExecutionContext, 
StepExecutionResult } from '../types/execution'; import type { StepDefinition } from '../types/step-definition'; import type { StepExecutionData } from '../types/step-execution-data'; -import type { StepHistory } from '../types/step-history'; +import type { StepOutcome } from '../types/step-outcome'; import type { AIMessage, BaseMessage } from '@langchain/core/messages'; import type { DynamicStructuredTool } from '@langchain/core/tools'; import { SystemMessage } from '@langchain/core/messages'; import { MalformedToolCallError, MissingToolCallError } from '../errors'; +import { isExecutedStepOnExecutor } from '../types/step-execution-data'; -export default abstract class BaseStepExecutor< - TStep extends StepDefinition = StepDefinition, - THistory extends StepHistory = StepHistory, -> { - protected readonly context: ExecutionContext; +export default abstract class BaseStepExecutor { + protected readonly context: ExecutionContext; - constructor(context: ExecutionContext) { + constructor(context: ExecutionContext) { this.context = context; } - abstract execute(step: TStep, stepHistory: THistory): Promise; + abstract execute(): Promise; /** * Returns a SystemMessage array summarizing previously executed steps. @@ -35,35 +33,41 @@ export default abstract class BaseStepExecutor< /** * Builds a text summary of previously executed steps for AI prompts. - * Correlates history entries (step + stepHistory pairs) with executionParams - * from the RunStore (matched by stepHistory.stepIndex). - * When no executionParams is available, falls back to StepHistory details. + * Correlates history entries (step + stepOutcome pairs) with execution data + * from the RunStore (matched by stepOutcome.stepIndex). + * When no execution data is available, falls back to StepOutcome details. 
*/ private async summarizePreviousSteps(): Promise { const allStepExecutions = await this.context.runStore.getStepExecutions(); return this.context.history - .map(({ step, stepHistory }) => { - const execution = allStepExecutions.find(e => e.stepIndex === stepHistory.stepIndex); + .map(({ stepDefinition, stepOutcome }) => { + const execution = allStepExecutions.find(e => e.stepIndex === stepOutcome.stepIndex); - return this.buildStepSummary(step, stepHistory, execution); + return this.buildStepSummary(stepDefinition, stepOutcome, execution); }) .join('\n\n'); } private buildStepSummary( step: StepDefinition, - stepHistory: StepHistory, + stepOutcome: StepOutcome, execution: StepExecutionData | undefined, ): string { const prompt = step.prompt ?? '(no prompt)'; - const header = `Step "${step.id}" (index ${stepHistory.stepIndex}):`; + const header = `Step "${stepOutcome.stepId}" (index ${stepOutcome.stepIndex}):`; const lines = [header, ` Prompt: ${prompt}`]; - if (execution?.executionParams) { - lines.push(` Result: ${JSON.stringify(execution.executionParams)}`); + if (isExecutedStepOnExecutor(execution)) { + if (execution.executionParams !== undefined) { + lines.push(` Input: ${JSON.stringify(execution.executionParams)}`); + } + + if (execution.executionResult) { + lines.push(` Output: ${JSON.stringify(execution.executionResult)}`); + } } else { - const { stepId, stepIndex, type, ...historyDetails } = stepHistory; + const { stepId, stepIndex, type, ...historyDetails } = stepOutcome; lines.push(` History: ${JSON.stringify(historyDetails)}`); } diff --git a/packages/workflow-executor/src/executors/condition-step-executor.ts b/packages/workflow-executor/src/executors/condition-step-executor.ts index b90d47ad81..ee4a60f830 100644 --- a/packages/workflow-executor/src/executors/condition-step-executor.ts +++ b/packages/workflow-executor/src/executors/condition-step-executor.ts @@ -1,6 +1,5 @@ import type { StepExecutionResult } from '../types/execution'; import type { 
ConditionStepDefinition } from '../types/step-definition'; -import type { ConditionStepHistory } from '../types/step-history'; import { HumanMessage, SystemMessage } from '@langchain/core/messages'; import { DynamicStructuredTool } from '@langchain/core/tools'; @@ -36,14 +35,10 @@ const GATEWAY_SYSTEM_PROMPT = `You are an AI agent selecting the correct option - If selecting null: explain why options don't match the question - Do not refer to yourself as "I" in the response, use a passive formulation instead.`; -export default class ConditionStepExecutor extends BaseStepExecutor< - ConditionStepDefinition, - ConditionStepHistory -> { - async execute( - step: ConditionStepDefinition, - stepHistory: ConditionStepHistory, - ): Promise { +export default class ConditionStepExecutor extends BaseStepExecutor { + async execute(): Promise { + const { stepDefinition: step } = this.context; + const tool = new DynamicStructuredTool({ name: 'choose-gateway-option', description: @@ -58,7 +53,7 @@ export default class ConditionStepExecutor extends BaseStepExecutor< .nullable() .describe('The chosen option, or null if no option clearly answers the question.'), }), - func: async input => JSON.stringify(input), + func: undefined, }); const messages = [ @@ -73,8 +68,10 @@ export default class ConditionStepExecutor extends BaseStepExecutor< args = await this.invokeWithTool(messages, tool); } catch (error: unknown) { return { - stepHistory: { - ...stepHistory, + stepOutcome: { + type: 'condition', + stepId: this.context.stepId, + stepIndex: this.context.stepIndex, status: 'error', error: (error as Error).message, }, @@ -85,17 +82,30 @@ export default class ConditionStepExecutor extends BaseStepExecutor< await this.context.runStore.saveStepExecution({ type: 'condition', - stepIndex: stepHistory.stepIndex, + stepIndex: this.context.stepIndex, executionParams: { answer: selectedOption, reasoning }, executionResult: selectedOption ? 
{ answer: selectedOption } : undefined, }); if (!selectedOption) { - return { stepHistory: { ...stepHistory, status: 'manual-decision' } }; + return { + stepOutcome: { + type: 'condition', + stepId: this.context.stepId, + stepIndex: this.context.stepIndex, + status: 'manual-decision', + }, + }; } return { - stepHistory: { ...stepHistory, status: 'success', selectedOption }, + stepOutcome: { + type: 'condition', + stepId: this.context.stepId, + stepIndex: this.context.stepIndex, + status: 'success', + selectedOption, + }, }; } } diff --git a/packages/workflow-executor/src/executors/read-record-step-executor.ts b/packages/workflow-executor/src/executors/read-record-step-executor.ts new file mode 100644 index 0000000000..e164e16909 --- /dev/null +++ b/packages/workflow-executor/src/executors/read-record-step-executor.ts @@ -0,0 +1,229 @@ +import type { StepExecutionResult } from '../types/execution'; +import type { CollectionSchema, RecordRef } from '../types/record'; +import type { AiTaskStepDefinition } from '../types/step-definition'; +import type { + FieldReadResult, + LoadRelatedRecordStepExecutionData, +} from '../types/step-execution-data'; + +import { HumanMessage, SystemMessage } from '@langchain/core/messages'; +import { DynamicStructuredTool } from '@langchain/core/tools'; +import { z } from 'zod'; + +import { + NoReadableFieldsError, + NoRecordsError, + NoResolvedFieldsError, + WorkflowExecutorError, +} from '../errors'; +import BaseStepExecutor from './base-step-executor'; + +const READ_RECORD_SYSTEM_PROMPT = `You are an AI agent reading fields from a record to answer a user request. +Select the field(s) that best answer the request. You can read one field or multiple fields at once. + +Important rules: +- Be precise: only read fields that are directly relevant to the request. +- Final answer is definitive, you won't receive any other input from the user. 
+- Do not refer to yourself as "I" in the response, use a passive formulation instead.`; + +export default class ReadRecordStepExecutor extends BaseStepExecutor { + private readonly schemaCache = new Map(); + + async execute(): Promise { + const { stepDefinition: step } = this.context; + const records = await this.getAvailableRecordRefs(); + + let selectedRecordRef: RecordRef; + let schema: CollectionSchema; + let fieldResults: FieldReadResult[]; + + try { + selectedRecordRef = await this.selectRecordRef(records, step.prompt); + schema = await this.getCollectionSchema(selectedRecordRef.collectionName); + const selectedDisplayNames = await this.selectFields(schema, step.prompt); + const resolvedFieldNames = selectedDisplayNames + .map( + name => + schema.fields.find(f => f.fieldName === name || f.displayName === name)?.fieldName, + ) + .filter((name): name is string => name !== undefined); + + if (resolvedFieldNames.length === 0) { + throw new NoResolvedFieldsError(selectedDisplayNames); + } + + const recordData = await this.context.agentPort.getRecord( + selectedRecordRef.collectionName, + selectedRecordRef.recordId, + resolvedFieldNames, + ); + fieldResults = this.formatFieldResults(recordData.values, schema, selectedDisplayNames); + } catch (error) { + if (error instanceof WorkflowExecutorError) { + return { + stepOutcome: { + type: 'ai-task', + stepId: this.context.stepId, + stepIndex: this.context.stepIndex, + status: 'error', + error: error.message, + }, + }; + } + + throw error; + } + + await this.context.runStore.saveStepExecution({ + type: 'read-record', + stepIndex: this.context.stepIndex, + executionParams: { fieldNames: fieldResults.map(f => f.fieldName) }, + executionResult: { fields: fieldResults }, + selectedRecordRef, + }); + + return { + stepOutcome: { + type: 'ai-task', + stepId: this.context.stepId, + stepIndex: this.context.stepIndex, + status: 'success', + }, + }; + } + + private async selectFields( + schema: CollectionSchema, + prompt: string | 
undefined, + ): Promise { + const tool = this.buildReadFieldTool(schema); + const messages = [ + ...(await this.buildPreviousStepsMessages()), + new SystemMessage(READ_RECORD_SYSTEM_PROMPT), + new SystemMessage( + `The selected record belongs to the "${schema.collectionDisplayName}" collection.`, + ), + new HumanMessage(`**Request**: ${prompt ?? 'Read the relevant fields.'}`), + ]; + + const args = await this.invokeWithTool<{ fieldNames: string[] }>(messages, tool); + + return args.fieldNames; + } + + private async selectRecordRef( + records: RecordRef[], + prompt: string | undefined, + ): Promise { + if (records.length === 0) throw new NoRecordsError(); + if (records.length === 1) return records[0]; + + const identifiers = await Promise.all(records.map(r => this.toRecordIdentifier(r))); + const identifierTuple = identifiers as [string, ...string[]]; + + const tool = new DynamicStructuredTool({ + name: 'select-record', + description: 'Select the most relevant record for this workflow step.', + schema: z.object({ + recordIdentifier: z.enum(identifierTuple), + }), + func: undefined, + }); + + const messages = [ + ...(await this.buildPreviousStepsMessages()), + new SystemMessage( + 'You are an AI agent selecting the most relevant record for a workflow step.\n' + + 'Choose the record whose collection best matches the user request.\n' + + 'Pay attention to the collection name of each record.', + ), + new HumanMessage(prompt ?? 
'Select the most relevant record.'), + ]; + + const { recordIdentifier } = await this.invokeWithTool<{ recordIdentifier: string }>( + messages, + tool, + ); + + const selectedIndex = identifiers.indexOf(recordIdentifier); + + if (selectedIndex === -1) { + throw new WorkflowExecutorError( + `AI selected record "${recordIdentifier}" which does not match any available record`, + ); + } + + return records[selectedIndex]; + } + + private buildReadFieldTool(schema: CollectionSchema): DynamicStructuredTool { + const nonRelationFields = schema.fields.filter(f => !f.isRelationship); + + if (nonRelationFields.length === 0) { + throw new NoReadableFieldsError(schema.collectionName); + } + + const displayNames = nonRelationFields.map(f => f.displayName) as [string, ...string[]]; + + return new DynamicStructuredTool({ + name: 'read-selected-record-fields', + description: 'Read one or more fields from the selected record.', + schema: z.object({ + // z.string() (not z.enum) intentionally: an invalid field name in the array + // does not fail the whole tool call — per-field errors are handled in formatFieldResults. + // This matches the frontend implementation (ISO frontend). 
+ fieldNames: z + .array(z.string()) + .describe( + `Names of the fields to read, possible values are: ${displayNames + .map(n => `"${n}"`) + .join(', ')}`, + ), + }), + func: undefined, + }); + } + + private formatFieldResults( + values: Record, + schema: CollectionSchema, + fieldNames: string[], + ): FieldReadResult[] { + return fieldNames.map(name => { + const field = schema.fields.find(f => f.fieldName === name || f.displayName === name); + + if (!field) return { error: `Field not found: ${name}`, fieldName: name, displayName: name }; + + return { + value: values[field.fieldName], + fieldName: field.fieldName, + displayName: field.displayName, + }; + }); + } + + private async getAvailableRecordRefs(): Promise { + const stepExecutions = await this.context.runStore.getStepExecutions(); + const relatedRecords = stepExecutions + .filter((e): e is LoadRelatedRecordStepExecutionData => e.type === 'load-related-record') + .map(e => e.record); + + return [this.context.baseRecordRef, ...relatedRecords]; + } + + private async getCollectionSchema(collectionName: string): Promise { + const cached = this.schemaCache.get(collectionName); + if (cached) return cached; + + const schema = await this.context.workflowPort.getCollectionSchema(collectionName); + this.schemaCache.set(collectionName, schema); + + return schema; + } + + private async toRecordIdentifier(record: RecordRef): Promise { + const schema = await this.getCollectionSchema(record.collectionName); + + return `Step ${record.stepIndex} - ${schema.collectionDisplayName} #${record.recordId}`; + } +} diff --git a/packages/workflow-executor/src/index.ts b/packages/workflow-executor/src/index.ts index 2918b36c45..16c054cfdd 100644 --- a/packages/workflow-executor/src/index.ts +++ b/packages/workflow-executor/src/index.ts @@ -1,6 +1,5 @@ export { StepType } from './types/step-definition'; export type { - StepCategory, ConditionStepDefinition, AiTaskStepDefinition, StepDefinition, @@ -8,21 +7,35 @@ export type { export 
type { StepStatus, - ConditionStepHistory, - AiTaskStepHistory, - StepHistory, -} from './types/step-history'; + ConditionStepOutcome, + AiTaskStepOutcome, + StepOutcome, +} from './types/step-outcome'; export type { + FieldReadSuccess, + FieldReadError, + FieldReadResult, ConditionStepExecutionData, + ReadRecordStepExecutionData, AiTaskStepExecutionData, + LoadRelatedRecordStepExecutionData, + ExecutedStepExecutionData, StepExecutionData, } from './types/step-execution-data'; -export type { RecordFieldRef, ActionRef, CollectionRef, RecordData } from './types/record'; +export { isExecutedStepOnExecutor } from './types/step-execution-data'; export type { - StepRecord, + FieldSchema, + ActionSchema, + CollectionSchema, + RecordRef, + RecordData, +} from './types/record'; + +export type { + Step, UserInput, PendingStepExecution, StepExecutionResult, @@ -38,8 +51,12 @@ export { MissingToolCallError, MalformedToolCallError, RecordNotFoundError, + NoRecordsError, + NoReadableFieldsError, + NoResolvedFieldsError, } from './errors'; export { default as BaseStepExecutor } from './executors/base-step-executor'; export { default as ConditionStepExecutor } from './executors/condition-step-executor'; +export { default as ReadRecordStepExecutor } from './executors/read-record-step-executor'; export { default as AgentClientAgentPort } from './adapters/agent-client-agent-port'; export { default as ForestServerWorkflowPort } from './adapters/forest-server-workflow-port'; diff --git a/packages/workflow-executor/src/ports/agent-port.ts b/packages/workflow-executor/src/ports/agent-port.ts index 6a588f1f23..a0964e250f 100644 --- a/packages/workflow-executor/src/ports/agent-port.ts +++ b/packages/workflow-executor/src/ports/agent-port.ts @@ -1,9 +1,13 @@ /** @draft Types derived from the workflow-executor spec -- subject to change. 
*/ -import type { ActionRef, RecordData } from '../types/record'; +import type { RecordData } from '../types/record'; export interface AgentPort { - getRecord(collectionName: string, recordId: Array): Promise; + getRecord( + collectionName: string, + recordId: Array, + fieldNames?: string[], + ): Promise; updateRecord( collectionName: string, recordId: Array, @@ -14,7 +18,6 @@ export interface AgentPort { recordId: Array, relationName: string, ): Promise; - getActions(collectionName: string): Promise; executeAction( collectionName: string, actionName: string, diff --git a/packages/workflow-executor/src/ports/run-store.ts b/packages/workflow-executor/src/ports/run-store.ts index 212ab14088..6b899da848 100644 --- a/packages/workflow-executor/src/ports/run-store.ts +++ b/packages/workflow-executor/src/ports/run-store.ts @@ -1,13 +1,8 @@ /** @draft Types derived from the workflow-executor spec -- subject to change. */ -import type { RecordData } from '../types/record'; import type { StepExecutionData } from '../types/step-execution-data'; export interface RunStore { - getRecords(): Promise; - getRecord(collectionName: string, recordId: string): Promise; - saveRecord(record: RecordData): Promise; getStepExecutions(): Promise; - getStepExecution(stepIndex: number): Promise; saveStepExecution(stepExecution: StepExecutionData): Promise; } diff --git a/packages/workflow-executor/src/ports/workflow-port.ts b/packages/workflow-executor/src/ports/workflow-port.ts index 93951f6f02..392473a95d 100644 --- a/packages/workflow-executor/src/ports/workflow-port.ts +++ b/packages/workflow-executor/src/ports/workflow-port.ts @@ -1,15 +1,15 @@ /** @draft Types derived from the workflow-executor spec -- subject to change. 
*/ import type { PendingStepExecution } from '../types/execution'; -import type { CollectionRef } from '../types/record'; -import type { StepHistory } from '../types/step-history'; +import type { CollectionSchema } from '../types/record'; +import type { StepOutcome } from '../types/step-outcome'; /** Placeholder -- will be typed as McpConfiguration from @forestadmin/ai-proxy/mcp-client once added as dependency. */ export type McpConfiguration = unknown; export interface WorkflowPort { getPendingStepExecutions(): Promise; - updateStepExecution(runId: string, stepHistory: StepHistory): Promise; - getCollectionRef(collectionName: string): Promise; + updateStepExecution(runId: string, stepOutcome: StepOutcome): Promise; + getCollectionSchema(collectionName: string): Promise; getMcpServerConfigs(): Promise; } diff --git a/packages/workflow-executor/src/types/execution.ts b/packages/workflow-executor/src/types/execution.ts index d2524403cf..406d1e4f0f 100644 --- a/packages/workflow-executor/src/types/execution.ts +++ b/packages/workflow-executor/src/types/execution.ts @@ -1,39 +1,44 @@ /** @draft Types derived from the workflow-executor spec -- subject to change. 
*/ -import type { CollectionRef } from './record'; +import type { RecordRef } from './record'; import type { StepDefinition } from './step-definition'; -import type { StepHistory } from './step-history'; +import type { StepOutcome } from './step-outcome'; import type { AgentPort } from '../ports/agent-port'; import type { RunStore } from '../ports/run-store'; import type { WorkflowPort } from '../ports/workflow-port'; import type { BaseChatModel } from '@langchain/core/language_models/chat_models'; -export interface StepRecord { - step: StepDefinition; - stepHistory: StepHistory; +export interface Step { + stepDefinition: StepDefinition; + stepOutcome: StepOutcome; } export type UserInput = { type: 'confirmation'; confirmed: boolean }; export interface PendingStepExecution { readonly runId: string; - readonly step: StepDefinition; - readonly stepHistory: StepHistory; - readonly previousSteps: ReadonlyArray; - readonly availableRecords: ReadonlyArray; + readonly stepId: string; + readonly stepIndex: number; + readonly baseRecordRef: RecordRef; + readonly stepDefinition: StepDefinition; + readonly previousSteps: ReadonlyArray; readonly userInput?: UserInput; } export interface StepExecutionResult { - stepHistory: StepHistory; + stepOutcome: StepOutcome; } -export interface ExecutionContext { +export interface ExecutionContext { readonly runId: string; + readonly stepId: string; + readonly stepIndex: number; + readonly baseRecordRef: RecordRef; + readonly stepDefinition: TStep; readonly model: BaseChatModel; readonly agentPort: AgentPort; readonly workflowPort: WorkflowPort; readonly runStore: RunStore; - readonly history: ReadonlyArray>; + readonly history: ReadonlyArray>; readonly remoteTools: readonly unknown[]; } diff --git a/packages/workflow-executor/src/types/record.ts b/packages/workflow-executor/src/types/record.ts index 14064fcb1f..b5070c39f4 100644 --- a/packages/workflow-executor/src/types/record.ts +++ b/packages/workflow-executor/src/types/record.ts @@ 
-1,27 +1,35 @@ /** @draft Types derived from the workflow-executor spec -- subject to change. */ -export interface RecordFieldRef { +// -- Schema types (structure of a collection — source: WorkflowPort) -- + +export interface FieldSchema { fieldName: string; displayName: string; - type: string; isRelationship: boolean; - referencedCollectionName?: string; } -export interface ActionRef { +export interface ActionSchema { name: string; displayName: string; } -export interface CollectionRef { +export interface CollectionSchema { collectionName: string; collectionDisplayName: string; primaryKeyFields: string[]; - fields: RecordFieldRef[]; - actions: ActionRef[]; + fields: FieldSchema[]; + actions: ActionSchema[]; } -export interface RecordData extends CollectionRef { +// -- Record types (data — source: AgentPort/RunStore) -- + +/** Lightweight pointer to a specific record. */ +export interface RecordRef { + collectionName: string; recordId: Array; - values: Record; + /** Index of the workflow step that loaded this record. */ + stepIndex: number; } + +/** A record with its loaded field values — no stepIndex (agent doesn't know about steps). 
*/ +export type RecordData = Omit & { values: Record }; diff --git a/packages/workflow-executor/src/types/step-definition.ts b/packages/workflow-executor/src/types/step-definition.ts index dffae8c312..ca23e5b413 100644 --- a/packages/workflow-executor/src/types/step-definition.ts +++ b/packages/workflow-executor/src/types/step-definition.ts @@ -9,34 +9,22 @@ export enum StepType { } interface BaseStepDefinition { - id: string; type: StepType; + prompt?: string; aiConfigName?: string; } export interface ConditionStepDefinition extends BaseStepDefinition { type: StepType.Condition; options: [string, ...string[]]; - prompt?: string; } export interface AiTaskStepDefinition extends BaseStepDefinition { - type: - | StepType.ReadRecord - | StepType.UpdateRecord - | StepType.TriggerAction - | StepType.LoadRelatedRecord; + type: Exclude; recordSourceStepId?: string; - prompt?: string; automaticCompletion?: boolean; allowedTools?: string[]; remoteToolsSourceId?: string; } export type StepDefinition = ConditionStepDefinition | AiTaskStepDefinition; - -/** - * Coarse categorization of steps. StepType has 5 fine-grained values; - * StepCategory collapses the 4 non-condition types into 'ai-task'. - */ -export type StepCategory = 'condition' | 'ai-task'; diff --git a/packages/workflow-executor/src/types/step-execution-data.ts b/packages/workflow-executor/src/types/step-execution-data.ts index e2d46eaf47..eb022a273c 100644 --- a/packages/workflow-executor/src/types/step-execution-data.ts +++ b/packages/workflow-executor/src/types/step-execution-data.ts @@ -1,23 +1,78 @@ /** @draft Types derived from the workflow-executor spec -- subject to change. 
*/ -import type { CollectionRef } from './record'; +import type { RecordRef } from './record'; + +// -- Base -- interface BaseStepExecutionData { stepIndex: number; } +// -- Condition -- + export interface ConditionStepExecutionData extends BaseStepExecutionData { type: 'condition'; - executionParams?: { answer: string | null; reasoning?: string }; - executionResult?: { answer: string }; + executionParams: { answer: string | null; reasoning?: string }; + executionResult: { answer: string }; +} + +// -- Read Record -- + +interface FieldReadBase { + fieldName: string; + displayName: string; +} + +export interface FieldReadSuccess extends FieldReadBase { + value: unknown; +} + +export interface FieldReadError extends FieldReadBase { + error: string; +} + +export type FieldReadResult = FieldReadSuccess | FieldReadError; + +export interface ReadRecordStepExecutionData extends BaseStepExecutionData { + type: 'read-record'; + executionParams: { fieldNames: string[] }; + executionResult: { fields: FieldReadResult[] }; + selectedRecordRef: RecordRef; } +// -- Generic AI Task (fallback for untyped steps) -- + export interface AiTaskStepExecutionData extends BaseStepExecutionData { type: 'ai-task'; executionParams?: Record; executionResult?: Record; toolConfirmationInterruption?: Record; - selectedRecord?: CollectionRef; } -export type StepExecutionData = ConditionStepExecutionData | AiTaskStepExecutionData; +// -- Load Related Record -- + +export interface LoadRelatedRecordStepExecutionData extends BaseStepExecutionData { + type: 'load-related-record'; + record: RecordRef; +} + +// -- Union -- + +export type StepExecutionData = + | ConditionStepExecutionData + | ReadRecordStepExecutionData + | AiTaskStepExecutionData + | LoadRelatedRecordStepExecutionData; + +export type ExecutedStepExecutionData = + | ConditionStepExecutionData + | ReadRecordStepExecutionData + | AiTaskStepExecutionData; + +// TODO: this condition should change when load-related-record gets its own executor 
+// and produces executionParams/executionResult like other steps. +export function isExecutedStepOnExecutor( + data: StepExecutionData | undefined, +): data is ExecutedStepExecutionData { + return !!data && data.type !== 'load-related-record'; +} diff --git a/packages/workflow-executor/src/types/step-history.ts b/packages/workflow-executor/src/types/step-outcome.ts similarity index 77% rename from packages/workflow-executor/src/types/step-history.ts rename to packages/workflow-executor/src/types/step-outcome.ts index bf9b66b61a..9a564748eb 100644 --- a/packages/workflow-executor/src/types/step-history.ts +++ b/packages/workflow-executor/src/types/step-outcome.ts @@ -12,27 +12,27 @@ export type AiTaskStepStatus = BaseStepStatus | 'awaiting-input'; export type StepStatus = ConditionStepStatus | AiTaskStepStatus; /** - * StepHistory is sent to the orchestrator — it must NEVER contain client data. + * StepOutcome is sent to the orchestrator — it must NEVER contain client data. * Any privacy-sensitive information (e.g. AI reasoning) must stay in * StepExecutionData (persisted in the RunStore, client-side only). */ -interface BaseStepHistory { +interface BaseStepOutcome { stepId: string; stepIndex: number; /** Present when status is 'error'. */ error?: string; } -export interface ConditionStepHistory extends BaseStepHistory { +export interface ConditionStepOutcome extends BaseStepOutcome { type: 'condition'; status: ConditionStepStatus; /** Present when status is 'success'. 
*/ selectedOption?: string; } -export interface AiTaskStepHistory extends BaseStepHistory { +export interface AiTaskStepOutcome extends BaseStepOutcome { type: 'ai-task'; status: AiTaskStepStatus; } -export type StepHistory = ConditionStepHistory | AiTaskStepHistory; +export type StepOutcome = ConditionStepOutcome | AiTaskStepOutcome; diff --git a/packages/workflow-executor/test/adapters/agent-client-agent-port.test.ts b/packages/workflow-executor/test/adapters/agent-client-agent-port.test.ts index 8789907875..b564eeaf5e 100644 --- a/packages/workflow-executor/test/adapters/agent-client-agent-port.test.ts +++ b/packages/workflow-executor/test/adapters/agent-client-agent-port.test.ts @@ -1,4 +1,4 @@ -import type { CollectionRef } from '../../src/types/record'; +import type { CollectionSchema } from '../../src/types/record'; import type { RemoteAgentClient } from '@forestadmin/agent-client'; import AgentClientAgentPort from '../../src/adapters/agent-client-agent-port'; @@ -26,7 +26,7 @@ describe('AgentClientAgentPort', () => { let mockCollection: ReturnType['mockCollection']; let mockRelation: ReturnType['mockRelation']; let mockAction: ReturnType['mockAction']; - let collectionRefs: Record; + let collectionSchemas: Record; let port: AgentClientAgentPort; beforeEach(() => { @@ -34,14 +34,14 @@ describe('AgentClientAgentPort', () => { ({ client, mockCollection, mockRelation, mockAction } = createMockClient()); - collectionRefs = { + collectionSchemas = { users: { collectionName: 'users', collectionDisplayName: 'Users', primaryKeyFields: ['id'], fields: [ - { fieldName: 'id', displayName: 'id', type: 'Number', isRelationship: false }, - { fieldName: 'name', displayName: 'name', type: 'String', isRelationship: false }, + { fieldName: 'id', displayName: 'id', isRelationship: false }, + { fieldName: 'name', displayName: 'name', isRelationship: false }, ], actions: [ { name: 'sendEmail', displayName: 'Send Email' }, @@ -53,8 +53,8 @@ describe('AgentClientAgentPort', () => 
{ collectionDisplayName: 'Orders', primaryKeyFields: ['tenantId', 'orderId'], fields: [ - { fieldName: 'tenantId', displayName: 'Tenant', type: 'Number', isRelationship: false }, - { fieldName: 'orderId', displayName: 'Order', type: 'Number', isRelationship: false }, + { fieldName: 'tenantId', displayName: 'Tenant', isRelationship: false }, + { fieldName: 'orderId', displayName: 'Order', isRelationship: false }, ], actions: [], }, @@ -63,14 +63,14 @@ describe('AgentClientAgentPort', () => { collectionDisplayName: 'Posts', primaryKeyFields: ['id'], fields: [ - { fieldName: 'id', displayName: 'id', type: 'Number', isRelationship: false }, - { fieldName: 'title', displayName: 'title', type: 'String', isRelationship: false }, + { fieldName: 'id', displayName: 'id', isRelationship: false }, + { fieldName: 'title', displayName: 'title', isRelationship: false }, ], actions: [], }, }; - port = new AgentClientAgentPort({ client, collectionRefs }); + port = new AgentClientAgentPort({ client, collectionSchemas }); }); describe('getRecord', () => { @@ -84,12 +84,8 @@ describe('AgentClientAgentPort', () => { pagination: { size: 1, number: 1 }, }); expect(result).toEqual({ - recordId: [42], collectionName: 'users', - collectionDisplayName: 'Users', - primaryKeyFields: ['id'], - fields: collectionRefs.users.fields, - actions: collectionRefs.users.actions, + recordId: [42], values: { id: 42, name: 'Alice' }, }); }); @@ -117,6 +113,40 @@ describe('AgentClientAgentPort', () => { await expect(port.getRecord('users', [999])).rejects.toThrow(RecordNotFoundError); }); + it('should pass fields to list when fieldNames is provided', async () => { + mockCollection.list.mockResolvedValue([{ id: 42, name: 'Alice' }]); + + await port.getRecord('users', [42], ['id', 'name']); + + expect(mockCollection.list).toHaveBeenCalledWith({ + filters: { field: 'id', operator: 'Equal', value: 42 }, + pagination: { size: 1, number: 1 }, + fields: ['id', 'name'], + }); + }); + + it('should not pass fields to 
list when fieldNames is an empty array', async () => { + mockCollection.list.mockResolvedValue([{ id: 42, name: 'Alice' }]); + + await port.getRecord('users', [42], []); + + expect(mockCollection.list).toHaveBeenCalledWith({ + filters: { field: 'id', operator: 'Equal', value: 42 }, + pagination: { size: 1, number: 1 }, + }); + }); + + it('should not pass fields to list when fieldNames is undefined', async () => { + mockCollection.list.mockResolvedValue([{ id: 42, name: 'Alice' }]); + + await port.getRecord('users', [42]); + + expect(mockCollection.list).toHaveBeenCalledWith({ + filters: { field: 'id', operator: 'Equal', value: 42 }, + pagination: { size: 1, number: 1 }, + }); + }); + it('should fallback to pk field "id" when collection is unknown', async () => { mockCollection.list.mockResolvedValue([{ id: 1 }]); @@ -128,7 +158,6 @@ describe('AgentClientAgentPort', () => { }), ); expect(result.collectionName).toBe('unknown'); - expect(result.fields).toEqual([]); }); }); @@ -140,12 +169,8 @@ describe('AgentClientAgentPort', () => { expect(mockCollection.update).toHaveBeenCalledWith('42', { name: 'Bob' }); expect(result).toEqual({ - recordId: [42], collectionName: 'users', - collectionDisplayName: 'Users', - primaryKeyFields: ['id'], - fields: collectionRefs.users.fields, - actions: collectionRefs.users.actions, + recordId: [42], values: { id: 42, name: 'Bob' }, }); }); @@ -171,27 +196,19 @@ describe('AgentClientAgentPort', () => { expect(mockCollection.relation).toHaveBeenCalledWith('posts', '42'); expect(result).toEqual([ { - recordId: [10], collectionName: 'posts', - collectionDisplayName: 'Posts', - primaryKeyFields: ['id'], - fields: collectionRefs.posts.fields, - actions: collectionRefs.posts.actions, + recordId: [10], values: { id: 10, title: 'Post A' }, }, { - recordId: [11], collectionName: 'posts', - collectionDisplayName: 'Posts', - primaryKeyFields: ['id'], - fields: collectionRefs.posts.fields, - actions: collectionRefs.posts.actions, + recordId: [11], 
values: { id: 11, title: 'Post B' }, }, ]); }); - it('should fallback to relationName when no CollectionRef exists', async () => { + it('should fallback to relationName when no CollectionSchema exists', async () => { mockRelation.list.mockResolvedValue([{ id: 1 }]); const result = await port.getRelatedData('users', [42], 'unknownRelation'); @@ -207,19 +224,6 @@ describe('AgentClientAgentPort', () => { }); }); - describe('getActions', () => { - it('should return ActionRef[] from CollectionRef', async () => { - expect(await port.getActions('users')).toEqual([ - { name: 'sendEmail', displayName: 'Send Email' }, - { name: 'archive', displayName: 'Archive' }, - ]); - }); - - it('should return an empty array for an unknown collection', async () => { - expect(await port.getActions('unknown')).toEqual([]); - }); - }); - describe('executeAction', () => { it('should encode recordIds to pipe format and call execute', async () => { mockAction.execute.mockResolvedValue({ success: 'done' }); diff --git a/packages/workflow-executor/test/adapters/forest-server-workflow-port.test.ts b/packages/workflow-executor/test/adapters/forest-server-workflow-port.test.ts index ff37147e74..9e69a04eaf 100644 --- a/packages/workflow-executor/test/adapters/forest-server-workflow-port.test.ts +++ b/packages/workflow-executor/test/adapters/forest-server-workflow-port.test.ts @@ -1,6 +1,6 @@ import type { PendingStepExecution } from '../../src/types/execution'; -import type { CollectionRef } from '../../src/types/record'; -import type { StepHistory } from '../../src/types/step-history'; +import type { CollectionSchema } from '../../src/types/record'; +import type { StepOutcome } from '../../src/types/step-outcome'; import { ServerUtils } from '@forestadmin/forestadmin-client'; @@ -39,9 +39,9 @@ describe('ForestServerWorkflowPort', () => { }); describe('updateStepExecution', () => { - it('should post step history to the complete route', async () => { + it('should post step outcome to the complete 
route', async () => { mockQuery.mockResolvedValue(undefined); - const stepHistory: StepHistory = { + const stepOutcome: StepOutcome = { type: 'condition', stepId: 'step-1', stepIndex: 0, @@ -49,33 +49,33 @@ describe('ForestServerWorkflowPort', () => { selectedOption: 'optionA', }; - await port.updateStepExecution('run-42', stepHistory); + await port.updateStepExecution('run-42', stepOutcome); expect(mockQuery).toHaveBeenCalledWith( options, 'post', '/liana/v1/workflow-step-executions/run-42/complete', {}, - stepHistory, + stepOutcome, ); }); }); - describe('getCollectionRef', () => { - it('should fetch the collection ref by name', async () => { - const collectionRef: CollectionRef = { + describe('getCollectionSchema', () => { + it('should fetch the collection schema by name', async () => { + const collectionSchema: CollectionSchema = { collectionName: 'users', collectionDisplayName: 'Users', primaryKeyFields: ['id'], fields: [], actions: [], }; - mockQuery.mockResolvedValue(collectionRef); + mockQuery.mockResolvedValue(collectionSchema); - const result = await port.getCollectionRef('users'); + const result = await port.getCollectionSchema('users'); expect(mockQuery).toHaveBeenCalledWith(options, 'get', '/liana/v1/collections/users'); - expect(result).toEqual(collectionRef); + expect(result).toEqual(collectionSchema); }); }); diff --git a/packages/workflow-executor/test/executors/base-step-executor.test.ts b/packages/workflow-executor/test/executors/base-step-executor.test.ts index 73f5e716b1..4d79c03cf1 100644 --- a/packages/workflow-executor/test/executors/base-step-executor.test.ts +++ b/packages/workflow-executor/test/executors/base-step-executor.test.ts @@ -1,8 +1,9 @@ import type { RunStore } from '../../src/ports/run-store'; import type { ExecutionContext, StepExecutionResult } from '../../src/types/execution'; +import type { RecordRef } from '../../src/types/record'; import type { StepDefinition } from '../../src/types/step-definition'; import type { 
StepExecutionData } from '../../src/types/step-execution-data'; -import type { StepHistory } from '../../src/types/step-history'; +import type { StepOutcome } from '../../src/types/step-outcome'; import type { BaseMessage, SystemMessage } from '@langchain/core/messages'; import type { DynamicStructuredTool } from '@langchain/core/tools'; @@ -30,15 +31,14 @@ class TestableExecutor extends BaseStepExecutor { function makeHistoryEntry( overrides: { stepId?: string; stepIndex?: number; prompt?: string } = {}, -): { step: StepDefinition; stepHistory: StepHistory } { +): { stepDefinition: StepDefinition; stepOutcome: StepOutcome } { return { - step: { - id: overrides.stepId ?? 'step-1', + stepDefinition: { type: StepType.Condition, options: ['A', 'B'], prompt: overrides.prompt ?? 'Pick one', }, - stepHistory: { + stepOutcome: { type: 'condition', stepId: overrides.stepId ?? 'step-1', stepIndex: overrides.stepIndex ?? 0, @@ -49,11 +49,7 @@ function makeHistoryEntry( function makeMockRunStore(stepExecutions: StepExecutionData[] = []): RunStore { return { - getRecords: jest.fn().mockResolvedValue([]), - getRecord: jest.fn().mockResolvedValue(null), - saveRecord: jest.fn().mockResolvedValue(undefined), getStepExecutions: jest.fn().mockResolvedValue(stepExecutions), - getStepExecution: jest.fn().mockResolvedValue(null), saveStepExecution: jest.fn().mockResolvedValue(undefined), }; } @@ -61,6 +57,18 @@ function makeMockRunStore(stepExecutions: StepExecutionData[] = []): RunStore { function makeContext(overrides: Partial = {}): ExecutionContext { return { runId: 'run-1', + stepId: 'step-0', + stepIndex: 0, + baseRecordRef: { + collectionName: 'customers', + recordId: [1], + stepIndex: 0, + } as RecordRef, + stepDefinition: { + type: StepType.Condition, + options: ['A', 'B'], + prompt: 'Pick one', + }, model: {} as ExecutionContext['model'], agentPort: {} as ExecutionContext['agentPort'], workflowPort: {} as ExecutionContext['workflowPort'], @@ -100,22 +108,24 @@ 
describe('BaseStepExecutor', () => { expect(result).toContain('Step "cond-1"'); expect(result).toContain('Prompt: Approve?'); - expect(result).toContain('Result: {"answer":"Yes","reasoning":"Order is valid"}'); + expect(result).toContain('Input: {"answer":"Yes","reasoning":"Order is valid"}'); + expect(result).toContain('Output: {"answer":"Yes"}'); }); - it('falls back to History when step has no executionParams in RunStore', async () => { + it('uses Input for matched steps and History for unmatched steps', async () => { const executor = new TestableExecutor( makeContext({ history: [ makeHistoryEntry({ stepId: 'cond-1', stepIndex: 0 }), makeHistoryEntry({ stepId: 'cond-2', stepIndex: 1, prompt: 'Second?' }), ], + // Only step 1 has an execution entry — step 0 has no match runStore: makeMockRunStore([ - { type: 'condition', stepIndex: 0 }, { type: 'condition', stepIndex: 1, executionParams: { answer: 'No', reasoning: 'Clearly no' }, + executionResult: { answer: 'No' }, }, ]), }), @@ -128,7 +138,8 @@ describe('BaseStepExecutor', () => { expect(result).toContain('Step "cond-1"'); expect(result).toContain('History: {"status":"success"}'); expect(result).toContain('Step "cond-2"'); - expect(result).toContain('Result: {"answer":"No","reasoning":"Clearly no"}'); + expect(result).toContain('Input: {"answer":"No","reasoning":"Clearly no"}'); + expect(result).toContain('Output: {"answer":"No"}'); }); it('falls back to History when no matching step execution in RunStore', async () => { @@ -143,6 +154,7 @@ describe('BaseStepExecutor', () => { type: 'condition', stepIndex: 1, executionParams: { answer: 'B', reasoning: 'Option B fits' }, + executionResult: { answer: 'B' }, }, ]), }), @@ -155,7 +167,8 @@ describe('BaseStepExecutor', () => { expect(result).toContain('Step "orphan"'); expect(result).toContain('History: {"status":"success"}'); expect(result).toContain('Step "matched"'); - expect(result).toContain('Result: {"answer":"B","reasoning":"Option B fits"}'); + 
expect(result).toContain('Input: {"answer":"B","reasoning":"Option B fits"}'); + expect(result).toContain('Output: {"answer":"B"}'); }); it('includes selectedOption in History for condition steps', async () => { @@ -164,7 +177,7 @@ describe('BaseStepExecutor', () => { stepIndex: 0, prompt: 'Approved?', }); - (entry.stepHistory as { selectedOption?: string }).selectedOption = 'Yes'; + (entry.stepOutcome as { selectedOption?: string }).selectedOption = 'Yes'; const executor = new TestableExecutor( makeContext({ @@ -187,8 +200,8 @@ describe('BaseStepExecutor', () => { stepIndex: 0, prompt: 'Do something', }); - entry.stepHistory.status = 'error'; - (entry.stepHistory as { error?: string }).error = 'AI could not match an option'; + entry.stepOutcome.status = 'error'; + (entry.stepOutcome as { error?: string }).error = 'AI could not match an option'; const executor = new TestableExecutor( makeContext({ @@ -206,9 +219,17 @@ describe('BaseStepExecutor', () => { }); it('includes status in History for ai-task steps without RunStore data', async () => { - const entry: { step: StepDefinition; stepHistory: StepHistory } = { - step: { id: 'ai-step', type: StepType.ReadRecord, prompt: 'Run task' }, - stepHistory: { type: 'ai-task', stepId: 'ai-step', stepIndex: 0, status: 'awaiting-input' }, + const entry: { stepDefinition: StepDefinition; stepOutcome: StepOutcome } = { + stepDefinition: { + type: StepType.ReadRecord, + prompt: 'Run task', + }, + stepOutcome: { + type: 'ai-task', + stepId: 'ai-step', + stepIndex: 0, + status: 'awaiting-input', + }, }; const executor = new TestableExecutor( @@ -226,17 +247,25 @@ describe('BaseStepExecutor', () => { expect(result).toContain('History: {"status":"awaiting-input"}'); }); - it('uses Result when RunStore has executionParams, History otherwise', async () => { + it('uses Input when RunStore has executionParams, History otherwise', async () => { const condEntry = makeHistoryEntry({ stepId: 'cond-1', stepIndex: 0, prompt: 'Approved?', }); 
- (condEntry.stepHistory as { selectedOption?: string }).selectedOption = 'Yes'; - - const aiEntry: { step: StepDefinition; stepHistory: StepHistory } = { - step: { id: 'read-customer', type: StepType.ReadRecord, prompt: 'Read name' }, - stepHistory: { type: 'ai-task', stepId: 'read-customer', stepIndex: 1, status: 'success' }, + (condEntry.stepOutcome as { selectedOption?: string }).selectedOption = 'Yes'; + + const aiEntry: { stepDefinition: StepDefinition; stepOutcome: StepOutcome } = { + stepDefinition: { + type: StepType.ReadRecord, + prompt: 'Read name', + }, + stepOutcome: { + type: 'ai-task', + stepId: 'read-customer', + stepIndex: 1, + status: 'success', + }, }; const executor = new TestableExecutor( @@ -259,12 +288,12 @@ describe('BaseStepExecutor', () => { expect(result).toContain('Step "cond-1"'); expect(result).toContain('History: {"status":"success","selectedOption":"Yes"}'); expect(result).toContain('Step "read-customer"'); - expect(result).toContain('Result: {"answer":"John Doe"}'); + expect(result).toContain('Input: {"answer":"John Doe"}'); }); - it('prefers RunStore executionParams over History fallback', async () => { + it('prefers RunStore execution data over History fallback', async () => { const entry = makeHistoryEntry({ stepId: 'cond-1', stepIndex: 0, prompt: 'Pick one' }); - (entry.stepHistory as { selectedOption?: string }).selectedOption = 'A'; + (entry.stepOutcome as { selectedOption?: string }).selectedOption = 'A'; const executor = new TestableExecutor( makeContext({ @@ -274,6 +303,7 @@ describe('BaseStepExecutor', () => { type: 'condition', stepIndex: 0, executionParams: { answer: 'A', reasoning: 'Best fit' }, + executionResult: { answer: 'A' }, }, ]), }), @@ -283,13 +313,49 @@ describe('BaseStepExecutor', () => { .buildPreviousStepsMessages() .then(msgs => msgs[0]?.content ?? 
''); - expect(result).toContain('Result: {"answer":"A","reasoning":"Best fit"}'); + expect(result).toContain('Input: {"answer":"A","reasoning":"Best fit"}'); + expect(result).toContain('Output: {"answer":"A"}'); expect(result).not.toContain('History:'); }); + it('omits Input line when executionParams is undefined', async () => { + const entry: { stepDefinition: StepDefinition; stepOutcome: StepOutcome } = { + stepDefinition: { + type: StepType.ReadRecord, + prompt: 'Do something', + }, + stepOutcome: { + type: 'ai-task', + stepId: 'ai-step', + stepIndex: 0, + status: 'success', + }, + }; + + const executor = new TestableExecutor( + makeContext({ + history: [entry], + runStore: makeMockRunStore([ + { + type: 'ai-task', + stepIndex: 0, + }, + ]), + }), + ); + + const result = await executor + .buildPreviousStepsMessages() + .then(msgs => msgs[0]?.content ?? ''); + + expect(result).toContain('Step "ai-step"'); + expect(result).toContain('Prompt: Do something'); + expect(result).not.toContain('Input:'); + }); + it('shows "(no prompt)" when step has no prompt', async () => { const entry = makeHistoryEntry({ stepIndex: 0 }); - entry.step.prompt = undefined; + entry.stepDefinition.prompt = undefined; const executor = new TestableExecutor( makeContext({ @@ -299,6 +365,7 @@ describe('BaseStepExecutor', () => { type: 'condition', stepIndex: 0, executionParams: { answer: 'A', reasoning: 'Only option' }, + executionResult: { answer: 'A' }, }, ]), }), diff --git a/packages/workflow-executor/test/executors/condition-step-executor.test.ts b/packages/workflow-executor/test/executors/condition-step-executor.test.ts index ba7fe7f34d..b41cf35bc5 100644 --- a/packages/workflow-executor/test/executors/condition-step-executor.test.ts +++ b/packages/workflow-executor/test/executors/condition-step-executor.test.ts @@ -1,14 +1,14 @@ import type { RunStore } from '../../src/ports/run-store'; import type { ExecutionContext } from '../../src/types/execution'; +import type { RecordRef } from 
'../../src/types/record'; import type { ConditionStepDefinition } from '../../src/types/step-definition'; -import type { ConditionStepHistory } from '../../src/types/step-history'; +import type { ConditionStepOutcome } from '../../src/types/step-outcome'; import ConditionStepExecutor from '../../src/executors/condition-step-executor'; import { StepType } from '../../src/types/step-definition'; function makeStep(overrides: Partial = {}): ConditionStepDefinition { return { - id: 'cond-1', type: StepType.Condition, options: ['Approve', 'Reject'], prompt: 'Should we approve this?', @@ -16,23 +16,9 @@ function makeStep(overrides: Partial = {}): ConditionSt }; } -function makeStepHistory(overrides: Partial = {}): ConditionStepHistory { - return { - type: 'condition', - stepId: 'cond-1', - stepIndex: 0, - status: 'success', - ...overrides, - }; -} - function makeMockRunStore(overrides: Partial = {}): RunStore { return { - getRecords: jest.fn().mockResolvedValue([]), - getRecord: jest.fn().mockResolvedValue(null), - saveRecord: jest.fn().mockResolvedValue(undefined), getStepExecutions: jest.fn().mockResolvedValue([]), - getStepExecution: jest.fn().mockResolvedValue(null), saveStepExecution: jest.fn().mockResolvedValue(undefined), ...overrides, }; @@ -50,9 +36,19 @@ function makeMockModel(toolCallArgs?: Record) { return { model, bindTools, invoke }; } -function makeContext(overrides: Partial = {}): ExecutionContext { +function makeContext( + overrides: Partial> = {}, +): ExecutionContext { return { runId: 'run-1', + stepId: 'cond-1', + stepIndex: 0, + baseRecordRef: { + collectionName: 'customers', + recordId: [1], + stepIndex: 0, + } as RecordRef, + stepDefinition: makeStep(), model: makeMockModel().model, agentPort: {} as ExecutionContext['agentPort'], workflowPort: {} as ExecutionContext['workflowPort'], @@ -64,24 +60,6 @@ function makeContext(overrides: Partial = {}): ExecutionContex } describe('ConditionStepExecutor', () => { - describe('immutability', () => { - 
it('does not mutate the input stepHistory', async () => { - const mockModel = makeMockModel({ - option: 'Reject', - reasoning: 'Incomplete', - question: 'Approve?', - }); - const stepHistory = makeStepHistory(); - const executor = new ConditionStepExecutor(makeContext({ model: mockModel.model })); - - const result = await executor.execute(makeStep(), stepHistory); - - expect(result.stepHistory).not.toBe(stepHistory); - expect(stepHistory.status).toBe('success'); - expect(stepHistory.selectedOption).toBeUndefined(); - }); - }); - describe('AI decision', () => { it('calls AI and returns selected option on success', async () => { const mockModel = makeMockModel({ @@ -96,10 +74,10 @@ describe('ConditionStepExecutor', () => { }); const executor = new ConditionStepExecutor(context); - const result = await executor.execute(makeStep(), makeStepHistory()); + const result = await executor.execute(); - expect(result.stepHistory.status).toBe('success'); - expect((result.stepHistory as ConditionStepHistory).selectedOption).toBe('Reject'); + expect(result.stepOutcome.status).toBe('success'); + expect((result.stepOutcome as ConditionStepOutcome).selectedOption).toBe('Reject'); expect(mockModel.bindTools).toHaveBeenCalledWith( [expect.objectContaining({ name: 'choose-gateway-option' })], @@ -120,13 +98,15 @@ describe('ConditionStepExecutor', () => { reasoning: 'Looks good', question: 'Should we?', }); - const executor = new ConditionStepExecutor(makeContext({ model: mockModel.model })); - - await executor.execute( - makeStep({ options: ['Approve', 'Reject', 'Defer'] }), - makeStepHistory(), + const executor = new ConditionStepExecutor( + makeContext({ + model: mockModel.model, + stepDefinition: makeStep({ options: ['Approve', 'Reject', 'Defer'] }), + }), ); + await executor.execute(); + const tool = mockModel.bindTools.mock.calls[0][0][0]; expect(tool.name).toBe('choose-gateway-option'); expect(tool.schema.parse({ option: 'Approve', reasoning: 'r', question: 'q' })).toBeTruthy(); 
@@ -143,13 +123,13 @@ describe('ConditionStepExecutor', () => { reasoning: 'Looks good', question: 'Should we approve?', }); - const context = makeContext({ model: mockModel.model }); + const context = makeContext({ + model: mockModel.model, + stepDefinition: makeStep({ prompt: 'Custom prompt for this step' }), + }); const executor = new ConditionStepExecutor(context); - await executor.execute( - makeStep({ prompt: 'Custom prompt for this step' }), - makeStepHistory(), - ); + await executor.execute(); const messages = mockModel.invoke.mock.calls[0][0]; expect(messages).toHaveLength(2); @@ -164,10 +144,13 @@ describe('ConditionStepExecutor', () => { reasoning: 'Default', question: 'Approve?', }); - const context = makeContext({ model: mockModel.model }); + const context = makeContext({ + model: mockModel.model, + stepDefinition: makeStep({ prompt: undefined }), + }); const executor = new ConditionStepExecutor(context); - await executor.execute(makeStep({ prompt: undefined }), makeStepHistory()); + await executor.execute(); const messages = mockModel.invoke.mock.calls[0][0]; const humanMessage = messages[messages.length - 1]; @@ -181,7 +164,6 @@ describe('ConditionStepExecutor', () => { question: 'Final approval?', }); const runStore = makeMockRunStore({ - getStepExecution: jest.fn().mockResolvedValue(null), getStepExecutions: jest.fn().mockResolvedValue([ { type: 'condition', @@ -195,13 +177,12 @@ describe('ConditionStepExecutor', () => { runStore, history: [ { - step: { - id: 'prev-step', + stepDefinition: { type: StepType.Condition, options: ['Yes', 'No'], prompt: 'Previous question', }, - stepHistory: { + stepOutcome: { type: 'condition', stepId: 'prev-step', stepIndex: 0, @@ -210,12 +191,13 @@ describe('ConditionStepExecutor', () => { }, ], }); - const executor = new ConditionStepExecutor(context); + const executor = new ConditionStepExecutor({ + ...context, + stepId: 'cond-2', + stepIndex: 1, + }); - await executor.execute( - makeStep({ id: 'cond-2' }), - 
makeStepHistory({ stepId: 'cond-2', stepIndex: 1 }), - ); + await executor.execute(); const messages = mockModel.invoke.mock.calls[0][0]; expect(messages).toHaveLength(3); @@ -240,11 +222,11 @@ describe('ConditionStepExecutor', () => { }); const executor = new ConditionStepExecutor(context); - const result = await executor.execute(makeStep(), makeStepHistory()); + const result = await executor.execute(); - expect(result.stepHistory.status).toBe('manual-decision'); - expect(result.stepHistory.error).toBeUndefined(); - expect((result.stepHistory as ConditionStepHistory).selectedOption).toBeUndefined(); + expect(result.stepOutcome.status).toBe('manual-decision'); + expect(result.stepOutcome.error).toBeUndefined(); + expect((result.stepOutcome as ConditionStepOutcome).selectedOption).toBeUndefined(); expect(runStore.saveStepExecution).toHaveBeenCalledWith({ type: 'condition', stepIndex: 0, @@ -268,10 +250,10 @@ describe('ConditionStepExecutor', () => { }); const executor = new ConditionStepExecutor(context); - const result = await executor.execute(makeStep(), makeStepHistory()); + const result = await executor.execute(); - expect(result.stepHistory.status).toBe('error'); - expect(result.stepHistory.error).toBe( + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe( 'AI returned a malformed tool call for "choose-gateway-option": JSON parse error', ); expect(runStore.saveStepExecution).not.toHaveBeenCalled(); @@ -287,10 +269,10 @@ describe('ConditionStepExecutor', () => { }); const executor = new ConditionStepExecutor(context); - const result = await executor.execute(makeStep(), makeStepHistory()); + const result = await executor.execute(); - expect(result.stepHistory.status).toBe('error'); - expect(result.stepHistory.error).toBe('API timeout'); + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe('API timeout'); }); it('lets run store errors propagate', async () => { @@ -304,7 +286,7 @@ 
describe('ConditionStepExecutor', () => { }); const executor = new ConditionStepExecutor(makeContext({ model: mockModel.model, runStore })); - await expect(executor.execute(makeStep(), makeStepHistory())).rejects.toThrow('Storage full'); + await expect(executor.execute()).rejects.toThrow('Storage full'); }); }); }); diff --git a/packages/workflow-executor/test/executors/read-record-step-executor.test.ts b/packages/workflow-executor/test/executors/read-record-step-executor.test.ts new file mode 100644 index 0000000000..7435600666 --- /dev/null +++ b/packages/workflow-executor/test/executors/read-record-step-executor.test.ts @@ -0,0 +1,778 @@ +import type { AgentPort } from '../../src/ports/agent-port'; +import type { RunStore } from '../../src/ports/run-store'; +import type { WorkflowPort } from '../../src/ports/workflow-port'; +import type { ExecutionContext } from '../../src/types/execution'; +import type { CollectionSchema, RecordRef } from '../../src/types/record'; +import type { AiTaskStepDefinition } from '../../src/types/step-definition'; + +import { NoRecordsError, RecordNotFoundError } from '../../src/errors'; +import ReadRecordStepExecutor from '../../src/executors/read-record-step-executor'; +import { StepType } from '../../src/types/step-definition'; + +function makeStep(overrides: Partial = {}): AiTaskStepDefinition { + return { + type: StepType.ReadRecord, + prompt: 'Read the customer email', + ...overrides, + }; +} + +function makeRecordRef(overrides: Partial = {}): RecordRef { + return { + collectionName: 'customers', + recordId: [42], + stepIndex: 0, + ...overrides, + }; +} + +function makeMockAgentPort( + recordsByCollection: Record }> = { + customers: { values: { email: 'john@example.com', name: 'John Doe', orders: null } }, + }, +): AgentPort { + return { + getRecord: jest + .fn() + .mockImplementation((collectionName: string) => + Promise.resolve(recordsByCollection[collectionName] ?? 
{ values: {} }), + ), + updateRecord: jest.fn(), + getRelatedData: jest.fn(), + executeAction: jest.fn(), + } as unknown as AgentPort; +} + +function makeCollectionSchema(overrides: Partial = {}): CollectionSchema { + return { + collectionName: 'customers', + collectionDisplayName: 'Customers', + primaryKeyFields: ['id'], + fields: [ + { fieldName: 'email', displayName: 'Email', isRelationship: false }, + { fieldName: 'name', displayName: 'Full Name', isRelationship: false }, + { fieldName: 'orders', displayName: 'Orders', isRelationship: true }, + ], + actions: [], + ...overrides, + }; +} + +function makeMockRunStore(overrides: Partial = {}): RunStore { + return { + getStepExecutions: jest.fn().mockResolvedValue([]), + saveStepExecution: jest.fn().mockResolvedValue(undefined), + ...overrides, + }; +} + +function makeMockWorkflowPort( + schemasByCollection: Record = { + customers: makeCollectionSchema(), + }, +): WorkflowPort { + return { + getPendingStepExecutions: jest.fn().mockResolvedValue([]), + updateStepExecution: jest.fn().mockResolvedValue(undefined), + getCollectionSchema: jest + .fn() + .mockImplementation((name: string) => + Promise.resolve( + schemasByCollection[name] ?? makeCollectionSchema({ collectionName: name }), + ), + ), + getMcpServerConfigs: jest.fn().mockResolvedValue([]), + }; +} + +function makeMockModel( + toolCallArgs?: Record, + toolName = 'read-selected-record-fields', +) { + const invoke = jest.fn().mockResolvedValue({ + tool_calls: toolCallArgs ? 
[{ name: toolName, args: toolCallArgs, id: 'call_1' }] : undefined, + }); + const bindTools = jest.fn().mockReturnValue({ invoke }); + const model = { bindTools } as unknown as ExecutionContext['model']; + + return { model, bindTools, invoke }; +} + +function makeContext( + overrides: Partial> = {}, +): ExecutionContext { + return { + runId: 'run-1', + stepId: 'read-1', + stepIndex: 0, + baseRecordRef: makeRecordRef(), + stepDefinition: makeStep(), + model: makeMockModel({ fieldNames: ['email'] }).model, + agentPort: makeMockAgentPort(), + workflowPort: makeMockWorkflowPort(), + runStore: makeMockRunStore(), + history: [], + remoteTools: [], + ...overrides, + }; +} + +describe('ReadRecordStepExecutor', () => { + describe('single record, single field', () => { + it('reads a single field and returns success', async () => { + const mockModel = makeMockModel({ fieldNames: ['email'] }); + const runStore = makeMockRunStore(); + const context = makeContext({ model: mockModel.model, runStore }); + const executor = new ReadRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('success'); + expect(runStore.saveStepExecution).toHaveBeenCalledWith( + expect.objectContaining({ + type: 'read-record', + stepIndex: 0, + executionParams: { fieldNames: ['email'] }, + executionResult: { + fields: [{ value: 'john@example.com', fieldName: 'email', displayName: 'Email' }], + }, + }), + ); + }); + }); + + describe('single record, multiple fields', () => { + it('reads multiple fields in one call and returns success', async () => { + const mockModel = makeMockModel({ fieldNames: ['email', 'name'] }); + const runStore = makeMockRunStore(); + const context = makeContext({ model: mockModel.model, runStore }); + const executor = new ReadRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('success'); + expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 
expect.objectContaining({ + executionParams: { fieldNames: ['email', 'name'] }, + executionResult: { + fields: [ + { value: 'john@example.com', fieldName: 'email', displayName: 'Email' }, + { value: 'John Doe', fieldName: 'name', displayName: 'Full Name' }, + ], + }, + }), + ); + }); + }); + + describe('field resolution by displayName', () => { + it('resolves fields by displayName', async () => { + const mockModel = makeMockModel({ fieldNames: ['Full Name'] }); + const runStore = makeMockRunStore(); + const context = makeContext({ model: mockModel.model, runStore }); + const executor = new ReadRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('success'); + expect(runStore.saveStepExecution).toHaveBeenCalledWith( + expect.objectContaining({ + executionParams: { fieldNames: ['name'] }, + executionResult: { + fields: [{ value: 'John Doe', fieldName: 'name', displayName: 'Full Name' }], + }, + }), + ); + }); + }); + + describe('getRecord receives resolved field names', () => { + it('passes resolved field names (not display names) to getRecord', async () => { + const mockModel = makeMockModel({ fieldNames: ['Full Name', 'Email'] }); + const agentPort = makeMockAgentPort(); + const runStore = makeMockRunStore(); + const context = makeContext({ model: mockModel.model, agentPort, runStore }); + const executor = new ReadRecordStepExecutor(context); + + await executor.execute(); + + expect(agentPort.getRecord).toHaveBeenCalledWith('customers', [42], ['name', 'email']); + }); + + it('passes only resolved field names when some fields are unresolved', async () => { + const mockModel = makeMockModel({ fieldNames: ['Email', 'nonexistent'] }); + const agentPort = makeMockAgentPort(); + const runStore = makeMockRunStore(); + const context = makeContext({ model: mockModel.model, agentPort, runStore }); + const executor = new ReadRecordStepExecutor(context); + + await executor.execute(); + + 
expect(agentPort.getRecord).toHaveBeenCalledWith('customers', [42], ['email']); + }); + + it('returns error when no fields can be resolved', async () => { + const mockModel = makeMockModel({ fieldNames: ['nonexistent', 'unknown'] }); + const agentPort = makeMockAgentPort(); + const runStore = makeMockRunStore(); + const context = makeContext({ model: mockModel.model, agentPort, runStore }); + const executor = new ReadRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe( + 'None of the requested fields could be resolved: nonexistent, unknown', + ); + expect(agentPort.getRecord).not.toHaveBeenCalled(); + expect(runStore.saveStepExecution).not.toHaveBeenCalled(); + }); + }); + + describe('field not found', () => { + it('returns error per field without failing globally', async () => { + const mockModel = makeMockModel({ fieldNames: ['email', 'nonexistent'] }); + const runStore = makeMockRunStore(); + const context = makeContext({ model: mockModel.model, runStore }); + const executor = new ReadRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('success'); + expect(runStore.saveStepExecution).toHaveBeenCalledWith( + expect.objectContaining({ + executionResult: { + fields: [ + { value: 'john@example.com', fieldName: 'email', displayName: 'Email' }, + { + error: 'Field not found: nonexistent', + fieldName: 'nonexistent', + displayName: 'nonexistent', + }, + ], + }, + }), + ); + }); + }); + + describe('relationship fields excluded', () => { + it('excludes relationship fields from tool schema', async () => { + const mockModel = makeMockModel({ fieldNames: ['email'] }); + const runStore = makeMockRunStore(); + const context = makeContext({ model: mockModel.model, runStore }); + const executor = new ReadRecordStepExecutor(context); + + await executor.execute(); + + const tool = 
mockModel.bindTools.mock.calls[0][0][0]; + expect(tool.name).toBe('read-selected-record-fields'); + + // Valid field names (displayNames and fieldNames) should be accepted in an array + expect(tool.schema.parse({ fieldNames: ['Email'] })).toBeTruthy(); + expect(tool.schema.parse({ fieldNames: ['Full Name'] })).toBeTruthy(); + expect(tool.schema.parse({ fieldNames: ['email'] })).toBeTruthy(); + expect(tool.schema.parse({ fieldNames: ['email', 'name'] })).toBeTruthy(); + + // Schema accepts any strings (per-field errors handled in readFieldValues, ISO frontend) + expect(tool.schema.parse({ fieldNames: ['Orders'] })).toBeTruthy(); + + // But rejects non-array values + expect(() => tool.schema.parse({ fieldNames: 'email' })).toThrow(); + }); + }); + + describe('no records available', () => { + it('returns error when no records are available', () => { + const error = new NoRecordsError(); + + expect(error).toBeInstanceOf(NoRecordsError); + expect(error.message).toBe('No records available'); + }); + }); + + describe('no readable fields', () => { + it('returns error when all fields are relationships', async () => { + const schema = makeCollectionSchema({ + fields: [{ fieldName: 'orders', displayName: 'Orders', isRelationship: true }], + }); + const mockModel = makeMockModel({ fieldNames: ['email'] }); + const runStore = makeMockRunStore(); + const workflowPort = makeMockWorkflowPort({ customers: schema }); + const context = makeContext({ model: mockModel.model, runStore, workflowPort }); + const executor = new ReadRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe( + 'No readable fields on record from collection "customers"', + ); + expect(runStore.saveStepExecution).not.toHaveBeenCalled(); + }); + }); + + describe('multi-record AI selection', () => { + it('uses AI to select among multiple records then reads fields', async () => { + const baseRecordRef = 
makeRecordRef({ stepIndex: 1 }); + const relatedRecord = makeRecordRef({ + stepIndex: 2, + recordId: [99], + collectionName: 'orders', + }); + + const ordersSchema = makeCollectionSchema({ + collectionName: 'orders', + collectionDisplayName: 'Orders', + fields: [{ fieldName: 'total', displayName: 'Total', isRelationship: false }], + }); + + // First call: select-record, second call: read-selected-record-fields + const invoke = jest + .fn() + .mockResolvedValueOnce({ + tool_calls: [ + { + name: 'select-record', + args: { recordIdentifier: 'Step 1 - Customers #42' }, + id: 'call_1', + }, + ], + }) + .mockResolvedValueOnce({ + tool_calls: [ + { + name: 'read-selected-record-fields', + args: { fieldNames: ['email'] }, + id: 'call_2', + }, + ], + }); + const bindTools = jest.fn().mockReturnValue({ invoke }); + const model = { bindTools } as unknown as ExecutionContext['model']; + + const runStore = makeMockRunStore({ + getStepExecutions: jest + .fn() + .mockResolvedValue([ + { type: 'load-related-record', stepIndex: 2, record: relatedRecord }, + ]), + }); + const workflowPort = makeMockWorkflowPort({ + customers: makeCollectionSchema(), + orders: ordersSchema, + }); + const context = makeContext({ baseRecordRef, model, runStore, workflowPort }); + const executor = new ReadRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('success'); + expect(bindTools).toHaveBeenCalledTimes(2); + + // First call: select-record tool + const selectTool = bindTools.mock.calls[0][0][0]; + expect(selectTool.name).toBe('select-record'); + + // Second call: read-selected-record-fields tool + const readTool = bindTools.mock.calls[1][0][0]; + expect(readTool.name).toBe('read-selected-record-fields'); + + // Record selection includes previous steps context + system prompt + user prompt + const selectMessages = invoke.mock.calls[0][0]; + expect(selectMessages).toHaveLength(2); + 
expect(selectMessages[0].content).toContain('selecting the most relevant record'); + expect(selectMessages[1].content).toContain('Read the customer email'); + + expect(runStore.saveStepExecution).toHaveBeenCalledWith( + expect.objectContaining({ + executionResult: { + fields: [{ value: 'john@example.com', fieldName: 'email', displayName: 'Email' }], + }, + selectedRecordRef: expect.objectContaining({ + recordId: [42], + collectionName: 'customers', + }), + }), + ); + }); + + it('reads fields from the second record when AI selects it', async () => { + const baseRecordRef = makeRecordRef({ stepIndex: 1 }); + const relatedRecord = makeRecordRef({ + stepIndex: 2, + recordId: [99], + collectionName: 'orders', + }); + + const ordersSchema = makeCollectionSchema({ + collectionName: 'orders', + collectionDisplayName: 'Orders', + fields: [{ fieldName: 'total', displayName: 'Total', isRelationship: false }], + }); + + const invoke = jest + .fn() + .mockResolvedValueOnce({ + tool_calls: [ + { + name: 'select-record', + args: { recordIdentifier: 'Step 2 - Orders #99' }, + id: 'call_1', + }, + ], + }) + .mockResolvedValueOnce({ + tool_calls: [ + { name: 'read-selected-record-fields', args: { fieldNames: ['total'] }, id: 'call_2' }, + ], + }); + const bindTools = jest.fn().mockReturnValue({ invoke }); + const model = { bindTools } as unknown as ExecutionContext['model']; + + const runStore = makeMockRunStore({ + getStepExecutions: jest + .fn() + .mockResolvedValue([ + { type: 'load-related-record', stepIndex: 2, record: relatedRecord }, + ]), + }); + const workflowPort = makeMockWorkflowPort({ + customers: makeCollectionSchema(), + orders: ordersSchema, + }); + const agentPort = makeMockAgentPort({ + orders: { values: { total: 150 } }, + }); + const context = makeContext({ baseRecordRef, model, runStore, workflowPort, agentPort }); + const executor = new ReadRecordStepExecutor(context); + + const result = await executor.execute(); + + 
expect(result.stepOutcome.status).toBe('success'); + expect(runStore.saveStepExecution).toHaveBeenCalledWith( + expect.objectContaining({ + executionResult: { + fields: [{ value: 150, fieldName: 'total', displayName: 'Total' }], + }, + selectedRecordRef: expect.objectContaining({ + recordId: [99], + collectionName: 'orders', + }), + }), + ); + }); + + it('includes step index in select-record tool schema when records have stepIndex', async () => { + const baseRecordRef = makeRecordRef({ stepIndex: 3 }); + const relatedRecord = makeRecordRef({ + stepIndex: 5, + recordId: [99], + collectionName: 'orders', + }); + + const ordersSchema = makeCollectionSchema({ + collectionName: 'orders', + collectionDisplayName: 'Orders', + fields: [{ fieldName: 'total', displayName: 'Total', isRelationship: false }], + }); + + const invoke = jest + .fn() + .mockResolvedValueOnce({ + tool_calls: [ + { + name: 'select-record', + args: { recordIdentifier: 'Step 3 - Customers #42' }, + id: 'call_1', + }, + ], + }) + .mockResolvedValueOnce({ + tool_calls: [ + { name: 'read-selected-record-fields', args: { fieldNames: ['email'] }, id: 'call_2' }, + ], + }); + const bindTools = jest.fn().mockReturnValue({ invoke }); + const model = { bindTools } as unknown as ExecutionContext['model']; + + const runStore = makeMockRunStore({ + getStepExecutions: jest + .fn() + .mockResolvedValue([ + { type: 'load-related-record', stepIndex: 5, record: relatedRecord }, + ]), + }); + const workflowPort = makeMockWorkflowPort({ + customers: makeCollectionSchema(), + orders: ordersSchema, + }); + const executor = new ReadRecordStepExecutor( + makeContext({ baseRecordRef, model, runStore, workflowPort }), + ); + + await executor.execute(); + + const selectTool = bindTools.mock.calls[0][0][0]; + const schemaShape = selectTool.schema.shape; + // Enum values should include step-prefixed identifiers + expect(schemaShape.recordIdentifier.options).toEqual([ + 'Step 3 - Customers #42', + 'Step 5 - Orders #99', + ]); + 
}); + }); + + describe('AI record selection failure', () => { + it('returns error when AI selects a non-existent record identifier', async () => { + const baseRecordRef = makeRecordRef(); + const relatedRecord = makeRecordRef({ + stepIndex: 1, + recordId: [99], + collectionName: 'orders', + }); + + const ordersSchema = makeCollectionSchema({ + collectionName: 'orders', + collectionDisplayName: 'Orders', + fields: [{ fieldName: 'total', displayName: 'Total', isRelationship: false }], + }); + + const invoke = jest.fn().mockResolvedValueOnce({ + tool_calls: [ + { name: 'select-record', args: { recordIdentifier: 'NonExistent #999' }, id: 'call_1' }, + ], + }); + const bindTools = jest.fn().mockReturnValue({ invoke }); + const model = { bindTools } as unknown as ExecutionContext['model']; + + const runStore = makeMockRunStore({ + getStepExecutions: jest + .fn() + .mockResolvedValue([ + { type: 'load-related-record', stepIndex: 1, record: relatedRecord }, + ]), + }); + const workflowPort = makeMockWorkflowPort({ + customers: makeCollectionSchema(), + orders: ordersSchema, + }); + const context = makeContext({ baseRecordRef, model, runStore, workflowPort }); + const executor = new ReadRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe( + 'AI selected record "NonExistent #999" which does not match any available record', + ); + expect(runStore.saveStepExecution).not.toHaveBeenCalled(); + }); + }); + + describe('agentPort.getRecord error', () => { + it('returns error when agentPort.getRecord throws a WorkflowExecutorError', async () => { + const agentPort = makeMockAgentPort(); + (agentPort.getRecord as jest.Mock).mockRejectedValue( + new RecordNotFoundError('customers', '42'), + ); + const mockModel = makeMockModel({ fieldNames: ['email'] }); + const runStore = makeMockRunStore(); + const context = makeContext({ model: mockModel.model, runStore, agentPort }); 
+ const executor = new ReadRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe('Record not found: collection "customers", id "42"'); + expect(runStore.saveStepExecution).not.toHaveBeenCalled(); + }); + + it('lets infrastructure errors propagate', async () => { + const agentPort = makeMockAgentPort(); + (agentPort.getRecord as jest.Mock).mockRejectedValue(new Error('Connection refused')); + const mockModel = makeMockModel({ fieldNames: ['email'] }); + const context = makeContext({ model: mockModel.model, agentPort }); + const executor = new ReadRecordStepExecutor(context); + + await expect(executor.execute()).rejects.toThrow('Connection refused'); + }); + }); + + describe('model error', () => { + it('lets non-WorkflowExecutorError propagate from AI invocation', async () => { + const invoke = jest.fn().mockRejectedValue(new Error('API timeout')); + const bindTools = jest.fn().mockReturnValue({ invoke }); + const context = makeContext({ + model: { bindTools } as unknown as ExecutionContext['model'], + }); + const executor = new ReadRecordStepExecutor(context); + + await expect(executor.execute()).rejects.toThrow('API timeout'); + }); + }); + + describe('malformed tool call', () => { + it('returns error status on malformed tool call', async () => { + const invoke = jest.fn().mockResolvedValue({ + tool_calls: [], + invalid_tool_calls: [ + { name: 'read-selected-record-fields', args: '{bad json', error: 'JSON parse error' }, + ], + }); + const bindTools = jest.fn().mockReturnValue({ invoke }); + const runStore = makeMockRunStore(); + const context = makeContext({ + model: { bindTools } as unknown as ExecutionContext['model'], + runStore, + }); + const executor = new ReadRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe( + 'AI returned a 
malformed tool call for "read-selected-record-fields": JSON parse error', + ); + expect(runStore.saveStepExecution).not.toHaveBeenCalled(); + }); + + it('returns error status when AI returns no tool call at all', async () => { + const invoke = jest.fn().mockResolvedValue({ tool_calls: [] }); + const bindTools = jest.fn().mockReturnValue({ invoke }); + const runStore = makeMockRunStore(); + const context = makeContext({ + model: { bindTools } as unknown as ExecutionContext['model'], + runStore, + }); + const executor = new ReadRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe('AI did not return a tool call'); + expect(runStore.saveStepExecution).not.toHaveBeenCalled(); + }); + }); + + describe('RunStore error propagation', () => { + it('lets saveStepExecution errors propagate', async () => { + const mockModel = makeMockModel({ fieldNames: ['email'] }); + const runStore = makeMockRunStore({ + saveStepExecution: jest.fn().mockRejectedValue(new Error('Storage full')), + }); + const context = makeContext({ model: mockModel.model, runStore }); + const executor = new ReadRecordStepExecutor(context); + + await expect(executor.execute()).rejects.toThrow('Storage full'); + }); + + it('lets getStepExecutions errors propagate', async () => { + const mockModel = makeMockModel({ fieldNames: ['email'] }); + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockRejectedValue(new Error('Connection lost')), + }); + const context = makeContext({ model: mockModel.model, runStore }); + const executor = new ReadRecordStepExecutor(context); + + await expect(executor.execute()).rejects.toThrow('Connection lost'); + }); + }); + + describe('previous steps context', () => { + it('includes previous steps summary in read-field messages', async () => { + const mockModel = makeMockModel({ fieldNames: ['email'] }); + const runStore = makeMockRunStore({ + 
getStepExecutions: jest.fn().mockResolvedValue([ + { + type: 'condition', + stepIndex: 0, + executionParams: { answer: 'Yes', reasoning: 'Approved' }, + }, + ]), + }); + const context = makeContext({ + model: mockModel.model, + runStore, + history: [ + { + stepDefinition: { + type: StepType.Condition, + options: ['Yes', 'No'], + prompt: 'Should we proceed?', + }, + stepOutcome: { + type: 'condition', + stepId: 'prev-step', + stepIndex: 0, + status: 'success', + }, + }, + ], + }); + const executor = new ReadRecordStepExecutor({ + ...context, + stepId: 'read-2', + stepIndex: 1, + }); + + await executor.execute(); + + const messages = mockModel.invoke.mock.calls[0][0]; + // previous steps summary + system prompt + collection info + human message = 4 + expect(messages).toHaveLength(4); + expect(messages[0].content).toContain('Should we proceed?'); + expect(messages[0].content).toContain('"answer":"Yes"'); + expect(messages[1].content).toContain('reading fields from a record'); + }); + }); + + describe('default prompt', () => { + it('uses default prompt when step.prompt is undefined', async () => { + const mockModel = makeMockModel({ fieldNames: ['email'] }); + const context = makeContext({ + model: mockModel.model, + stepDefinition: makeStep({ prompt: undefined }), + }); + const executor = new ReadRecordStepExecutor(context); + + await executor.execute(); + + const messages = mockModel.invoke.mock.calls[0][0]; + const humanMessage = messages[messages.length - 1]; + expect(humanMessage.content).toBe('**Request**: Read the relevant fields.'); + }); + }); + + describe('saveStepExecution arguments', () => { + it('saves executionParams, executionResult, and selectedRecord', async () => { + const mockModel = makeMockModel({ fieldNames: ['email', 'name'] }); + const runStore = makeMockRunStore(); + const context = makeContext({ + model: mockModel.model, + runStore, + stepIndex: 3, + }); + const executor = new ReadRecordStepExecutor(context); + + await executor.execute(); + + 
expect(runStore.saveStepExecution).toHaveBeenCalledWith({ + type: 'read-record', + stepIndex: 3, + executionParams: { fieldNames: ['email', 'name'] }, + executionResult: { + fields: [ + { value: 'john@example.com', fieldName: 'email', displayName: 'Email' }, + { value: 'John Doe', fieldName: 'name', displayName: 'Full Name' }, + ], + }, + selectedRecordRef: { + collectionName: 'customers', + recordId: [42], + stepIndex: 0, + }, + }); + }); + }); +}); From 613ec1ba1df5d840a4342e401ed0044743d01abc Mon Sep 17 00:00:00 2001 From: Matthieu Date: Fri, 20 Mar 2026 09:27:03 +0100 Subject: [PATCH 10/18] feat(workflow-executor): add HTTP server and WorkflowRunner scaffold (#1500) --- packages/workflow-executor/CLAUDE.md | 6 + packages/workflow-executor/package.json | 7 + .../src/executors/base-step-executor.ts | 2 +- .../src/executors/condition-step-executor.ts | 2 +- .../executors/read-record-step-executor.ts | 4 +- .../src/http/executor-http-server.ts | 88 +++++++++++++ packages/workflow-executor/src/index.ts | 4 + .../workflow-executor/src/ports/run-store.ts | 4 +- packages/workflow-executor/src/runner.ts | 52 ++++++++ .../test/executors/base-step-executor.test.ts | 18 +-- .../executors/condition-step-executor.test.ts | 4 +- .../read-record-step-executor.test.ts | 8 +- .../test/http/executor-http-server.test.ts | 122 ++++++++++++++++++ .../workflow-executor/test/runner.test.ts | 96 ++++++++++++++ 14 files changed, 400 insertions(+), 17 deletions(-) create mode 100644 packages/workflow-executor/src/http/executor-http-server.ts create mode 100644 packages/workflow-executor/src/runner.ts create mode 100644 packages/workflow-executor/test/http/executor-http-server.test.ts create mode 100644 packages/workflow-executor/test/runner.test.ts diff --git a/packages/workflow-executor/CLAUDE.md b/packages/workflow-executor/CLAUDE.md index bff9a84141..333bfdee1a 100644 --- a/packages/workflow-executor/CLAUDE.md +++ b/packages/workflow-executor/CLAUDE.md @@ -43,6 +43,7 @@ Front ◀──▶ 
Orchestrator ◀──pull/push──▶ Executor ── ``` src/ ├── errors.ts # WorkflowExecutorError, MissingToolCallError, MalformedToolCallError, NoRecordsError, NoReadableFieldsError +├── runner.ts # Runner class — main entry point (start/stop/triggerPoll, HTTP server wiring) ├── types/ # Core type definitions (@draft) │ ├── step-definition.ts # StepType enum + step definition interfaces │ ├── step-outcome.ts # Step outcome tracking types (StepOutcome, sent to orchestrator) @@ -53,10 +54,15 @@ src/ │ ├── agent-port.ts # Interface to the Forest Admin agent (datasource) │ ├── workflow-port.ts # Interface to the orchestrator │ └── run-store.ts # Interface for persisting run state (scoped to a run) +├── adapters/ # Port implementations +│ ├── agent-client-agent-port.ts # AgentPort via @forestadmin/agent-client +│ └── forest-server-workflow-port.ts # WorkflowPort via HTTP (forestadmin-client ServerUtils) ├── executors/ # Step executor implementations │ ├── base-step-executor.ts # Abstract base class (context injection + shared helpers) │ ├── condition-step-executor.ts # AI-powered condition step (chooses among options) │ └── read-record-step-executor.ts # AI-powered record field reading step +├── http/ # HTTP server (optional, for frontend data access) +│ └── executor-http-server.ts # Koa server: GET /runs/:runId, POST /runs/:runId/trigger └── index.ts # Barrel exports ``` diff --git a/packages/workflow-executor/package.json b/packages/workflow-executor/package.json index 3138b9a5d9..cf94a502ea 100644 --- a/packages/workflow-executor/package.json +++ b/packages/workflow-executor/package.json @@ -25,7 +25,14 @@ "dependencies": { "@forestadmin/agent-client": "1.4.13", "@forestadmin/forestadmin-client": "1.37.17", + "@koa/router": "^13.1.0", "@langchain/core": "1.1.33", + "koa": "^3.0.1", "zod": "4.3.6" + }, + "devDependencies": { + "@types/koa": "^2.13.5", + "@types/koa__router": "^12.0.4", + "supertest": "^7.1.3" } } diff --git 
a/packages/workflow-executor/src/executors/base-step-executor.ts b/packages/workflow-executor/src/executors/base-step-executor.ts index ed9de5cb39..2197843be8 100644 --- a/packages/workflow-executor/src/executors/base-step-executor.ts +++ b/packages/workflow-executor/src/executors/base-step-executor.ts @@ -38,7 +38,7 @@ export default abstract class BaseStepExecutor { - const allStepExecutions = await this.context.runStore.getStepExecutions(); + const allStepExecutions = await this.context.runStore.getStepExecutions(this.context.runId); return this.context.history .map(({ stepDefinition, stepOutcome }) => { diff --git a/packages/workflow-executor/src/executors/condition-step-executor.ts b/packages/workflow-executor/src/executors/condition-step-executor.ts index ee4a60f830..217abdcff4 100644 --- a/packages/workflow-executor/src/executors/condition-step-executor.ts +++ b/packages/workflow-executor/src/executors/condition-step-executor.ts @@ -80,7 +80,7 @@ export default class ConditionStepExecutor extends BaseStepExecutor f.fieldName) }, @@ -203,7 +203,7 @@ export default class ReadRecordStepExecutor extends BaseStepExecutor { - const stepExecutions = await this.context.runStore.getStepExecutions(); + const stepExecutions = await this.context.runStore.getStepExecutions(this.context.runId); const relatedRecords = stepExecutions .filter((e): e is LoadRelatedRecordStepExecutionData => e.type === 'load-related-record') .map(e => e.record); diff --git a/packages/workflow-executor/src/http/executor-http-server.ts b/packages/workflow-executor/src/http/executor-http-server.ts new file mode 100644 index 0000000000..72ea42936e --- /dev/null +++ b/packages/workflow-executor/src/http/executor-http-server.ts @@ -0,0 +1,88 @@ +import type { RunStore } from '../ports/run-store'; +import type Runner from '../runner'; +import type { Server } from 'http'; + +import Router from '@koa/router'; +import http from 'http'; +import Koa from 'koa'; + +export interface 
ExecutorHttpServerOptions { + port: number; + runStore: RunStore; + runner: Runner; +} + +export default class ExecutorHttpServer { + private readonly app: Koa; + private readonly options: ExecutorHttpServerOptions; + private server: Server | null = null; + + constructor(options: ExecutorHttpServerOptions) { + this.options = options; + this.app = new Koa(); + + // Error middleware — catches all async handler errors and returns structured JSON + this.app.use(async (ctx, next) => { + try { + await next(); + } catch (err: unknown) { + ctx.status = 500; + ctx.body = { error: err instanceof Error ? err.message : 'Internal server error' }; + } + }); + + const router = new Router(); + router.get('/runs/:runId', this.handleGetRun.bind(this)); + router.post('/runs/:runId/trigger', this.handleTrigger.bind(this)); + + this.app.use(router.routes()); + this.app.use(router.allowedMethods()); + } + + async start(): Promise { + return new Promise((resolve, reject) => { + this.server = http.createServer(this.app.callback()); + this.server.once('error', reject); + this.server.listen(this.options.port, resolve); + }); + } + + async stop(): Promise { + return new Promise((resolve, reject) => { + if (!this.server) { + resolve(); + + return; + } + + this.server.close(err => { + if (err) { + reject(err); + } else { + this.server = null; + resolve(); + } + }); + }); + } + + get callback() { + return this.app.callback(); + } + + private async handleGetRun(ctx: Koa.Context): Promise { + const { runId } = ctx.params; + const steps = await this.options.runStore.getStepExecutions(runId); + + ctx.body = { steps }; + } + + private async handleTrigger(ctx: Koa.Context): Promise { + const { runId } = ctx.params; + + await this.options.runner.triggerPoll(runId); + + ctx.status = 200; + ctx.body = { triggered: true }; + } +} diff --git a/packages/workflow-executor/src/index.ts b/packages/workflow-executor/src/index.ts index 16c054cfdd..916bbc0751 100644 --- a/packages/workflow-executor/src/index.ts 
+++ b/packages/workflow-executor/src/index.ts @@ -60,3 +60,7 @@ export { default as ConditionStepExecutor } from './executors/condition-step-exe export { default as ReadRecordStepExecutor } from './executors/read-record-step-executor'; export { default as AgentClientAgentPort } from './adapters/agent-client-agent-port'; export { default as ForestServerWorkflowPort } from './adapters/forest-server-workflow-port'; +export { default as ExecutorHttpServer } from './http/executor-http-server'; +export type { ExecutorHttpServerOptions } from './http/executor-http-server'; +export { default as Runner } from './runner'; +export type { RunnerConfig } from './runner'; diff --git a/packages/workflow-executor/src/ports/run-store.ts b/packages/workflow-executor/src/ports/run-store.ts index 6b899da848..426e826a9f 100644 --- a/packages/workflow-executor/src/ports/run-store.ts +++ b/packages/workflow-executor/src/ports/run-store.ts @@ -3,6 +3,6 @@ import type { StepExecutionData } from '../types/step-execution-data'; export interface RunStore { - getStepExecutions(): Promise; - saveStepExecution(stepExecution: StepExecutionData): Promise; + getStepExecutions(runId: string): Promise; + saveStepExecution(runId: string, stepExecution: StepExecutionData): Promise; } diff --git a/packages/workflow-executor/src/runner.ts b/packages/workflow-executor/src/runner.ts new file mode 100644 index 0000000000..652772c71c --- /dev/null +++ b/packages/workflow-executor/src/runner.ts @@ -0,0 +1,52 @@ +// TODO: implement polling loop, execution dispatch, AI wiring (see spec section 4.1) + +import type { AgentPort } from './ports/agent-port'; +import type { RunStore } from './ports/run-store'; +import type { WorkflowPort } from './ports/workflow-port'; + +import ExecutorHttpServer from './http/executor-http-server'; + +export interface RunnerConfig { + agentPort: AgentPort; + workflowPort: WorkflowPort; + runStore: RunStore; + pollingIntervalMs: number; + httpPort?: number; +} + +export default class 
Runner { + private readonly config: RunnerConfig; + private httpServer: ExecutorHttpServer | null = null; + + constructor(config: RunnerConfig) { + this.config = config; + } + + async start(): Promise { + if (this.config.httpPort !== undefined && !this.httpServer) { + const server = new ExecutorHttpServer({ + port: this.config.httpPort, + runStore: this.config.runStore, + runner: this, + }); + await server.start(); + this.httpServer = server; + } + + // TODO: start polling loop + } + + async stop(): Promise { + if (this.httpServer) { + await this.httpServer.stop(); + this.httpServer = null; + } + + // TODO: stop polling loop, close connections + } + + // eslint-disable-next-line class-methods-use-this, @typescript-eslint/no-unused-vars + async triggerPoll(_runId: string): Promise { + // TODO: trigger immediate poll cycle for this runId + } +} diff --git a/packages/workflow-executor/test/executors/base-step-executor.test.ts b/packages/workflow-executor/test/executors/base-step-executor.test.ts index 4d79c03cf1..86491fbb8f 100644 --- a/packages/workflow-executor/test/executors/base-step-executor.test.ts +++ b/packages/workflow-executor/test/executors/base-step-executor.test.ts @@ -88,17 +88,18 @@ describe('BaseStepExecutor', () => { }); it('includes prompt and executionParams from previous steps', async () => { + const runStore = makeMockRunStore([ + { + type: 'condition', + stepIndex: 0, + executionParams: { answer: 'Yes', reasoning: 'Order is valid' }, + executionResult: { answer: 'Yes' }, + }, + ]); const executor = new TestableExecutor( makeContext({ history: [makeHistoryEntry({ stepId: 'cond-1', stepIndex: 0, prompt: 'Approve?' 
})], - runStore: makeMockRunStore([ - { - type: 'condition', - stepIndex: 0, - executionParams: { answer: 'Yes', reasoning: 'Order is valid' }, - executionResult: { answer: 'Yes' }, - }, - ]), + runStore, }), ); @@ -110,6 +111,7 @@ describe('BaseStepExecutor', () => { expect(result).toContain('Prompt: Approve?'); expect(result).toContain('Input: {"answer":"Yes","reasoning":"Order is valid"}'); expect(result).toContain('Output: {"answer":"Yes"}'); + expect(runStore.getStepExecutions).toHaveBeenCalledWith('run-1'); }); it('uses Input for matched steps and History for unmatched steps', async () => { diff --git a/packages/workflow-executor/test/executors/condition-step-executor.test.ts b/packages/workflow-executor/test/executors/condition-step-executor.test.ts index b41cf35bc5..23eb6c8365 100644 --- a/packages/workflow-executor/test/executors/condition-step-executor.test.ts +++ b/packages/workflow-executor/test/executors/condition-step-executor.test.ts @@ -84,7 +84,7 @@ describe('ConditionStepExecutor', () => { { tool_choice: 'any' }, ); - expect(runStore.saveStepExecution).toHaveBeenCalledWith({ + expect(runStore.saveStepExecution).toHaveBeenCalledWith('run-1', { type: 'condition', stepIndex: 0, executionParams: { answer: 'Reject', reasoning: 'The request is incomplete' }, @@ -227,7 +227,7 @@ describe('ConditionStepExecutor', () => { expect(result.stepOutcome.status).toBe('manual-decision'); expect(result.stepOutcome.error).toBeUndefined(); expect((result.stepOutcome as ConditionStepOutcome).selectedOption).toBeUndefined(); - expect(runStore.saveStepExecution).toHaveBeenCalledWith({ + expect(runStore.saveStepExecution).toHaveBeenCalledWith('run-1', { type: 'condition', stepIndex: 0, executionParams: { answer: null, reasoning: 'None apply' }, diff --git a/packages/workflow-executor/test/executors/read-record-step-executor.test.ts b/packages/workflow-executor/test/executors/read-record-step-executor.test.ts index 7435600666..eb9c3bc5de 100644 --- 
a/packages/workflow-executor/test/executors/read-record-step-executor.test.ts +++ b/packages/workflow-executor/test/executors/read-record-step-executor.test.ts @@ -129,6 +129,7 @@ describe('ReadRecordStepExecutor', () => { expect(result.stepOutcome.status).toBe('success'); expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', expect.objectContaining({ type: 'read-record', stepIndex: 0, @@ -152,6 +153,7 @@ describe('ReadRecordStepExecutor', () => { expect(result.stepOutcome.status).toBe('success'); expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', expect.objectContaining({ executionParams: { fieldNames: ['email', 'name'] }, executionResult: { @@ -176,6 +178,7 @@ describe('ReadRecordStepExecutor', () => { expect(result.stepOutcome.status).toBe('success'); expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', expect.objectContaining({ executionParams: { fieldNames: ['name'] }, executionResult: { @@ -240,6 +243,7 @@ describe('ReadRecordStepExecutor', () => { expect(result.stepOutcome.status).toBe('success'); expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', expect.objectContaining({ executionResult: { fields: [ @@ -385,6 +389,7 @@ describe('ReadRecordStepExecutor', () => { expect(selectMessages[1].content).toContain('Read the customer email'); expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', expect.objectContaining({ executionResult: { fields: [{ value: 'john@example.com', fieldName: 'email', displayName: 'Email' }], @@ -451,6 +456,7 @@ describe('ReadRecordStepExecutor', () => { expect(result.stepOutcome.status).toBe('success'); expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', expect.objectContaining({ executionResult: { fields: [{ value: 150, fieldName: 'total', displayName: 'Total' }], @@ -757,7 +763,7 @@ describe('ReadRecordStepExecutor', () => { await executor.execute(); - expect(runStore.saveStepExecution).toHaveBeenCalledWith({ + 
expect(runStore.saveStepExecution).toHaveBeenCalledWith('run-1', { type: 'read-record', stepIndex: 3, executionParams: { fieldNames: ['email', 'name'] }, diff --git a/packages/workflow-executor/test/http/executor-http-server.test.ts b/packages/workflow-executor/test/http/executor-http-server.test.ts new file mode 100644 index 0000000000..f4d415b4c8 --- /dev/null +++ b/packages/workflow-executor/test/http/executor-http-server.test.ts @@ -0,0 +1,122 @@ +import type { RunStore } from '../../src/ports/run-store'; +import type Runner from '../../src/runner'; + +import request from 'supertest'; + +import ExecutorHttpServer from '../../src/http/executor-http-server'; + +function createMockRunStore(overrides: Partial = {}): RunStore { + return { + getStepExecutions: jest.fn().mockResolvedValue([]), + saveStepExecution: jest.fn().mockResolvedValue(undefined), + ...overrides, + }; +} + +function createMockRunner(overrides: Partial = {}): Runner { + return { + start: jest.fn().mockResolvedValue(undefined), + stop: jest.fn().mockResolvedValue(undefined), + triggerPoll: jest.fn().mockResolvedValue(undefined), + ...overrides, + } as unknown as Runner; +} + +describe('ExecutorHttpServer', () => { + describe('GET /runs/:runId', () => { + it('should return steps from the run store', async () => { + const steps = [{ type: 'condition' as const, stepIndex: 0 }]; + + const runStore = createMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue(steps), + }); + + const server = new ExecutorHttpServer({ + port: 0, + runStore, + runner: createMockRunner(), + }); + + const response = await request(server.callback).get('/runs/run-1'); + + expect(response.status).toBe(200); + expect(response.body).toEqual({ steps }); + expect(runStore.getStepExecutions).toHaveBeenCalledWith('run-1'); + }); + + it('should return 500 when getStepExecutions rejects', async () => { + const runStore = createMockRunStore({ + getStepExecutions: jest.fn().mockRejectedValue(new Error('db error')), + }); + + 
const server = new ExecutorHttpServer({ + port: 0, + runStore, + runner: createMockRunner(), + }); + + const response = await request(server.callback).get('/runs/run-1'); + + expect(response.status).toBe(500); + expect(response.body).toEqual({ error: 'db error' }); + }); + }); + + describe('POST /runs/:runId/trigger', () => { + it('should call runner.triggerPoll with the runId', async () => { + const runner = createMockRunner(); + + const server = new ExecutorHttpServer({ + port: 0, + runStore: createMockRunStore(), + runner, + }); + + const response = await request(server.callback).post('/runs/run-1/trigger'); + + expect(response.status).toBe(200); + expect(response.body).toEqual({ triggered: true }); + expect(runner.triggerPoll).toHaveBeenCalledWith('run-1'); + }); + + it('should propagate errors from runner', async () => { + const runner = createMockRunner({ + triggerPoll: jest.fn().mockRejectedValue(new Error('poll failed')), + }); + + const server = new ExecutorHttpServer({ + port: 0, + runStore: createMockRunStore(), + runner, + }); + + const response = await request(server.callback).post('/runs/run-1/trigger'); + + expect(response.status).toBe(500); + expect(response.body).toEqual({ error: 'poll failed' }); + }); + }); + + describe('start / stop', () => { + it('should start and stop the server', async () => { + const server = new ExecutorHttpServer({ + port: 0, + runStore: createMockRunStore(), + runner: createMockRunner(), + }); + + await server.start(); + await expect(server.stop()).resolves.toBeUndefined(); + }); + + it('should handle stop when not started', async () => { + const server = new ExecutorHttpServer({ + port: 0, + runStore: createMockRunStore(), + runner: createMockRunner(), + }); + + await expect(server.stop()).resolves.toBeUndefined(); + }); + }); +}); diff --git a/packages/workflow-executor/test/runner.test.ts b/packages/workflow-executor/test/runner.test.ts new file mode 100644 index 0000000000..0ea16bd276 --- /dev/null +++ 
b/packages/workflow-executor/test/runner.test.ts @@ -0,0 +1,96 @@ +import type { AgentPort } from '../src/ports/agent-port'; +import type { RunStore } from '../src/ports/run-store'; +import type { WorkflowPort } from '../src/ports/workflow-port'; + +import ExecutorHttpServer from '../src/http/executor-http-server'; +import Runner from '../src/runner'; + +jest.mock('../src/http/executor-http-server'); + +const MockedExecutorHttpServer = ExecutorHttpServer as jest.MockedClass; + +function createRunnerConfig(overrides: { httpPort?: number } = {}) { + return { + agentPort: {} as AgentPort, + workflowPort: {} as WorkflowPort, + runStore: {} as RunStore, + pollingIntervalMs: 2000, + ...overrides, + }; +} + +describe('Runner', () => { + beforeEach(() => { + jest.clearAllMocks(); + MockedExecutorHttpServer.prototype.start = jest.fn().mockResolvedValue(undefined); + MockedExecutorHttpServer.prototype.stop = jest.fn().mockResolvedValue(undefined); + }); + + describe('start', () => { + it('should start the HTTP server when httpPort is configured', async () => { + const config = createRunnerConfig({ httpPort: 3100 }); + const runner = new Runner(config); + + await runner.start(); + + expect(MockedExecutorHttpServer).toHaveBeenCalledWith({ + port: 3100, + runStore: config.runStore, + runner, + }); + expect(MockedExecutorHttpServer.prototype.start).toHaveBeenCalled(); + }); + + it('should not start the HTTP server when httpPort is not configured', async () => { + const runner = new Runner(createRunnerConfig()); + + await runner.start(); + + expect(MockedExecutorHttpServer).not.toHaveBeenCalled(); + }); + + it('should not create a second HTTP server if already started', async () => { + const runner = new Runner(createRunnerConfig({ httpPort: 3100 })); + + await runner.start(); + await runner.start(); + + expect(MockedExecutorHttpServer).toHaveBeenCalledTimes(1); + }); + }); + + describe('stop', () => { + it('should stop the HTTP server when running', async () => { + const runner 
= new Runner(createRunnerConfig({ httpPort: 3100 })); + + await runner.start(); + await runner.stop(); + + expect(MockedExecutorHttpServer.prototype.stop).toHaveBeenCalled(); + }); + + it('should handle stop when no HTTP server is running', async () => { + const runner = new Runner(createRunnerConfig()); + + await expect(runner.stop()).resolves.toBeUndefined(); + }); + + it('should allow restarting after stop', async () => { + const runner = new Runner(createRunnerConfig({ httpPort: 3100 })); + + await runner.start(); + await runner.stop(); + await runner.start(); + + expect(MockedExecutorHttpServer).toHaveBeenCalledTimes(2); + }); + }); + + describe('triggerPoll', () => { + it('should resolve without error', async () => { + const runner = new Runner(createRunnerConfig()); + + await expect(runner.triggerPoll('run-1')).resolves.toBeUndefined(); + }); + }); +}); From 39de72a6db4f37f6d55302b7b7aa71e4be433da0 Mon Sep 17 00:00:00 2001 From: scra Date: Tue, 24 Mar 2026 11:41:28 +0100 Subject: [PATCH 11/18] refactor(workflow-executor): workflow steps (#1502) --- WORKFLOW-EXECUTOR-CONTRACT.md | 232 +++ packages/ai-proxy/src/index.ts | 7 + packages/ai-proxy/test/ai-client.test.ts | 12 +- packages/ai-proxy/test/errors.test.ts | 2 +- .../ai-proxy/test/langchain-adapter.test.ts | 9 +- .../ai-proxy/test/provider-dispatcher.test.ts | 8 +- packages/workflow-executor/CLAUDE.md | 11 +- packages/workflow-executor/jest.config.ts | 9 + packages/workflow-executor/package.json | 2 +- .../src/adapters/agent-client-agent-port.ts | 77 +- .../src/adapters/console-logger.ts | 7 + .../adapters/forest-server-workflow-port.ts | 10 + packages/workflow-executor/src/errors.ts | 177 +- .../src/executors/base-step-executor.ts | 275 ++- .../src/executors/condition-step-executor.ts | 76 +- .../load-related-record-step-executor.ts | 413 +++++ .../src/executors/mcp-task-step-executor.ts | 202 +++ .../executors/read-record-step-executor.ts | 177 +- .../executors/record-task-step-executor.ts | 23 + 
.../src/executors/safe-agent-port.ts | 39 + .../src/executors/step-executor-factory.ts | 109 ++ .../summary/step-execution-formatters.ts | 60 + .../executors/summary/step-summary-builder.ts | 43 + .../trigger-record-action-step-executor.ts | 164 ++ .../executors/update-record-step-executor.ts | 175 ++ .../src/http/executor-http-server.ts | 25 +- packages/workflow-executor/src/index.ts | 50 +- .../workflow-executor/src/ports/agent-port.ts | 41 +- .../src/ports/logger-port.ts | 3 + .../src/ports/workflow-port.ts | 5 +- packages/workflow-executor/src/runner.ts | 164 +- .../workflow-executor/src/types/execution.ts | 16 +- .../workflow-executor/src/types/record.ts | 2 + .../src/types/step-definition.ts | 21 +- .../src/types/step-execution-data.ts | 123 +- .../src/types/step-outcome.ts | 30 +- .../adapters/agent-client-agent-port.test.ts | 122 +- .../forest-server-workflow-port.test.ts | 34 + .../test/executors/base-step-executor.test.ts | 395 ++--- .../executors/condition-step-executor.test.ts | 23 +- .../load-related-record-step-executor.test.ts | 1497 +++++++++++++++++ .../executors/mcp-task-step-executor.test.ts | 726 ++++++++ .../read-record-step-executor.test.ts | 201 ++- .../test/executors/safe-agent-port.test.ts | 173 ++ .../step-execution-formatters.test.ts | 141 ++ .../executors/step-summary-builder.test.ts | 284 ++++ ...rigger-record-action-step-executor.test.ts | 920 ++++++++++ .../update-record-step-executor.test.ts | 875 ++++++++++ .../test/http/executor-http-server.test.ts | 26 +- packages/workflow-executor/test/index.test.ts | 5 +- .../workflow-executor/test/runner.test.ts | 792 ++++++++- .../test/types/step-outcome.test.ts | 32 + yarn.lock | 39 - 53 files changed, 8238 insertions(+), 846 deletions(-) create mode 100644 WORKFLOW-EXECUTOR-CONTRACT.md create mode 100644 packages/workflow-executor/src/adapters/console-logger.ts create mode 100644 packages/workflow-executor/src/executors/load-related-record-step-executor.ts create mode 100644 
packages/workflow-executor/src/executors/mcp-task-step-executor.ts create mode 100644 packages/workflow-executor/src/executors/record-task-step-executor.ts create mode 100644 packages/workflow-executor/src/executors/safe-agent-port.ts create mode 100644 packages/workflow-executor/src/executors/step-executor-factory.ts create mode 100644 packages/workflow-executor/src/executors/summary/step-execution-formatters.ts create mode 100644 packages/workflow-executor/src/executors/summary/step-summary-builder.ts create mode 100644 packages/workflow-executor/src/executors/trigger-record-action-step-executor.ts create mode 100644 packages/workflow-executor/src/executors/update-record-step-executor.ts create mode 100644 packages/workflow-executor/src/ports/logger-port.ts create mode 100644 packages/workflow-executor/test/executors/load-related-record-step-executor.test.ts create mode 100644 packages/workflow-executor/test/executors/mcp-task-step-executor.test.ts create mode 100644 packages/workflow-executor/test/executors/safe-agent-port.test.ts create mode 100644 packages/workflow-executor/test/executors/step-execution-formatters.test.ts create mode 100644 packages/workflow-executor/test/executors/step-summary-builder.test.ts create mode 100644 packages/workflow-executor/test/executors/trigger-record-action-step-executor.test.ts create mode 100644 packages/workflow-executor/test/executors/update-record-step-executor.test.ts create mode 100644 packages/workflow-executor/test/types/step-outcome.test.ts diff --git a/WORKFLOW-EXECUTOR-CONTRACT.md b/WORKFLOW-EXECUTOR-CONTRACT.md new file mode 100644 index 0000000000..1313a0b13f --- /dev/null +++ b/WORKFLOW-EXECUTOR-CONTRACT.md @@ -0,0 +1,232 @@ +# Workflow Executor — Contract Types + +> Types exchanged between the **orchestrator (server)**, the **executor (agent-nodejs)**, and the **frontend**. +> Last updated: 2026-03-24 + +--- + +## 1. 
Polling + +**`GET /liana/v1/workflow-step-executions/pending?runId=`** + +The executor polls for the current pending step of a run. The server must return **one object** (not an array), or `null` if the run is not found. + +```typescript +interface PendingStepExecution { + runId: string; + stepId: string; + stepIndex: number; + baseRecordRef: RecordRef; + stepDefinition: StepDefinition; + previousSteps: Step[]; + userConfirmed?: boolean; // true = user confirmed a pending action on this step +} +``` + +> **`null` response** → executor throws `RunNotFoundError` → HTTP 404 returned to caller. + +### RecordRef + +Lightweight pointer to a specific record. + +```typescript +interface RecordRef { + collectionName: string; + recordId: Array; + stepIndex: number; // index of the workflow step that loaded this record +} +``` + +### Step + +History entry for an already-executed step (used in `previousSteps`). + +```typescript +interface Step { + stepDefinition: StepDefinition; + stepOutcome: StepOutcome; +} +``` + +### StepDefinition + +Discriminated union on `type`. + +```typescript +type StepDefinition = + | ConditionStepDefinition + | RecordTaskStepDefinition + | McpTaskStepDefinition; + +interface ConditionStepDefinition { + type: "condition"; + options: [string, ...string[]]; // at least one option required + prompt?: string; + aiConfigName?: string; +} + +interface RecordTaskStepDefinition { + type: "read-record" + | "update-record" + | "trigger-action" + | "load-related-record"; + prompt?: string; + aiConfigName?: string; + automaticExecution?: boolean; +} + +interface McpTaskStepDefinition { + type: "mcp-task"; + mcpServerId?: string; + prompt?: string; + aiConfigName?: string; + automaticExecution?: boolean; +} +``` + +### StepOutcome + +What the executor previously reported for each past step (used in `previousSteps`). 
+ +```typescript +type StepOutcome = + | ConditionStepOutcome + | RecordTaskStepOutcome + | McpTaskStepOutcome; + +interface ConditionStepOutcome { + type: "condition"; + stepId: string; + stepIndex: number; + status: "success" | "error" | "manual-decision"; + selectedOption?: string; // present when status = "success" + error?: string; // present when status = "error" +} + +interface RecordTaskStepOutcome { + type: "record-task"; + stepId: string; + stepIndex: number; + status: "success" | "error" | "awaiting-input"; + error?: string; // present when status = "error" +} + +interface McpTaskStepOutcome { + type: "mcp-task"; + stepId: string; + stepIndex: number; + status: "success" | "error" | "awaiting-input"; + error?: string; // present when status = "error" +} +``` + +--- + +## 2. Step Result + +**`POST /liana/v1/workflow-step-executions//complete`** + +After executing a step, the executor posts the outcome back to the server. The body is one of the `StepOutcome` shapes above. + +> ⚠️ **NEVER contains client data** (field values, AI reasoning, etc.) — those stay in the `RunStore` on the client side. + +--- + +## 3. Pending Data + +Steps that require user input pause with `status: "awaiting-input"`. The frontend writes `pendingData` to unblock them via a dedicated endpoint on the executor HTTP server. + +> **TODO** — The pending-data write endpoint is not yet implemented. Route, method, and per-step-type body shapes are TBD (PRD-240). + +Once written, the frontend calls `POST /runs/:runId/trigger` and the executor resumes with `userConfirmed: true`. + +### update-record — user picks a field + value to write + +> **TODO** — Pending-data write endpoint TBD (PRD-240). + +```typescript +interface UpdateRecordPendingData { + name: string; // technical field name + displayName: string; // label shown in the UI + value: string; // value chosen by the user +} +``` + +### trigger-action — user confirmation only + +No payload required from the frontend. 
The executor selects the action and writes `pendingData` itself (action name + displayName) to the RunStore. The frontend just confirms: + +``` +POST /runs/:runId/trigger +``` + +### load-related-record — user picks the relation and/or the record + +The frontend can override **both** the relation (field) and the selected record. + +> **Current status** — The frontend cannot yet override the AI selection. The executor HTTP server does not yet expose the pending-data write endpoint. Until it is implemented, the executor writes the AI's pick directly into `selectedRecordId`. + +```typescript +// Written by the executor; overwritable by the frontend via the pending-data endpoint (TBD) +interface LoadRelatedRecordPendingData { + name: string; // technical relation name + displayName: string; // label shown in the UI + relatedCollectionName: string; // collection of the related record + suggestedFields?: string[]; // fields suggested for display + selectedRecordId: Array; // AI's pick; overwritten by the frontend via the pending-data endpoint +} +``` + +The executor initially writes the AI's pick into `selectedRecordId`. The pending-data endpoint overwrites it (and optionally `name`, `displayName`, `relatedCollectionName`) when the user changes the selection. + +#### Future endpoint — pending-data write (not yet implemented) + +> **TODO** — Route and method TBD (PRD-240). + +Request body: + +```typescript +{ + selectedRecordId?: Array; // record chosen by the user + name?: string; // relation changed + displayName?: string; // relation changed + relatedCollectionName?: string; // required if name is provided +} +``` + +Response: `204 No Content`. + +The frontend calls this endpoint **before** `POST /runs/:runId/trigger`. On the next poll, `userConfirmed: true` and the executor reads `selectedRecordId` from the RunStore. + +### mcp-task — user confirmation only + +No payload required from the frontend. 
The executor selects the tool and writes `pendingData` itself (tool name + input) to the RunStore. The frontend just confirms: + +``` +POST /runs/:runId/trigger +``` + +The executor resumes with `userConfirmed: true` and executes the pre-selected tool. + +--- + +## Flow Summary + +``` +Orchestrator ──► GET pending?runId=X ──► Executor + │ + executes step + │ + ┌───────────────┴───────────────┐ + needs input done + │ │ + status: awaiting-input POST /complete + │ (StepOutcome) + Frontend writes pendingData + to executor HTTP server TODO: route TBD + │ + POST /runs/:runId/trigger + (next poll: userConfirmed = true) + │ + Executor resumes +``` diff --git a/packages/ai-proxy/src/index.ts b/packages/ai-proxy/src/index.ts index c355e0f9bb..6fa9cb86a5 100644 --- a/packages/ai-proxy/src/index.ts +++ b/packages/ai-proxy/src/index.ts @@ -8,6 +8,7 @@ export { default as ProviderDispatcher } from './provider-dispatcher'; export * from './provider-dispatcher'; export * from './ai-client'; export * from './remote-tools'; +export { default as RemoteTool } from './remote-tool'; export * from './router'; export * from './mcp-client'; export * from './oauth-token-injector'; @@ -16,3 +17,9 @@ export * from './errors'; export function validMcpConfigurationOrThrow(mcpConfig: McpConfiguration) { return McpConfigChecker.check(mcpConfig); } + +export type { BaseChatModel } from '@langchain/core/language_models/chat_models'; +export type { BaseMessage } from '@langchain/core/messages'; +export { HumanMessage, SystemMessage } from '@langchain/core/messages'; +export type { StructuredToolInterface } from '@langchain/core/tools'; +export { DynamicStructuredTool } from '@langchain/core/tools'; diff --git a/packages/ai-proxy/test/ai-client.test.ts b/packages/ai-proxy/test/ai-client.test.ts index 6c6929dd2c..f926eecbf2 100644 --- a/packages/ai-proxy/test/ai-client.test.ts +++ b/packages/ai-proxy/test/ai-client.test.ts @@ -23,9 +23,7 @@ describe('Model validation', () => { expect( () => new 
AiClient({ - aiConfigurations: [ - { name: 'test', provider: 'openai', apiKey: 'dev', model: 'gpt-4' }, - ], + aiConfigurations: [{ name: 'test', provider: 'openai', apiKey: 'dev', model: 'gpt-4' }], }), ).toThrow(AIModelNotSupportedError); }); @@ -34,9 +32,7 @@ describe('Model validation', () => { expect( () => new AiClient({ - aiConfigurations: [ - { name: 'test', provider: 'openai', apiKey: 'dev', model: 'gpt-4o' }, - ], + aiConfigurations: [{ name: 'test', provider: 'openai', apiKey: 'dev', model: 'gpt-4o' }], }), ).not.toThrow(); }); @@ -143,9 +139,7 @@ describe('getModel', () => { 'Warn', expect.stringContaining("AI configuration 'non-existent' not found"), ); - expect(createBaseChatModelMock).toHaveBeenCalledWith( - expect.objectContaining({ name: 'gpt4' }), - ); + expect(createBaseChatModelMock).toHaveBeenCalledWith(expect.objectContaining({ name: 'gpt4' })); expect(result).toBe(fakeModel); }); }); diff --git a/packages/ai-proxy/test/errors.test.ts b/packages/ai-proxy/test/errors.test.ts index c0817275a9..8cfab73fec 100644 --- a/packages/ai-proxy/test/errors.test.ts +++ b/packages/ai-proxy/test/errors.test.ts @@ -17,9 +17,9 @@ import { AIProviderError, AIProviderUnavailableError, AITooManyRequestsError, - AIUnauthorizedError, AIToolNotFoundError, AIToolUnprocessableError, + AIUnauthorizedError, McpConfigError, McpConflictError, McpConnectionError, diff --git a/packages/ai-proxy/test/langchain-adapter.test.ts b/packages/ai-proxy/test/langchain-adapter.test.ts index cc7ca89aea..1adfd64fe7 100644 --- a/packages/ai-proxy/test/langchain-adapter.test.ts +++ b/packages/ai-proxy/test/langchain-adapter.test.ts @@ -90,15 +90,11 @@ describe('LangChainAdapter', () => { { role: 'assistant', content: '', - tool_calls: [ - { id: 'call_1', function: { name: 'my_tool', arguments: 'not-json' } }, - ], + tool_calls: [{ id: 'call_1', function: { name: 'my_tool', arguments: 'not-json' } }], }, ]), ).toThrow( - new AIBadRequestError( - "Invalid JSON in tool_calls arguments for 
tool 'my_tool': not-json", - ), + new AIBadRequestError("Invalid JSON in tool_calls arguments for tool 'my_tool': not-json"), ); }); }); @@ -256,5 +252,4 @@ describe('LangChainAdapter', () => { ); }); }); - }); diff --git a/packages/ai-proxy/test/provider-dispatcher.test.ts b/packages/ai-proxy/test/provider-dispatcher.test.ts index 7cefcf3dc4..0138fa0fda 100644 --- a/packages/ai-proxy/test/provider-dispatcher.test.ts +++ b/packages/ai-proxy/test/provider-dispatcher.test.ts @@ -181,9 +181,7 @@ describe('ProviderDispatcher', () => { const thrown = await dispatcher.dispatch(buildBody()).catch(e => e); expect(thrown).toBeInstanceOf(AIProviderUnavailableError); - expect(thrown.message).toBe( - 'OpenAI server error (HTTP 500): Internal Server Error', - ); + expect(thrown.message).toBe('OpenAI server error (HTTP 500): Internal Server Error'); expect(thrown.provider).toBe('OpenAI'); expect(thrown.providerStatusCode).toBe(500); expect(thrown.baseBusinessErrorName).toBe('InternalServerError'); @@ -468,9 +466,7 @@ describe('ProviderDispatcher', () => { .catch(e => e); expect(thrown).toBeInstanceOf(AIProviderUnavailableError); - expect(thrown.message).toBe( - 'Anthropic server error (HTTP 503): Service Unavailable', - ); + expect(thrown.message).toBe('Anthropic server error (HTTP 503): Service Unavailable'); expect(thrown.provider).toBe('Anthropic'); expect(thrown.providerStatusCode).toBe(503); expect(thrown.baseBusinessErrorName).toBe('InternalServerError'); diff --git a/packages/workflow-executor/CLAUDE.md b/packages/workflow-executor/CLAUDE.md index 333bfdee1a..a54d599883 100644 --- a/packages/workflow-executor/CLAUDE.md +++ b/packages/workflow-executor/CLAUDE.md @@ -42,7 +42,7 @@ Front ◀──▶ Orchestrator ◀──pull/push──▶ Executor ── ``` src/ -├── errors.ts # WorkflowExecutorError, MissingToolCallError, MalformedToolCallError, NoRecordsError, NoReadableFieldsError +├── errors.ts # WorkflowExecutorError, MissingToolCallError, MalformedToolCallError, NoRecordsError, 
NoReadableFieldsError, NoWritableFieldsError, NoActionsError, StepPersistenceError, NoRelationshipFieldsError, RelatedRecordNotFoundError ├── runner.ts # Runner class — main entry point (start/stop/triggerPoll, HTTP server wiring) ├── types/ # Core type definitions (@draft) │ ├── step-definition.ts # StepType enum + step definition interfaces @@ -60,7 +60,10 @@ src/ ├── executors/ # Step executor implementations │ ├── base-step-executor.ts # Abstract base class (context injection + shared helpers) │ ├── condition-step-executor.ts # AI-powered condition step (chooses among options) -│ └── read-record-step-executor.ts # AI-powered record field reading step +│ ├── read-record-step-executor.ts # AI-powered record field reading step +│ ├── update-record-step-executor.ts # AI-powered record field update step (with confirmation flow) +│ ├── trigger-record-action-step-executor.ts # AI-powered action trigger step (with confirmation flow) +│ └── load-related-record-step-executor.ts # AI-powered relation loading step (with confirmation flow) ├── http/ # HTTP server (optional, for frontend data access) │ └── executor-http-server.ts # Koa server: GET /runs/:runId, POST /runs/:runId/trigger └── index.ts # Barrel exports @@ -73,7 +76,11 @@ src/ - **Privacy** — Zero client data leaves the client's infrastructure. `StepOutcome` is sent to the orchestrator and must NEVER contain client data. Privacy-sensitive information (e.g. AI reasoning) must stay in `StepExecutionData` (persisted in the RunStore, client-side only). - **Ports (IO injection)** — All external IO goes through injected port interfaces, keeping the core pure and testable. - **AI integration** — Uses `@langchain/core` (`BaseChatModel`, `DynamicStructuredTool`) for AI-powered steps. `ExecutionContext.model` is a `BaseChatModel`. +- **Error hierarchy** — All domain errors must extend `WorkflowExecutorError` (defined in `src/errors.ts`). 
This ensures executors can distinguish domain errors (caught → error outcome) from infrastructure errors (uncaught → propagate to caller). Never throw a plain `Error` for a domain error case. +- **Dual error messages** — `WorkflowExecutorError` carries two messages: `message` (technical, for dev logs) and `userMessage` (human-readable, surfaced to the Forest Admin UI via `stepOutcome.error`). The mapping happens in a single place: `base-step-executor.ts` uses `error.userMessage` when building the error outcome. When adding a new error subclass, always provide a distinct `userMessage` oriented toward end-users (no collection names, field names, or AI internals). If `userMessage` is omitted in the constructor call, it falls back to `message`. +- **displayName in AI tools** — All `DynamicStructuredTool` schemas and system message prompts must use `displayName`, never `fieldName`. `displayName` is a Forest Admin frontend feature that replaces the technical field/relation/action name with a product-oriented label configured by the Forest Admin admin. End users write their workflow prompts using these display names, not the underlying technical names. After an AI tool call returns display names, map them back to `fieldName`/`name` before using them in datasource operations (e.g. filtering record values, calling `getRecord`). - **No recovery/retry** — Once the executor returns a step result to the orchestrator, the step is considered executed. There is no mechanism to re-dispatch a step, so executors must NOT include recovery checks (e.g. checking the RunStore for cached results before executing). Each step executes exactly once. +- **Fetched steps must be executed** — Any step retrieved from the orchestrator via `getPendingStepExecutions()` must be executed. Silently discarding a fetched step (e.g. filtering it out by `runId` after fetching) violates the executor contract: the orchestrator assumes execution is guaranteed once the step is dispatched. 
The only valid filter before executing is deduplication via `inFlightSteps` (to avoid running the same step twice concurrently). ## Commands diff --git a/packages/workflow-executor/jest.config.ts b/packages/workflow-executor/jest.config.ts index d622773e8a..695cb0997c 100644 --- a/packages/workflow-executor/jest.config.ts +++ b/packages/workflow-executor/jest.config.ts @@ -1,8 +1,17 @@ /* eslint-disable import/no-relative-packages */ +import path from 'path'; + import jestConfig from '../../jest.config'; +// Jest < 30 doesn't resolve wildcard exports in package.json. +// @anthropic-ai/sdk uses "./lib/*" exports that need this workaround. +const anthropicSdkDir = path.dirname(require.resolve('@anthropic-ai/sdk')); + export default { ...jestConfig, collectCoverageFrom: ['/src/**/*.ts'], testMatch: ['/test/**/*.test.ts'], + moduleNameMapper: { + '^@anthropic-ai/sdk/(.*)$': `${anthropicSdkDir}/$1`, + }, }; diff --git a/packages/workflow-executor/package.json b/packages/workflow-executor/package.json index cf94a502ea..4a6a93b0a4 100644 --- a/packages/workflow-executor/package.json +++ b/packages/workflow-executor/package.json @@ -23,10 +23,10 @@ "test": "jest" }, "dependencies": { + "@forestadmin/ai-proxy": "1.6.1", "@forestadmin/agent-client": "1.4.13", "@forestadmin/forestadmin-client": "1.37.17", "@koa/router": "^13.1.0", - "@langchain/core": "1.1.33", "koa": "^3.0.1", "zod": "4.3.6" }, diff --git a/packages/workflow-executor/src/adapters/agent-client-agent-port.ts b/packages/workflow-executor/src/adapters/agent-client-agent-port.ts index cf8949a1a6..963e7de4be 100644 --- a/packages/workflow-executor/src/adapters/agent-client-agent-port.ts +++ b/packages/workflow-executor/src/adapters/agent-client-agent-port.ts @@ -1,4 +1,10 @@ -import type { AgentPort } from '../ports/agent-port'; +import type { + AgentPort, + ExecuteActionQuery, + GetRecordQuery, + GetRelatedDataQuery, + UpdateRecordQuery, +} from '../ports/agent-port'; import type { CollectionSchema } from 
'../types/record'; import type { RemoteAgentClient, SelectOptions } from '@forestadmin/agent-client'; @@ -6,10 +12,10 @@ import { RecordNotFoundError } from '../errors'; function buildPkFilter( primaryKeyFields: string[], - recordId: Array, + id: Array, ): SelectOptions['filters'] { if (primaryKeyFields.length === 1) { - return { field: primaryKeyFields[0], operator: 'Equal', value: recordId[0] }; + return { field: primaryKeyFields[0], operator: 'Equal', value: id[0] }; } return { @@ -17,14 +23,14 @@ function buildPkFilter( conditions: primaryKeyFields.map((field, i) => ({ field, operator: 'Equal', - value: recordId[i], + value: id[i], })), }; } // agent-client methods (update, relation, action) still expect the pipe-encoded string format -function encodePk(recordId: Array): string { - return recordId.map(v => String(v)).join('|'); +function encodePk(id: Array): string { + return id.map(v => String(v)).join('|'); } function extractRecordId( @@ -46,44 +52,39 @@ export default class AgentClientAgentPort implements AgentPort { this.collectionSchemas = params.collectionSchemas; } - async getRecord(collectionName: string, recordId: Array, fieldNames?: string[]) { - const schema = this.resolveSchema(collectionName); - const records = await this.client.collection(collectionName).list>({ - filters: buildPkFilter(schema.primaryKeyFields, recordId), + async getRecord({ collection, id, fields }: GetRecordQuery) { + const schema = this.resolveSchema(collection); + const records = await this.client.collection(collection).list>({ + filters: buildPkFilter(schema.primaryKeyFields, id), pagination: { size: 1, number: 1 }, - ...(fieldNames?.length && { fields: fieldNames }), + ...(fields?.length && { fields }), }); if (records.length === 0) { - throw new RecordNotFoundError(collectionName, encodePk(recordId)); + throw new RecordNotFoundError(collection, encodePk(id)); } - return { collectionName, recordId, values: records[0] }; + return { collectionName: collection, recordId: id, 
values: records[0] }; } - async updateRecord( - collectionName: string, - recordId: Array, - values: Record, - ) { + async updateRecord({ collection, id, values }: UpdateRecordQuery) { const updatedRecord = await this.client - .collection(collectionName) - .update>(encodePk(recordId), values); + .collection(collection) + .update>(encodePk(id), values); - return { collectionName, recordId, values: updatedRecord }; + return { collectionName: collection, recordId: id, values: updatedRecord }; } - async getRelatedData( - collectionName: string, - recordId: Array, - relationName: string, - ) { - const relatedSchema = this.resolveSchema(relationName); + async getRelatedData({ collection, id, relation, limit, fields }: GetRelatedDataQuery) { + const relatedSchema = this.resolveSchema(relation); const records = await this.client - .collection(collectionName) - .relation(relationName, encodePk(recordId)) - .list>(); + .collection(collection) + .relation(relation, encodePk(id)) + .list>({ + ...(limit !== null && { pagination: { size: limit, number: 1 } }), + ...(fields?.length && { fields }), + }); return records.map(record => ({ collectionName: relatedSchema.collectionName, @@ -92,17 +93,11 @@ export default class AgentClientAgentPort implements AgentPort { })); } - async executeAction( - collectionName: string, - actionName: string, - recordIds: Array[], - ): Promise { - const encodedIds = recordIds.map(id => encodePk(id)); - const action = await this.client - .collection(collectionName) - .action(actionName, { recordIds: encodedIds }); - - return action.execute(); + async executeAction({ collection, action, id }: ExecuteActionQuery): Promise { + const encodedId = id?.length ? 
[encodePk(id)] : []; + const act = await this.client.collection(collection).action(action, { recordIds: encodedId }); + + return act.execute(); } private resolveSchema(collectionName: string): CollectionSchema { diff --git a/packages/workflow-executor/src/adapters/console-logger.ts b/packages/workflow-executor/src/adapters/console-logger.ts new file mode 100644 index 0000000000..cbe989ab33 --- /dev/null +++ b/packages/workflow-executor/src/adapters/console-logger.ts @@ -0,0 +1,7 @@ +import type { Logger } from '../ports/logger-port'; + +export default class ConsoleLogger implements Logger { + error(message: string, context: Record): void { + console.error(JSON.stringify({ message, timestamp: new Date().toISOString(), ...context })); + } +} diff --git a/packages/workflow-executor/src/adapters/forest-server-workflow-port.ts b/packages/workflow-executor/src/adapters/forest-server-workflow-port.ts index 16037570bd..47f45a6c1f 100644 --- a/packages/workflow-executor/src/adapters/forest-server-workflow-port.ts +++ b/packages/workflow-executor/src/adapters/forest-server-workflow-port.ts @@ -9,6 +9,8 @@ import { ServerUtils } from '@forestadmin/forestadmin-client'; // TODO: finalize route paths with the team — these are placeholders const ROUTES = { pendingStepExecutions: '/liana/v1/workflow-step-executions/pending', + pendingStepExecutionForRun: (runId: string) => + `/liana/v1/workflow-step-executions/pending?runId=${encodeURIComponent(runId)}`, updateStepExecution: (runId: string) => `/liana/v1/workflow-step-executions/${runId}/complete`, collectionSchema: (collectionName: string) => `/liana/v1/collections/${collectionName}`, mcpServerConfigs: '/liana/mcp-server-configs-with-details', @@ -29,6 +31,14 @@ export default class ForestServerWorkflowPort implements WorkflowPort { ); } + async getPendingStepExecutionsForRun(runId: string): Promise { + return ServerUtils.query( + this.options, + 'get', + ROUTES.pendingStepExecutionForRun(runId), + ); + } + async 
updateStepExecution(runId: string, stepOutcome: StepOutcome): Promise { await ServerUtils.query( this.options, diff --git a/packages/workflow-executor/src/errors.ts b/packages/workflow-executor/src/errors.ts index b835c391fa..4560bafdeb 100644 --- a/packages/workflow-executor/src/errors.ts +++ b/packages/workflow-executor/src/errors.ts @@ -1,15 +1,28 @@ /* eslint-disable max-classes-per-file */ -export class WorkflowExecutorError extends Error { - constructor(message: string) { +export function causeMessage(error: unknown): string | undefined { + const { cause } = error as { cause?: unknown }; + + return cause instanceof Error ? cause.message : undefined; +} + +export abstract class WorkflowExecutorError extends Error { + readonly userMessage: string; + cause?: unknown; + + constructor(message: string, userMessage?: string) { super(message); this.name = this.constructor.name; + this.userMessage = userMessage ?? message; } } export class MissingToolCallError extends WorkflowExecutorError { constructor() { - super('AI did not return a tool call'); + super( + 'AI did not return a tool call', + "The AI couldn't decide what to do. Try rephrasing the step's prompt.", + ); } } @@ -17,14 +30,20 @@ export class MalformedToolCallError extends WorkflowExecutorError { readonly toolName: string; constructor(toolName: string, details: string) { - super(`AI returned a malformed tool call for "${toolName}": ${details}`); + super( + `AI returned a malformed tool call for "${toolName}": ${details}`, + "The AI returned an unexpected response. Try rephrasing the step's prompt.", + ); this.toolName = toolName; } } export class RecordNotFoundError extends WorkflowExecutorError { constructor(collectionName: string, recordId: string) { - super(`Record not found: collection "${collectionName}", id "${recordId}"`); + super( + `Record not found: collection "${collectionName}", id "${recordId}"`, + 'The record no longer exists. 
It may have been deleted.', + ); } } @@ -36,12 +55,156 @@ export class NoRecordsError extends WorkflowExecutorError { export class NoReadableFieldsError extends WorkflowExecutorError { constructor(collectionName: string) { - super(`No readable fields on record from collection "${collectionName}"`); + super( + `No readable fields on record from collection "${collectionName}"`, + 'This record type has no readable fields configured in Forest Admin.', + ); } } export class NoResolvedFieldsError extends WorkflowExecutorError { constructor(fieldNames: string[]) { - super(`None of the requested fields could be resolved: ${fieldNames.join(', ')}`); + super( + `None of the requested fields could be resolved: ${fieldNames.join(', ')}`, + "The AI selected fields that don't exist on this record. Try rephrasing the step's prompt.", + ); + } +} + +export class NoWritableFieldsError extends WorkflowExecutorError { + constructor(collectionName: string) { + super( + `No writable fields on record from collection "${collectionName}"`, + 'This record type has no editable fields configured in Forest Admin.', + ); + } +} + +export class NoActionsError extends WorkflowExecutorError { + constructor(collectionName: string) { + super( + `No actions available on collection "${collectionName}"`, + 'No actions are available on this record.', + ); + } +} + +/** + * Thrown when a step's side effect succeeded (action/update/decision) + * but the resulting state could not be persisted to the RunStore. + */ +export class StepPersistenceError extends WorkflowExecutorError { + constructor(message: string, cause?: unknown) { + super(message, 'The step result could not be saved. 
Please retry.'); + if (cause !== undefined) this.cause = cause; + } +} + +export class NoRelationshipFieldsError extends WorkflowExecutorError { + constructor(collectionName: string) { + super( + `No relationship fields on record from collection "${collectionName}"`, + 'This record type has no relations configured in Forest Admin.', + ); + } +} + +export class RelatedRecordNotFoundError extends WorkflowExecutorError { + constructor(collectionName: string, relationName: string) { + super( + `No related record found for relation "${relationName}" on collection "${collectionName}"`, + 'The related record could not be found. It may have been deleted.', + ); + } +} + +/** Thrown when the AI returns a response that violates expected constraints (bad index, empty selection, unknown identifier, etc.). */ +export class InvalidAIResponseError extends WorkflowExecutorError { + constructor(message: string) { + super(message, "The AI made an unexpected choice. Try rephrasing the step's prompt."); + } +} + +/** Thrown when a named relation is not found in the collection schema. */ +export class RelationNotFoundError extends WorkflowExecutorError { + constructor(name: string, collectionName: string) { + super( + `Relation "${name}" not found in collection "${collectionName}"`, + "The AI selected a relation that doesn't exist on this record. Try rephrasing the step's prompt.", + ); + } +} + +/** Thrown when a named field is not found in the collection schema. */ +export class FieldNotFoundError extends WorkflowExecutorError { + constructor(name: string, collectionName: string) { + super( + `Field "${name}" not found in collection "${collectionName}"`, + "The AI selected a field that doesn't exist on this record. Try rephrasing the step's prompt.", + ); + } +} + +/** Thrown when a named action is not found in the collection schema. 
*/ +export class ActionNotFoundError extends WorkflowExecutorError { + constructor(name: string, collectionName: string) { + super( + `Action "${name}" not found in collection "${collectionName}"`, + "The AI selected an action that doesn't exist on this record. Try rephrasing the step's prompt.", + ); + } +} + +/** Thrown when step execution state is invalid (missing execution record, missing pending data, etc.). */ +export class StepStateError extends WorkflowExecutorError { + constructor(message: string) { + super(message, 'An unexpected error occurred while processing this step.'); + } +} + +export class NoMcpToolsError extends WorkflowExecutorError { + constructor() { + super('No MCP tools available', 'No tools are available to execute this step.'); + } +} + +export class McpToolNotFoundError extends WorkflowExecutorError { + constructor(name: string) { + super( + `MCP tool "${name}" not found`, + "The AI selected a tool that doesn't exist. Try rephrasing the step's prompt.", + ); + } +} + +export class AgentPortError extends WorkflowExecutorError { + constructor(operation: string, cause: unknown) { + super( + `Agent port "${operation}" failed: ${cause instanceof Error ? cause.message : String(cause)}`, + 'An error occurred while accessing your data. Please try again.', + ); + this.cause = cause; + } +} + +export class McpToolInvocationError extends WorkflowExecutorError { + constructor(toolName: string, cause: unknown) { + super( + `MCP tool "${toolName}" invocation failed: ${ + cause instanceof Error ? cause.message : String(cause) + }`, + 'The tool failed to execute. 
Please try again or contact your administrator.', + ); + this.cause = cause; + } +} + +export class RunNotFoundError extends Error { + cause?: unknown; + + constructor(runId: string, cause?: unknown) { + super(`Run "${runId}" not found or unavailable`); + this.name = 'RunNotFoundError'; + if (cause !== undefined) this.cause = cause; } } diff --git a/packages/workflow-executor/src/executors/base-step-executor.ts b/packages/workflow-executor/src/executors/base-step-executor.ts index 2197843be8..4472ff6ba0 100644 --- a/packages/workflow-executor/src/executors/base-step-executor.ts +++ b/packages/workflow-executor/src/executors/base-step-executor.ts @@ -1,77 +1,177 @@ -import type { ExecutionContext, StepExecutionResult } from '../types/execution'; +import type { AgentPort } from '../ports/agent-port'; +import type { ExecutionContext, IStepExecutor, StepExecutionResult } from '../types/execution'; +import type { CollectionSchema, FieldSchema, RecordRef } from '../types/record'; import type { StepDefinition } from '../types/step-definition'; import type { StepExecutionData } from '../types/step-execution-data'; -import type { StepOutcome } from '../types/step-outcome'; -import type { AIMessage, BaseMessage } from '@langchain/core/messages'; -import type { DynamicStructuredTool } from '@langchain/core/tools'; +import type { BaseStepStatus } from '../types/step-outcome'; +import type { BaseMessage, StructuredToolInterface } from '@forestadmin/ai-proxy'; -import { SystemMessage } from '@langchain/core/messages'; +import { DynamicStructuredTool, HumanMessage, SystemMessage } from '@forestadmin/ai-proxy'; +import { z } from 'zod'; -import { MalformedToolCallError, MissingToolCallError } from '../errors'; -import { isExecutedStepOnExecutor } from '../types/step-execution-data'; +import { + InvalidAIResponseError, + MalformedToolCallError, + MissingToolCallError, + NoRecordsError, + StepStateError, + WorkflowExecutorError, +} from '../errors'; +import SafeAgentPort from 
'./safe-agent-port'; +import StepSummaryBuilder from './summary/step-summary-builder'; -export default abstract class BaseStepExecutor { +type WithPendingData = StepExecutionData & { pendingData?: object }; + +export default abstract class BaseStepExecutor + implements IStepExecutor +{ protected readonly context: ExecutionContext; + protected readonly agentPort: AgentPort; + + protected readonly schemaCache = new Map(); + constructor(context: ExecutionContext) { this.context = context; + this.agentPort = new SafeAgentPort(context.agentPort); + } + + async execute(): Promise { + try { + return await this.doExecute(); + } catch (error) { + if (error instanceof WorkflowExecutorError) { + if (error.cause !== undefined) { + this.context.logger.error(error.message, { + runId: this.context.runId, + stepId: this.context.stepId, + stepIndex: this.context.stepIndex, + cause: error.cause instanceof Error ? error.cause.message : String(error.cause), + stack: error.cause instanceof Error ? error.cause.stack : undefined, + }); + } + + return this.buildOutcomeResult({ status: 'error', error: error.userMessage }); + } + + const { cause: errorCause } = error as { cause?: unknown }; + this.context.logger.error('Unexpected error during step execution', { + runId: this.context.runId, + stepId: this.context.stepId, + stepIndex: this.context.stepIndex, + error: error instanceof Error ? error.message : String(error), + cause: errorCause instanceof Error ? errorCause.message : undefined, + stack: error instanceof Error ? error.stack : undefined, + }); + + return this.buildOutcomeResult({ + status: 'error', + error: 'Unexpected error during step execution', + }); + } } - abstract execute(): Promise; + protected abstract doExecute(): Promise; + + /** Find a field by displayName first, then fallback to fieldName. */ + protected findField(schema: CollectionSchema, name: string): FieldSchema | undefined { + return ( + schema.fields.find(f => f.displayName === name) ?? 
+ schema.fields.find(f => f.fieldName === name) + ); + } + + /** Builds a StepExecutionResult with the step-type-specific outcome shape. */ + protected abstract buildOutcomeResult(outcome: { + status: BaseStepStatus; + error?: string; + }): StepExecutionResult; /** - * Returns a SystemMessage array summarizing previously executed steps. - * Empty array when there is no history. Ready to spread into a messages array. + * Shared confirmation flow for executors that require user approval before acting. + * Handles the find → guard → skipped → delegate pattern. */ - protected async buildPreviousStepsMessages(): Promise { - if (!this.context.history.length) return []; + protected async handleConfirmationFlow( + typeDiscriminator: string, + resolveAndExecute: (execution: TExec) => Promise, + ): Promise { + const stepExecutions = await this.context.runStore.getStepExecutions(this.context.runId); + const execution = stepExecutions.find( + (e): e is TExec => + (e as TExec).type === typeDiscriminator && e.stepIndex === this.context.stepIndex, + ); + + if (!execution) { + throw new StepStateError( + `No execution record found for step at index ${this.context.stepIndex}`, + ); + } - const summary = await this.summarizePreviousSteps(); + if (!execution.pendingData) { + throw new StepStateError(`Step at index ${this.context.stepIndex} has no pending data`); + } - return [new SystemMessage(summary)]; + if (!this.context.userConfirmed) { + await this.context.runStore.saveStepExecution(this.context.runId, { + ...execution, + executionResult: { skipped: true }, + } as StepExecutionData); + + return this.buildOutcomeResult({ status: 'success' }); + } + + return resolveAndExecute(execution); } /** - * Builds a text summary of previously executed steps for AI prompts. - * Correlates history entries (step + stepOutcome pairs) with execution data - * from the RunStore (matched by stepOutcome.stepIndex). - * When no execution data is available, falls back to StepOutcome details. 
+ * Returns a SystemMessage array summarizing previously executed steps. + * Empty array when there is no history. Ready to spread into a messages array. */ - private async summarizePreviousSteps(): Promise { - const allStepExecutions = await this.context.runStore.getStepExecutions(this.context.runId); + protected async buildPreviousStepsMessages(): Promise { + if (!this.context.previousSteps.length) return []; - return this.context.history + const allStepExecutions = await this.context.runStore.getStepExecutions(this.context.runId); + const summary = this.context.previousSteps .map(({ stepDefinition, stepOutcome }) => { const execution = allStepExecutions.find(e => e.stepIndex === stepOutcome.stepIndex); - return this.buildStepSummary(stepDefinition, stepOutcome, execution); + return StepSummaryBuilder.build(stepDefinition, stepOutcome, execution); }) .join('\n\n'); + + return [new SystemMessage(summary)]; } - private buildStepSummary( - step: StepDefinition, - stepOutcome: StepOutcome, - execution: StepExecutionData | undefined, - ): string { - const prompt = step.prompt ?? '(no prompt)'; - const header = `Step "${stepOutcome.stepId}" (index ${stepOutcome.stepIndex}):`; - const lines = [header, ` Prompt: ${prompt}`]; - - if (isExecutedStepOnExecutor(execution)) { - if (execution.executionParams !== undefined) { - lines.push(` Input: ${JSON.stringify(execution.executionParams)}`); - } + /** + * Binds multiple tools to the model, invokes it, and returns the selected tool name + args. + * Throws MalformedToolCallError or MissingToolCallError on invalid AI responses. 
+ */ + protected async invokeWithTools>( + messages: BaseMessage[], + tools: StructuredToolInterface[], + ): Promise<{ toolName: string; args: T }> { + const modelWithTools = this.context.model.bindTools(tools, { tool_choice: 'any' }); + const response = await modelWithTools.invoke(messages); + const toolCall = response.tool_calls?.[0]; - if (execution.executionResult) { - lines.push(` Output: ${JSON.stringify(execution.executionResult)}`); + if (toolCall !== undefined) { + if (toolCall.args !== undefined && toolCall.args !== null) { + return { toolName: toolCall.name, args: toolCall.args as T }; } - } else { - const { stepId, stepIndex, type, ...historyDetails } = stepOutcome; - lines.push(` History: ${JSON.stringify(historyDetails)}`); + + throw new MalformedToolCallError(toolCall.name ?? 'unknown', 'args field is missing or null'); + } + + const invalidCall = response.invalid_tool_calls?.[0]; + + if (invalidCall) { + throw new MalformedToolCallError( + invalidCall.name ?? 'unknown', + invalidCall.error ?? 'no details available', + ); } - return lines.join('\n'); + throw new MissingToolCallError(); } /** @@ -82,29 +182,88 @@ export default abstract class BaseStepExecutor { - const modelWithTool = this.context.model.bindTools([tool], { tool_choice: 'any' }); - const response = await modelWithTool.invoke(messages); + return (await this.invokeWithTools(messages, [tool])).args; + } - return this.extractToolCallArgs(response); + /** Returns baseRecordRef + any related records loaded by previous steps. 
*/ + protected async getAvailableRecordRefs(): Promise { + const stepExecutions = await this.context.runStore.getStepExecutions(this.context.runId); + const relatedRecords = stepExecutions.flatMap(e => { + if ( + e.type === 'load-related-record' && + e.executionResult !== undefined && + 'record' in e.executionResult + ) { + return [e.executionResult.record]; + } + + return []; + }); + + return [this.context.baseRecordRef, ...relatedRecords]; } - /** - * Extracts the first tool call's args from an AI response. - * Throws if the AI returned a malformed tool call (invalid_tool_calls) or no tool call at all. - */ - private extractToolCallArgs>(response: AIMessage): T { - const toolCall = response.tool_calls?.[0]; - if (toolCall?.args) return toolCall.args as T; + /** Selects a record ref via AI when multiple are available, returns directly when only one. */ + protected async selectRecordRef( + records: RecordRef[], + prompt: string | undefined, + ): Promise { + if (records.length === 0) throw new NoRecordsError(); + if (records.length === 1) return records[0]; - const invalidCall = response.invalid_tool_calls?.[0]; + const identifiers = await Promise.all(records.map(r => this.toRecordIdentifier(r))); + const identifierTuple = identifiers as [string, ...string[]]; - if (invalidCall) { - throw new MalformedToolCallError( - invalidCall.name ?? 'unknown', - invalidCall.error ?? 'no details available', + const tool = new DynamicStructuredTool({ + name: 'select-record', + description: 'Select the most relevant record for this workflow step.', + schema: z.object({ + recordIdentifier: z.enum(identifierTuple), + }), + func: undefined, + }); + + const messages = [ + ...(await this.buildPreviousStepsMessages()), + new SystemMessage( + 'You are an AI agent selecting the most relevant record for a workflow step.\n' + + 'Choose the record whose collection best matches the user request.\n' + + 'Pay attention to the collection name of each record.', + ), + new HumanMessage(prompt ?? 
'Select the most relevant record.'), + ]; + + const { recordIdentifier } = await this.invokeWithTool<{ recordIdentifier: string }>( + messages, + tool, + ); + + const selectedIndex = identifiers.indexOf(recordIdentifier); + + if (selectedIndex === -1) { + throw new InvalidAIResponseError( + `AI selected record "${recordIdentifier}" which does not match any available record`, ); } - throw new MissingToolCallError(); + return records[selectedIndex]; + } + + /** Fetches a collection schema from WorkflowPort, with caching. */ + protected async getCollectionSchema(collectionName: string): Promise { + const cached = this.schemaCache.get(collectionName); + if (cached) return cached; + + const schema = await this.context.workflowPort.getCollectionSchema(collectionName); + this.schemaCache.set(collectionName, schema); + + return schema; + } + + /** Formats a record ref as "Step X - CollectionDisplayName #id". */ + protected async toRecordIdentifier(record: RecordRef): Promise { + const schema = await this.getCollectionSchema(record.collectionName); + + return `Step ${record.stepIndex} - ${schema.collectionDisplayName} #${record.recordId}`; } } diff --git a/packages/workflow-executor/src/executors/condition-step-executor.ts b/packages/workflow-executor/src/executors/condition-step-executor.ts index 217abdcff4..43fd995e3c 100644 --- a/packages/workflow-executor/src/executors/condition-step-executor.ts +++ b/packages/workflow-executor/src/executors/condition-step-executor.ts @@ -1,10 +1,11 @@ import type { StepExecutionResult } from '../types/execution'; import type { ConditionStepDefinition } from '../types/step-definition'; +import type { ConditionStepStatus } from '../types/step-outcome'; -import { HumanMessage, SystemMessage } from '@langchain/core/messages'; -import { DynamicStructuredTool } from '@langchain/core/tools'; +import { DynamicStructuredTool, HumanMessage, SystemMessage } from '@forestadmin/ai-proxy'; import { z } from 'zod'; +import { StepPersistenceError } 
from '../errors'; import BaseStepExecutor from './base-step-executor'; interface GatewayToolArgs { @@ -36,7 +37,22 @@ const GATEWAY_SYSTEM_PROMPT = `You are an AI agent selecting the correct option - Do not refer to yourself as "I" in the response, use a passive formulation instead.`; export default class ConditionStepExecutor extends BaseStepExecutor { - async execute(): Promise { + protected buildOutcomeResult(outcome: { + status: ConditionStepStatus; + error?: string; + selectedOption?: string; + }): StepExecutionResult { + return { + stepOutcome: { + type: 'condition', + stepId: this.context.stepId, + stepIndex: this.context.stepIndex, + ...outcome, + }, + }; + } + + protected async doExecute(): Promise { const { stepDefinition: step } = this.context; const tool = new DynamicStructuredTool({ @@ -62,50 +78,28 @@ export default class ConditionStepExecutor extends BaseStepExecutor(messages, tool); + const { option: selectedOption, reasoning } = args; try { - args = await this.invokeWithTool(messages, tool); - } catch (error: unknown) { - return { - stepOutcome: { - type: 'condition', - stepId: this.context.stepId, - stepIndex: this.context.stepIndex, - status: 'error', - error: (error as Error).message, - }, - }; + await this.context.runStore.saveStepExecution(this.context.runId, { + type: 'condition', + stepIndex: this.context.stepIndex, + executionParams: { answer: selectedOption, reasoning }, + executionResult: selectedOption ? { answer: selectedOption } : undefined, + }); + } catch (cause) { + throw new StepPersistenceError( + `Condition step state could not be persisted ` + + `(run "${this.context.runId}", step ${this.context.stepIndex})`, + cause, + ); } - const { option: selectedOption, reasoning } = args; - - await this.context.runStore.saveStepExecution(this.context.runId, { - type: 'condition', - stepIndex: this.context.stepIndex, - executionParams: { answer: selectedOption, reasoning }, - executionResult: selectedOption ? 
{ answer: selectedOption } : undefined, - }); - if (!selectedOption) { - return { - stepOutcome: { - type: 'condition', - stepId: this.context.stepId, - stepIndex: this.context.stepIndex, - status: 'manual-decision', - }, - }; + return this.buildOutcomeResult({ status: 'manual-decision' }); } - return { - stepOutcome: { - type: 'condition', - stepId: this.context.stepId, - stepIndex: this.context.stepIndex, - status: 'success', - selectedOption, - }, - }; + return this.buildOutcomeResult({ status: 'success', selectedOption }); } } diff --git a/packages/workflow-executor/src/executors/load-related-record-step-executor.ts b/packages/workflow-executor/src/executors/load-related-record-step-executor.ts new file mode 100644 index 0000000000..998157bb6a --- /dev/null +++ b/packages/workflow-executor/src/executors/load-related-record-step-executor.ts @@ -0,0 +1,413 @@ +import type { StepExecutionResult } from '../types/execution'; +import type { CollectionSchema, RecordData, RecordRef } from '../types/record'; +import type { RecordTaskStepDefinition } from '../types/step-definition'; +import type { LoadRelatedRecordStepExecutionData, RelationRef } from '../types/step-execution-data'; + +import { DynamicStructuredTool, HumanMessage, SystemMessage } from '@forestadmin/ai-proxy'; +import { z } from 'zod'; + +import { + InvalidAIResponseError, + NoRelationshipFieldsError, + RelatedRecordNotFoundError, + RelationNotFoundError, + StepPersistenceError, + StepStateError, +} from '../errors'; +import RecordTaskStepExecutor from './record-task-step-executor'; + +const SELECT_RELATION_SYSTEM_PROMPT = `You are an AI agent loading a related record based on a user request. +Select the relation to follow. + +Important rules: +- Be precise: only select the relation directly relevant to the request. +- Final answer is definitive, you won't receive any other input from the user. 
+- Do not refer to yourself as "I" in the response, use a passive formulation instead.`; + +const SELECT_FIELDS_SYSTEM_PROMPT = `You are an AI agent selecting the most relevant fields to identify a related record. +Choose the fields that are most useful for determining which record best matches the user request.`; + +const SELECT_RECORD_SYSTEM_PROMPT = `You are an AI agent selecting the most relevant related record from a list of candidates. +Choose the record that best matches the user request based on the provided field values.`; + +interface RelationTarget extends RelationRef { + selectedRecordRef: RecordRef; + relationType?: 'BelongsTo' | 'HasMany' | 'HasOne'; +} + +export default class LoadRelatedRecordStepExecutor extends RecordTaskStepExecutor { + protected async doExecute(): Promise { + // Branch A -- Re-entry with user confirmation + if (this.context.userConfirmed !== undefined) { + return this.handleConfirmation(); + } + + // Branches B & C -- First call + return this.handleFirstCall(); + } + + private async handleConfirmation(): Promise { + return this.handleConfirmationFlow( + 'load-related-record', + async execution => this.resolveFromSelection(execution), + ); + } + + private async handleFirstCall(): Promise { + const { stepDefinition: step } = this.context; + const records = await this.getAvailableRecordRefs(); + const selectedRecordRef = await this.selectRecordRef(records, step.prompt); + const schema = await this.getCollectionSchema(selectedRecordRef.collectionName); + const args = await this.selectRelation(schema, step.prompt); + const target = this.buildTarget(schema, args.relationName, selectedRecordRef); + + // Branch B -- automaticExecution + if (step.automaticExecution) { + return this.resolveAndLoadAutomatic(target); + } + + // Branch C -- pre-fetch candidates, await user confirmation + return this.saveAndAwaitInput(target); + } + + private buildTarget( + schema: CollectionSchema, + relationName: string, + selectedRecordRef: RecordRef, + ): 
RelationTarget { + const field = this.findField(schema, relationName); + + if (!field) { + throw new RelationNotFoundError(relationName, schema.collectionName); + } + + return { + selectedRecordRef, + displayName: field.displayName, + name: field.fieldName, + relationType: field.relationType, + }; + } + + /** + * Branch C: uses AI to select the best candidate, persists pendingData with suggestion, returns awaiting-input. + * Unlike persistAndReturn (Branches A/B), storage errors propagate directly here: + * the relation-load has not yet happened so the step can safely be retried. + */ + private async saveAndAwaitInput(target: RelationTarget): Promise { + const { selectedRecordRef, name, displayName } = target; + + const { relatedData, bestIndex, suggestedFields } = await this.selectBestFromRelatedData( + target, + 50, + ); + + const relatedCollectionName = relatedData[0].collectionName; + const selectedRecordId = relatedData[bestIndex].recordId; + + await this.context.runStore.saveStepExecution(this.context.runId, { + type: 'load-related-record', + stepIndex: this.context.stepIndex, + pendingData: { displayName, name, relatedCollectionName, suggestedFields, selectedRecordId }, + selectedRecordRef, + }); + + return this.buildOutcomeResult({ status: 'awaiting-input' }); + } + + /** Branch B: automatic execution. HasMany uses 2 AI calls; others take the first result. */ + private async resolveAndLoadAutomatic(target: RelationTarget): Promise { + const record = + target.relationType === 'HasMany' + ? await this.selectBestRelatedRecord(target) + : await this.fetchFirstCandidate(target); + + return this.persistAndReturn(record, target, undefined); + } + + /** + * Branch A: builds RecordRef from pendingData.selectedRecordId. + * No additional getRelatedData call. 
+ */ + private async resolveFromSelection( + execution: LoadRelatedRecordStepExecutionData, + ): Promise { + const { selectedRecordRef, pendingData } = execution; + + if (!pendingData) { + throw new StepStateError(`Step at index ${this.context.stepIndex} has no pending data`); + } + + const { name, displayName, relatedCollectionName, selectedRecordId } = pendingData; + + const record: RecordRef = { + collectionName: relatedCollectionName, + recordId: selectedRecordId, + stepIndex: this.context.stepIndex, + }; + + return this.persistAndReturn(record, { selectedRecordRef, name, displayName }, execution); + } + + /** + * Fetches up to `limit` related records and uses AI to select the best one when multiple exist. + * Returns the full RecordData array, the best index, and the AI-selected fields. + */ + private async selectBestFromRelatedData( + target: Pick, + limit: number, + ): Promise<{ relatedData: RecordData[]; bestIndex: number; suggestedFields: string[] }> { + const { selectedRecordRef, name } = target; + + const relatedData = await this.agentPort.getRelatedData({ + collection: selectedRecordRef.collectionName, + id: selectedRecordRef.recordId, + relation: name, + limit, + }); + + if (relatedData.length === 0) { + throw new RelatedRecordNotFoundError(selectedRecordRef.collectionName, name); + } + + if (relatedData.length === 1) { + return { relatedData, bestIndex: 0, suggestedFields: [] }; + } + + const relatedSchema = await this.getCollectionSchema(relatedData[0].collectionName); + const suggestedFields = await this.selectRelevantFields( + relatedSchema, + this.context.stepDefinition.prompt, + ); + const bestIndex = await this.selectBestRecordIndex( + relatedData, + suggestedFields, + this.context.stepDefinition.prompt, + ); + + return { relatedData, bestIndex, suggestedFields }; + } + + /** HasMany + automaticExecution: fetch top 50, then AI calls to select the best record. 
*/ + private async selectBestRelatedRecord(target: RelationTarget): Promise { + const { relatedData, bestIndex } = await this.selectBestFromRelatedData(target, 50); + + return this.toRecordRef(relatedData[bestIndex]); + } + + /** BelongsTo / HasOne: fetch 1 record and take it directly. */ + private async fetchFirstCandidate(target: RelationTarget): Promise { + const candidates = await this.fetchCandidates(target, 1); + + return candidates[0]; + } + + /** + * Fetches related records and converts them to RecordRefs. + * Throws RelatedRecordNotFoundError when the result is empty. + */ + private async fetchCandidates( + target: Pick, + limit: number, + ): Promise { + const { selectedRecordRef, name } = target; + const relatedData = await this.agentPort.getRelatedData({ + collection: selectedRecordRef.collectionName, + id: selectedRecordRef.recordId, + relation: name, + limit, + }); + + if (relatedData.length === 0) { + throw new RelatedRecordNotFoundError(selectedRecordRef.collectionName, name); + } + + return relatedData.map(r => this.toRecordRef(r)); + } + + /** Persists the loaded record ref and returns a success outcome. 
*/ + private async persistAndReturn( + record: RecordRef, + target: Pick, + existingExecution: LoadRelatedRecordStepExecutionData | undefined, + ): Promise { + const { selectedRecordRef, name, displayName } = target; + + try { + await this.context.runStore.saveStepExecution(this.context.runId, { + ...existingExecution, + type: 'load-related-record', + stepIndex: this.context.stepIndex, + executionParams: { displayName, name }, + executionResult: { relation: { name, displayName }, record }, + selectedRecordRef, + }); + } catch (cause) { + throw new StepPersistenceError( + `Related record loaded but step state could not be persisted ` + + `(run "${this.context.runId}", step ${this.context.stepIndex})`, + cause, + ); + } + + return this.buildOutcomeResult({ status: 'success' }); + } + + private async selectRelation( + schema: CollectionSchema, + prompt: string | undefined, + ): Promise<{ relationName: string; reasoning: string }> { + const tool = this.buildSelectRelationTool(schema); + const messages = [ + ...(await this.buildPreviousStepsMessages()), + new SystemMessage(SELECT_RELATION_SYSTEM_PROMPT), + new SystemMessage( + `The selected record belongs to the "${schema.collectionDisplayName}" collection.`, + ), + new HumanMessage(`**Request**: ${prompt ?? 
'Load the relevant related record.'}`), + ]; + + return this.invokeWithTool<{ relationName: string; reasoning: string }>(messages, tool); + } + + private buildSelectRelationTool(schema: CollectionSchema): DynamicStructuredTool { + const relationFields = schema.fields.filter(f => f.isRelationship); + + if (relationFields.length === 0) { + throw new NoRelationshipFieldsError(schema.collectionName); + } + + const displayNames = relationFields.map(f => f.displayName) as [string, ...string[]]; + const technicalNames = relationFields + .map(f => `${f.displayName} (technical name: ${f.fieldName})`) + .join(', '); + + return new DynamicStructuredTool({ + name: 'select-relation', + description: 'Select the relation to follow from the record.', + schema: z.object({ + relationName: z + .enum(displayNames) + .describe(`The name of the relation to follow. Available: ${technicalNames}`), + reasoning: z.string().describe('Why this relation was chosen'), + }), + func: undefined, + }); + } + + /** AI call 1 for HasMany: selects the most relevant fields to compare candidates. */ + private async selectRelevantFields( + schema: CollectionSchema, + prompt: string | undefined, + ): Promise { + const nonRelationFields = schema.fields.filter(f => !f.isRelationship); + + if (nonRelationFields.length === 0) return []; + + // Use displayName in both the enum and the prompt for consistency — the AI sees human-readable + // names throughout. Results are mapped back to technical fieldNames before returning. 
+ const displayNames = nonRelationFields.map(f => f.displayName) as [string, ...string[]]; + + const tool = new DynamicStructuredTool({ + name: 'select-fields', + description: 'Select the most relevant fields to identify the right record.', + schema: z.object({ + fieldNames: z + .array(z.enum(displayNames)) + .min(1) + .describe('Field names most useful for identifying the relevant record'), + }), + func: undefined, + }); + + const messages = [ + new SystemMessage(SELECT_FIELDS_SYSTEM_PROMPT), + new SystemMessage( + `The related records are from the "${schema.collectionDisplayName}" collection. ` + + `Available fields: ${nonRelationFields.map(f => f.displayName).join(', ')}.`, + ), + new HumanMessage(`**Request**: ${prompt ?? 'Select the most relevant record.'}`), + ]; + + const { fieldNames: selectedDisplayNames } = await this.invokeWithTool<{ + fieldNames: string[]; + }>(messages, tool); + + // Zod's .min(1) shapes the prompt but is NOT validated against the AI response. + // Guard explicitly to avoid silently passing all fields to selectBestRecordIndex. + if (selectedDisplayNames.length === 0) { + throw new InvalidAIResponseError( + `AI returned no field names for field selection in collection "${schema.collectionName}"`, + ); + } + + // Map display names back to technical field names — values in RecordData are keyed by fieldName. + return selectedDisplayNames.map( + dn => nonRelationFields.find(f => f.displayName === dn)?.fieldName ?? dn, + ); + } + + /** AI call 2 for HasMany: selects the best record by index from the candidate list. */ + private async selectBestRecordIndex( + candidates: RecordData[], + fieldNames: string[], + prompt: string | undefined, + ): Promise { + const maxIndex = candidates.length - 1; + const filteredCandidates = candidates.map((c, i) => ({ + index: i, + values: + fieldNames.length > 0 + ? 
Object.fromEntries(Object.entries(c.values).filter(([k]) => fieldNames.includes(k))) + : c.values, + })); + + const tool = new DynamicStructuredTool({ + name: 'select-record-by-content', + description: 'Select the most relevant related record by its index.', + schema: z.object({ + recordIndex: z + .number() + .int() + .min(0) + .max(maxIndex) + .describe(`0-based index of the most relevant record (0 to ${maxIndex})`), + reasoning: z.string().describe('Why this record was chosen'), + }), + func: undefined, + }); + + const recordList = filteredCandidates + .map(c => `[${c.index}] ${JSON.stringify(c.values)}`) + .join('\n'); + + const messages = [ + new SystemMessage(SELECT_RECORD_SYSTEM_PROMPT), + new SystemMessage(`Candidates:\n${recordList}`), + new HumanMessage(`**Request**: ${prompt ?? 'Select the most relevant record.'}`), + ]; + + const { recordIndex } = await this.invokeWithTool<{ recordIndex: number; reasoning: string }>( + messages, + tool, + ); + + // NOTE: The Zod schema's .min(0).max(maxIndex) shapes the tool prompt only — it is NOT + // validated against the AI response. This guard is the sole runtime enforcement. 
+ if (recordIndex < 0 || recordIndex > maxIndex) { + throw new InvalidAIResponseError( + `AI selected record index ${recordIndex} which is out of range (0-${maxIndex})`, + ); + } + + return recordIndex; + } + + private toRecordRef(data: RecordData): RecordRef { + return { + collectionName: data.collectionName, + recordId: data.recordId, + stepIndex: this.context.stepIndex, + }; + } +} diff --git a/packages/workflow-executor/src/executors/mcp-task-step-executor.ts b/packages/workflow-executor/src/executors/mcp-task-step-executor.ts new file mode 100644 index 0000000000..fe9c84e9c2 --- /dev/null +++ b/packages/workflow-executor/src/executors/mcp-task-step-executor.ts @@ -0,0 +1,202 @@ +import type { ExecutionContext, StepExecutionResult } from '../types/execution'; +import type { McpTaskStepDefinition } from '../types/step-definition'; +import type { McpTaskStepExecutionData, McpToolCall } from '../types/step-execution-data'; +import type { RecordTaskStepStatus } from '../types/step-outcome'; +import type { RemoteTool } from '@forestadmin/ai-proxy'; + +import { DynamicStructuredTool, HumanMessage, SystemMessage } from '@forestadmin/ai-proxy'; +import { z } from 'zod'; + +import { + McpToolInvocationError, + McpToolNotFoundError, + NoMcpToolsError, + StepPersistenceError, +} from '../errors'; +import BaseStepExecutor from './base-step-executor'; + +const MCP_TASK_SYSTEM_PROMPT = `You are an AI agent selecting and executing a tool to fulfill a user request. +Select the most appropriate tool and fill in its parameters precisely. + +Important rules: +- Select only the tool directly relevant to the request. 
+- Final answer is definitive, you won't receive any other input from the user.`; + +export default class McpTaskStepExecutor extends BaseStepExecutor { + private readonly remoteTools: readonly RemoteTool[]; + + constructor( + context: ExecutionContext, + remoteTools: readonly RemoteTool[], + ) { + super(context); + this.remoteTools = remoteTools; + } + + protected buildOutcomeResult(outcome: { + status: RecordTaskStepStatus; + error?: string; + }): StepExecutionResult { + return { + stepOutcome: { + type: 'mcp-task', + stepId: this.context.stepId, + stepIndex: this.context.stepIndex, + ...outcome, + }, + }; + } + + protected async doExecute(): Promise { + if (this.context.userConfirmed !== undefined) { + // Branch A -- Re-entry with user confirmation + return this.handleConfirmationFlow('mcp-task', execution => + this.executeToolAndPersist(execution.pendingData as McpToolCall, execution), + ); + } + + // Branches B & C -- First call + const tools = this.getFilteredTools(); + const { toolName, args } = await this.selectTool(tools); + const target: McpToolCall = { name: toolName, input: args }; + + if (this.context.stepDefinition.automaticExecution) { + // Branch B -- direct execution + return this.executeToolAndPersist(target); + } + + // Branch C -- Awaiting confirmation + try { + await this.context.runStore.saveStepExecution(this.context.runId, { + type: 'mcp-task', + stepIndex: this.context.stepIndex, + pendingData: target, + }); + } catch (cause) { + throw new StepPersistenceError( + `MCP task step state could not be persisted ` + + `(run "${this.context.runId}", step ${this.context.stepIndex})`, + cause, + ); + } + + return this.buildOutcomeResult({ status: 'awaiting-input' }); + } + + private async executeToolAndPersist( + target: McpToolCall, + existingExecution?: McpTaskStepExecutionData, + ): Promise { + const tools = this.getFilteredTools(); + const tool = tools.find(t => t.base.name === target.name); + if (!tool) throw new 
McpToolNotFoundError(target.name); + + let toolResult: unknown; + + try { + toolResult = await tool.base.invoke(target.input); + } catch (cause) { + throw new McpToolInvocationError(target.name, cause); + } + + // 1. Persist raw result immediately — safe state before any further network calls + const baseExecutionResult = { success: true as const, toolResult }; + const baseData: McpTaskStepExecutionData = { + ...existingExecution, + type: 'mcp-task', + stepIndex: this.context.stepIndex, + executionParams: { name: target.name, input: target.input }, + executionResult: baseExecutionResult, + }; + + try { + await this.context.runStore.saveStepExecution(this.context.runId, baseData); + } catch (cause) { + throw new StepPersistenceError( + `MCP tool "${target.name}" executed but step state could not be persisted ` + + `(run "${this.context.runId}", step ${this.context.stepIndex})`, + cause, + ); + } + + // 2. AI formatting — non-blocking; errors are logged but do not fail the step + try { + const formattedResponse = await this.formatToolResult(target, toolResult); + + if (formattedResponse) { + await this.context.runStore.saveStepExecution(this.context.runId, { + ...baseData, + executionResult: { ...baseExecutionResult, formattedResponse }, + }); + } + } catch (cause) { + this.context.logger.error('Failed to format MCP tool result, using generic fallback', { + runId: this.context.runId, + stepIndex: this.context.stepIndex, + toolName: target.name, + cause: cause instanceof Error ? cause.message : String(cause), + }); + } + + return this.buildOutcomeResult({ status: 'success' }); + } + + private async formatToolResult(tool: McpToolCall, toolResult: unknown): Promise { + if (toolResult === null || toolResult === undefined) return null; + + const resultStr = typeof toolResult === 'string' ? toolResult : JSON.stringify(toolResult); + const truncatedResult = + resultStr.length > 20_000 ? `${resultStr.slice(0, 20_000)}\n... 
[truncated]` : resultStr; + + const summaryTool = new DynamicStructuredTool({ + name: 'summarize-result', + description: 'Provides a human-readable summary of the tool execution result.', + schema: z.object({ + summary: z.string().min(1).describe('Concise human-readable summary of the tool result.'), + }), + func: undefined, + }); + + const messages = [ + new SystemMessage( + 'You are summarizing the result of a workflow tool execution for the end user. ' + + 'Be concise and factual. Do not include raw JSON or technical identifiers.', + ), + new HumanMessage( + `Tool "${tool.name}" was executed with input: ${JSON.stringify(tool.input)}.\n` + + `Result: ${truncatedResult}\n\n` + + `Provide a concise human-readable summary.`, + ), + ]; + + const { summary } = await this.invokeWithTool<{ summary: string }>(messages, summaryTool); + + return summary || null; + } + + private async selectTool(tools: RemoteTool[]) { + const messages = [ + ...(await this.buildPreviousStepsMessages()), + new SystemMessage(MCP_TASK_SYSTEM_PROMPT), + new HumanMessage( + `**Request**: ${this.context.stepDefinition.prompt ?? 'Execute the relevant tool.'}`, + ), + ]; + + return this.invokeWithTools( + messages, + tools.map(t => t.base), + ); + } + + /** Returns tools filtered by mcpServerId (if specified). Throws NoMcpToolsError if empty. */ + private getFilteredTools(): RemoteTool[] { + const { mcpServerId } = this.context.stepDefinition; + const tools = mcpServerId + ? 
this.remoteTools.filter(t => t.sourceId === mcpServerId) + : [...this.remoteTools]; + if (tools.length === 0) throw new NoMcpToolsError(); + + return tools; + } +} diff --git a/packages/workflow-executor/src/executors/read-record-step-executor.ts b/packages/workflow-executor/src/executors/read-record-step-executor.ts index 6f7248c3c4..4b8622014a 100644 --- a/packages/workflow-executor/src/executors/read-record-step-executor.ts +++ b/packages/workflow-executor/src/executors/read-record-step-executor.ts @@ -1,22 +1,13 @@ import type { StepExecutionResult } from '../types/execution'; -import type { CollectionSchema, RecordRef } from '../types/record'; -import type { AiTaskStepDefinition } from '../types/step-definition'; -import type { - FieldReadResult, - LoadRelatedRecordStepExecutionData, -} from '../types/step-execution-data'; - -import { HumanMessage, SystemMessage } from '@langchain/core/messages'; -import { DynamicStructuredTool } from '@langchain/core/tools'; +import type { CollectionSchema } from '../types/record'; +import type { RecordTaskStepDefinition } from '../types/step-definition'; +import type { FieldReadResult } from '../types/step-execution-data'; + +import { DynamicStructuredTool, HumanMessage, SystemMessage } from '@forestadmin/ai-proxy'; import { z } from 'zod'; -import { - NoReadableFieldsError, - NoRecordsError, - NoResolvedFieldsError, - WorkflowExecutorError, -} from '../errors'; -import BaseStepExecutor from './base-step-executor'; +import { NoReadableFieldsError, NoResolvedFieldsError } from '../errors'; +import RecordTaskStepExecutor from './record-task-step-executor'; const READ_RECORD_SYSTEM_PROMPT = `You are an AI agent reading fields from a record to answer a user request. Select the field(s) that best answer the request. You can read one field or multiple fields at once. @@ -26,70 +17,40 @@ Important rules: - Final answer is definitive, you won't receive any other input from the user. 
- Do not refer to yourself as "I" in the response, use a passive formulation instead.`; -export default class ReadRecordStepExecutor extends BaseStepExecutor { - private readonly schemaCache = new Map(); - - async execute(): Promise { +export default class ReadRecordStepExecutor extends RecordTaskStepExecutor { + protected async doExecute(): Promise { const { stepDefinition: step } = this.context; const records = await this.getAvailableRecordRefs(); - let selectedRecordRef: RecordRef; - let schema: CollectionSchema; - let fieldResults: FieldReadResult[]; - - try { - selectedRecordRef = await this.selectRecordRef(records, step.prompt); - schema = await this.getCollectionSchema(selectedRecordRef.collectionName); - const selectedDisplayNames = await this.selectFields(schema, step.prompt); - const resolvedFieldNames = selectedDisplayNames - .map( - name => - schema.fields.find(f => f.fieldName === name || f.displayName === name)?.fieldName, - ) - .filter((name): name is string => name !== undefined); - - if (resolvedFieldNames.length === 0) { - throw new NoResolvedFieldsError(selectedDisplayNames); - } - - const recordData = await this.context.agentPort.getRecord( - selectedRecordRef.collectionName, - selectedRecordRef.recordId, - resolvedFieldNames, - ); - fieldResults = this.formatFieldResults(recordData.values, schema, selectedDisplayNames); - } catch (error) { - if (error instanceof WorkflowExecutorError) { - return { - stepOutcome: { - type: 'ai-task', - stepId: this.context.stepId, - stepIndex: this.context.stepIndex, - status: 'error', - error: error.message, - }, - }; - } - - throw error; + const selectedRecordRef = await this.selectRecordRef(records, step.prompt); + const schema = await this.getCollectionSchema(selectedRecordRef.collectionName); + const selectedDisplayNames = await this.selectFields(schema, step.prompt); + const resolvedFieldNames = selectedDisplayNames + .map(name => this.findField(schema, name)?.fieldName) + .filter((name): name is string => 
name !== undefined); + + if (resolvedFieldNames.length === 0) { + throw new NoResolvedFieldsError(selectedDisplayNames); } + const recordData = await this.agentPort.getRecord({ + collection: selectedRecordRef.collectionName, + id: selectedRecordRef.recordId, + fields: resolvedFieldNames, + }); + const fieldResults = this.formatFieldResults(recordData.values, schema, selectedDisplayNames); + await this.context.runStore.saveStepExecution(this.context.runId, { type: 'read-record', stepIndex: this.context.stepIndex, - executionParams: { fieldNames: fieldResults.map(f => f.fieldName) }, + executionParams: { + fields: fieldResults.map(({ name, displayName }) => ({ name, displayName })), + }, executionResult: { fields: fieldResults }, selectedRecordRef, }); - return { - stepOutcome: { - type: 'ai-task', - stepId: this.context.stepId, - stepIndex: this.context.stepIndex, - status: 'success', - }, - }; + return this.buildOutcomeResult({ status: 'success' }); } private async selectFields( @@ -111,51 +72,6 @@ export default class ReadRecordStepExecutor extends BaseStepExecutor { - if (records.length === 0) throw new NoRecordsError(); - if (records.length === 1) return records[0]; - - const identifiers = await Promise.all(records.map(r => this.toRecordIdentifier(r))); - const identifierTuple = identifiers as [string, ...string[]]; - - const tool = new DynamicStructuredTool({ - name: 'select-record', - description: 'Select the most relevant record for this workflow step.', - schema: z.object({ - recordIdentifier: z.enum(identifierTuple), - }), - func: undefined, - }); - - const messages = [ - ...(await this.buildPreviousStepsMessages()), - new SystemMessage( - 'You are an AI agent selecting the most relevant record for a workflow step.\n' + - 'Choose the record whose collection best matches the user request.\n' + - 'Pay attention to the collection name of each record.', - ), - new HumanMessage(prompt ?? 
'Select the most relevant record.'), - ]; - - const { recordIdentifier } = await this.invokeWithTool<{ recordIdentifier: string }>( - messages, - tool, - ); - - const selectedIndex = identifiers.indexOf(recordIdentifier); - - if (selectedIndex === -1) { - throw new WorkflowExecutorError( - `AI selected record "${recordIdentifier}" which does not match any available record`, - ); - } - - return records[selectedIndex]; - } - private buildReadFieldTool(schema: CollectionSchema): DynamicStructuredTool { const nonRelationFields = schema.fields.filter(f => !f.isRelationship); @@ -187,43 +103,18 @@ export default class ReadRecordStepExecutor extends BaseStepExecutor, schema: CollectionSchema, - fieldNames: string[], + fieldDisplayNames: string[], ): FieldReadResult[] { - return fieldNames.map(name => { - const field = schema.fields.find(f => f.fieldName === name || f.displayName === name); + return fieldDisplayNames.map(name => { + const field = this.findField(schema, name); - if (!field) return { error: `Field not found: ${name}`, fieldName: name, displayName: name }; + if (!field) return { error: `Field not found: ${name}`, name, displayName: name }; return { value: values[field.fieldName], - fieldName: field.fieldName, + name: field.fieldName, displayName: field.displayName, }; }); } - - private async getAvailableRecordRefs(): Promise { - const stepExecutions = await this.context.runStore.getStepExecutions(this.context.runId); - const relatedRecords = stepExecutions - .filter((e): e is LoadRelatedRecordStepExecutionData => e.type === 'load-related-record') - .map(e => e.record); - - return [this.context.baseRecordRef, ...relatedRecords]; - } - - private async getCollectionSchema(collectionName: string): Promise { - const cached = this.schemaCache.get(collectionName); - if (cached) return cached; - - const schema = await this.context.workflowPort.getCollectionSchema(collectionName); - this.schemaCache.set(collectionName, schema); - - return schema; - } - - private async 
toRecordIdentifier(record: RecordRef): Promise { - const schema = await this.getCollectionSchema(record.collectionName); - - return `Step ${record.stepIndex} - ${schema.collectionDisplayName} #${record.recordId}`; - } } diff --git a/packages/workflow-executor/src/executors/record-task-step-executor.ts b/packages/workflow-executor/src/executors/record-task-step-executor.ts new file mode 100644 index 0000000000..86b8841092 --- /dev/null +++ b/packages/workflow-executor/src/executors/record-task-step-executor.ts @@ -0,0 +1,23 @@ +import type { StepExecutionResult } from '../types/execution'; +import type { StepDefinition } from '../types/step-definition'; +import type { RecordTaskStepStatus } from '../types/step-outcome'; + +import BaseStepExecutor from './base-step-executor'; + +export default abstract class RecordTaskStepExecutor< + TStep extends StepDefinition = StepDefinition, +> extends BaseStepExecutor { + protected buildOutcomeResult(outcome: { + status: RecordTaskStepStatus; + error?: string; + }): StepExecutionResult { + return { + stepOutcome: { + type: 'record-task', + stepId: this.context.stepId, + stepIndex: this.context.stepIndex, + ...outcome, + }, + }; + } +} diff --git a/packages/workflow-executor/src/executors/safe-agent-port.ts b/packages/workflow-executor/src/executors/safe-agent-port.ts new file mode 100644 index 0000000000..4ed4d3ef5c --- /dev/null +++ b/packages/workflow-executor/src/executors/safe-agent-port.ts @@ -0,0 +1,39 @@ +import type { + AgentPort, + ExecuteActionQuery, + GetRecordQuery, + GetRelatedDataQuery, + UpdateRecordQuery, +} from '../ports/agent-port'; +import type { RecordData } from '../types/record'; + +import { AgentPortError, WorkflowExecutorError } from '../errors'; + +export default class SafeAgentPort implements AgentPort { + constructor(private readonly port: AgentPort) {} + + async getRecord(query: GetRecordQuery): Promise { + return this.call('getRecord', () => this.port.getRecord(query)); + } + + async 
updateRecord(query: UpdateRecordQuery): Promise { + return this.call('updateRecord', () => this.port.updateRecord(query)); + } + + async getRelatedData(query: GetRelatedDataQuery): Promise { + return this.call('getRelatedData', () => this.port.getRelatedData(query)); + } + + async executeAction(query: ExecuteActionQuery): Promise { + return this.call('executeAction', () => this.port.executeAction(query)); + } + + private async call(operation: string, fn: () => Promise): Promise { + try { + return await fn(); + } catch (cause) { + if (cause instanceof WorkflowExecutorError) throw cause; + throw new AgentPortError(operation, cause); + } + } +} diff --git a/packages/workflow-executor/src/executors/step-executor-factory.ts b/packages/workflow-executor/src/executors/step-executor-factory.ts new file mode 100644 index 0000000000..2f321dd638 --- /dev/null +++ b/packages/workflow-executor/src/executors/step-executor-factory.ts @@ -0,0 +1,109 @@ +import type { AgentPort } from '../ports/agent-port'; +import type { Logger } from '../ports/logger-port'; +import type { RunStore } from '../ports/run-store'; +import type { WorkflowPort } from '../ports/workflow-port'; +import type { + ExecutionContext, + IStepExecutor, + PendingStepExecution, + StepExecutionResult, +} from '../types/execution'; +import type { + ConditionStepDefinition, + McpTaskStepDefinition, + RecordTaskStepDefinition, +} from '../types/step-definition'; +import type { AiClient, RemoteTool } from '@forestadmin/ai-proxy'; + +import { StepStateError, causeMessage } from '../errors'; +import ConditionStepExecutor from './condition-step-executor'; +import LoadRelatedRecordStepExecutor from './load-related-record-step-executor'; +import McpTaskStepExecutor from './mcp-task-step-executor'; +import ReadRecordStepExecutor from './read-record-step-executor'; +import TriggerRecordActionStepExecutor from './trigger-record-action-step-executor'; +import UpdateRecordStepExecutor from './update-record-step-executor'; 
+import { StepType } from '../types/step-definition'; +import { stepTypeToOutcomeType } from '../types/step-outcome'; + +export interface StepContextConfig { + aiClient: AiClient; + agentPort: AgentPort; + workflowPort: WorkflowPort; + runStore: RunStore; + logger: Logger; +} + +export default class StepExecutorFactory { + static async create( + step: PendingStepExecution, + contextConfig: StepContextConfig, + loadTools: () => Promise, + ): Promise { + try { + const context = StepExecutorFactory.buildContext(step, contextConfig); + + switch (step.stepDefinition.type) { + case StepType.Condition: + return new ConditionStepExecutor(context as ExecutionContext); + case StepType.ReadRecord: + return new ReadRecordStepExecutor(context as ExecutionContext); + case StepType.UpdateRecord: + return new UpdateRecordStepExecutor( + context as ExecutionContext, + ); + case StepType.TriggerAction: + return new TriggerRecordActionStepExecutor( + context as ExecutionContext, + ); + case StepType.LoadRelatedRecord: + return new LoadRelatedRecordStepExecutor( + context as ExecutionContext, + ); + case StepType.McpTask: + return new McpTaskStepExecutor( + context as ExecutionContext, + await loadTools(), + ); + default: + throw new StepStateError( + `Unknown step type: ${(step.stepDefinition as { type: string }).type}`, + ); + } + } catch (error) { + contextConfig.logger.error('Step execution failed unexpectedly', { + runId: step.runId, + stepId: step.stepId, + stepIndex: step.stepIndex, + error: error instanceof Error ? error.message : String(error), + cause: causeMessage(error), + stack: error instanceof Error ? 
error.stack : undefined, + }); + + return { + execute: async (): Promise => ({ + stepOutcome: { + type: stepTypeToOutcomeType(step.stepDefinition.type), + stepId: step.stepId, + stepIndex: step.stepIndex, + status: 'error', + error: 'An unexpected error occurred.', + }, + }), + }; + } + } + + private static buildContext( + step: PendingStepExecution, + cfg: StepContextConfig, + ): ExecutionContext { + return { + ...step, + model: cfg.aiClient.getModel(step.stepDefinition.aiConfigName), + agentPort: cfg.agentPort, + workflowPort: cfg.workflowPort, + runStore: cfg.runStore, + logger: cfg.logger, + }; + } +} diff --git a/packages/workflow-executor/src/executors/summary/step-execution-formatters.ts b/packages/workflow-executor/src/executors/summary/step-execution-formatters.ts new file mode 100644 index 0000000000..55fdda59a2 --- /dev/null +++ b/packages/workflow-executor/src/executors/summary/step-execution-formatters.ts @@ -0,0 +1,60 @@ +import type { + LoadRelatedRecordStepExecutionData, + McpTaskStepExecutionData, + StepExecutionData, +} from '../../types/step-execution-data'; + +/** + * Stateless utility class — all methods are static. + * Provides type-specific formatting for step execution results. + * Add one private static method per step type that needs a non-generic display format, + * and dispatch from `format`. + */ +export default class StepExecutionFormatters { + /** + * Returns the full output line (indent + label + content) for the given execution, or null when: + * - No custom format is defined for this step type (switch default) — caller uses generic fallback, or + * - The execution data does not satisfy the formatter's preconditions (e.g. skipped/incomplete). + * In both cases, `StepSummaryBuilder` renders the generic Input:/Output: fallback. 
+ */ + static format(execution: StepExecutionData): string | null { + switch (execution.type) { + case 'load-related-record': + return StepExecutionFormatters.formatLoadRelatedRecord(execution); + case 'mcp-task': + return StepExecutionFormatters.formatMcpTask(execution as McpTaskStepExecutionData); + default: + return null; + } + } + + private static formatMcpTask(execution: McpTaskStepExecutionData): string | null { + const { executionResult } = execution; + if (!executionResult) return null; + if ('skipped' in executionResult) return null; + + if (executionResult.formattedResponse) { + return ` Result: ${executionResult.formattedResponse}`; + } + + const toolName = execution.executionParams?.name ?? 'unknown tool'; + + return ` Executed: ${toolName} (result not summarized)`; + } + + private static formatLoadRelatedRecord( + execution: LoadRelatedRecordStepExecutionData, + ): string | null { + const { executionResult } = execution; + + if (!executionResult) return null; // pending phase — no result yet + if ('skipped' in executionResult) return null; // user skipped — generic fallback + + const { selectedRecordRef } = execution; + const { relation, record } = executionResult; + const sourceId = selectedRecordRef.recordId.join(', '); + const recordId = record.recordId.join(', '); + + return ` Loaded: ${selectedRecordRef.collectionName} #${sourceId} → [${relation.displayName}] → ${record.collectionName} #${recordId} (step ${record.stepIndex})`; + } +} diff --git a/packages/workflow-executor/src/executors/summary/step-summary-builder.ts b/packages/workflow-executor/src/executors/summary/step-summary-builder.ts new file mode 100644 index 0000000000..abe155924f --- /dev/null +++ b/packages/workflow-executor/src/executors/summary/step-summary-builder.ts @@ -0,0 +1,43 @@ +import type { StepDefinition } from '../../types/step-definition'; +import type { StepExecutionData } from '../../types/step-execution-data'; +import type { StepOutcome } from 
'../../types/step-outcome'; + +import StepExecutionFormatters from './step-execution-formatters'; + +export default class StepSummaryBuilder { + static build( + step: StepDefinition, + stepOutcome: StepOutcome, + execution: StepExecutionData | undefined, + ): string { + const prompt = step.prompt ?? '(no prompt)'; + const header = `Step "${stepOutcome.stepId}" (index ${stepOutcome.stepIndex}):`; + const lines = [header, ` Prompt: ${prompt}`]; + + if (execution !== undefined) { + // Try custom formatting — if it fires, it owns the entire output section (no Input: line) + const customLine = execution.executionResult + ? StepExecutionFormatters.format(execution) + : null; + + if (customLine !== null) { + lines.push(customLine); + } else { + if (execution.executionParams !== undefined) { + lines.push(` Input: ${JSON.stringify(execution.executionParams)}`); + } else if ('pendingData' in execution && execution.pendingData !== undefined) { + lines.push(` Pending: ${JSON.stringify(execution.pendingData)}`); + } + + if (execution.executionResult) { + lines.push(` Output: ${JSON.stringify(execution.executionResult)}`); + } + } + } else { + const { stepId, stepIndex, type, ...historyDetails } = stepOutcome; + lines.push(` History: ${JSON.stringify(historyDetails)}`); + } + + return lines.join('\n'); + } +} diff --git a/packages/workflow-executor/src/executors/trigger-record-action-step-executor.ts b/packages/workflow-executor/src/executors/trigger-record-action-step-executor.ts new file mode 100644 index 0000000000..167e3f018c --- /dev/null +++ b/packages/workflow-executor/src/executors/trigger-record-action-step-executor.ts @@ -0,0 +1,164 @@ +import type { StepExecutionResult } from '../types/execution'; +import type { CollectionSchema, RecordRef } from '../types/record'; +import type { RecordTaskStepDefinition } from '../types/step-definition'; +import type { ActionRef, TriggerRecordActionStepExecutionData } from '../types/step-execution-data'; + +import { 
DynamicStructuredTool, HumanMessage, SystemMessage } from '@forestadmin/ai-proxy'; +import { z } from 'zod'; + +import { ActionNotFoundError, NoActionsError, StepPersistenceError } from '../errors'; +import RecordTaskStepExecutor from './record-task-step-executor'; + +const TRIGGER_ACTION_SYSTEM_PROMPT = `You are an AI agent triggering an action on a record based on a user request. +Select the action to trigger. + +Important rules: +- Be precise: only trigger the action directly relevant to the request. +- Final answer is definitive, you won't receive any other input from the user. +- Do not refer to yourself as "I" in the response, use a passive formulation instead.`; + +interface ActionTarget extends ActionRef { + selectedRecordRef: RecordRef; +} + +export default class TriggerRecordActionStepExecutor extends RecordTaskStepExecutor { + protected async doExecute(): Promise { + // Branch A -- Re-entry with user confirmation + if (this.context.userConfirmed !== undefined) { + return this.handleConfirmation(); + } + + // Branches B & C -- First call + return this.handleFirstCall(); + } + + private async handleConfirmation(): Promise { + return this.handleConfirmationFlow( + 'trigger-action', + async execution => { + const { selectedRecordRef, pendingData } = execution; + const target: ActionTarget = { + selectedRecordRef, + ...(pendingData as ActionRef), + }; + + return this.resolveAndExecute(target, execution); + }, + ); + } + + private async handleFirstCall(): Promise { + const { stepDefinition: step } = this.context; + const records = await this.getAvailableRecordRefs(); + + const selectedRecordRef = await this.selectRecordRef(records, step.prompt); + const schema = await this.getCollectionSchema(selectedRecordRef.collectionName); + const args = await this.selectAction(schema, step.prompt); + const name = this.resolveActionName(schema, args.actionName); + const target: ActionTarget = { selectedRecordRef, displayName: args.actionName, name }; + + // Branch B -- 
automaticExecution + if (step.automaticExecution) { + return this.resolveAndExecute(target); + } + + // Branch C -- Awaiting confirmation + await this.context.runStore.saveStepExecution(this.context.runId, { + type: 'trigger-action', + stepIndex: this.context.stepIndex, + pendingData: { displayName: target.displayName, name: target.name }, + selectedRecordRef: target.selectedRecordRef, + }); + + return this.buildOutcomeResult({ status: 'awaiting-input' }); + } + + /** + * Resolves the action name, calls executeAction, and persists execution data. + * When `existingExecution` is provided (confirmation flow), it is spread into the + * saved execution to preserve pendingData for traceability. + */ + private async resolveAndExecute( + target: ActionTarget, + existingExecution?: TriggerRecordActionStepExecutionData, + ): Promise { + const { selectedRecordRef, displayName, name } = target; + + const actionResult = await this.agentPort.executeAction({ + collection: selectedRecordRef.collectionName, + action: name, + id: selectedRecordRef.recordId, + }); + + try { + await this.context.runStore.saveStepExecution(this.context.runId, { + ...existingExecution, + type: 'trigger-action', + stepIndex: this.context.stepIndex, + executionParams: { displayName, name }, + executionResult: { success: true, actionResult }, + selectedRecordRef, + }); + } catch (cause) { + throw new StepPersistenceError( + `Action "${name}" executed but step state could not be persisted ` + + `(run "${this.context.runId}", step ${this.context.stepIndex})`, + cause, + ); + } + + return this.buildOutcomeResult({ status: 'success' }); + } + + private async selectAction( + schema: CollectionSchema, + prompt: string | undefined, + ): Promise<{ actionName: string; reasoning: string }> { + const tool = this.buildSelectActionTool(schema); + const messages = [ + ...(await this.buildPreviousStepsMessages()), + new SystemMessage(TRIGGER_ACTION_SYSTEM_PROMPT), + new SystemMessage( + `The selected record belongs to 
the "${schema.collectionDisplayName}" collection.`, + ), + new HumanMessage(`**Request**: ${prompt ?? 'Trigger the relevant action.'}`), + ]; + + return this.invokeWithTool<{ actionName: string; reasoning: string }>(messages, tool); + } + + private buildSelectActionTool(schema: CollectionSchema): DynamicStructuredTool { + if (schema.actions.length === 0) { + throw new NoActionsError(schema.collectionName); + } + + const displayNames = schema.actions.map(a => a.displayName) as [string, ...string[]]; + const technicalNames = schema.actions + .map(a => `${a.displayName} (technical name: ${a.name})`) + .join(', '); + + return new DynamicStructuredTool({ + name: 'select-action', + description: 'Select the action to trigger on the record.', + schema: z.object({ + actionName: z + .enum(displayNames) + .describe(`The name of the action to trigger. Available: ${technicalNames}`), + reasoning: z.string().describe('Why this action was chosen'), + }), + func: undefined, + }); + } + + private resolveActionName(schema: CollectionSchema, displayName: string): string { + const action = + schema.actions.find(a => a.displayName === displayName) ?? 
+ schema.actions.find(a => a.name === displayName); + + if (!action) { + throw new ActionNotFoundError(displayName, schema.collectionName); + } + + return action.name; + } +} diff --git a/packages/workflow-executor/src/executors/update-record-step-executor.ts b/packages/workflow-executor/src/executors/update-record-step-executor.ts new file mode 100644 index 0000000000..97c0cb1d41 --- /dev/null +++ b/packages/workflow-executor/src/executors/update-record-step-executor.ts @@ -0,0 +1,175 @@ +import type { StepExecutionResult } from '../types/execution'; +import type { CollectionSchema, RecordRef } from '../types/record'; +import type { RecordTaskStepDefinition } from '../types/step-definition'; +import type { FieldRef, UpdateRecordStepExecutionData } from '../types/step-execution-data'; + +import { DynamicStructuredTool, HumanMessage, SystemMessage } from '@forestadmin/ai-proxy'; +import { z } from 'zod'; + +import { FieldNotFoundError, NoWritableFieldsError, StepPersistenceError } from '../errors'; +import RecordTaskStepExecutor from './record-task-step-executor'; + +const UPDATE_RECORD_SYSTEM_PROMPT = `You are an AI agent updating a field on a record based on a user request. +Select the field to update and provide the new value. + +Important rules: +- Be precise: only update the field that is directly relevant to the request. +- Final answer is definitive, you won't receive any other input from the user. 
+- Do not refer to yourself as "I" in the response, use a passive formulation instead.`; + +interface UpdateTarget extends FieldRef { + selectedRecordRef: RecordRef; + value: string; +} + +export default class UpdateRecordStepExecutor extends RecordTaskStepExecutor { + protected async doExecute(): Promise { + // Branch A -- Re-entry with user confirmation + if (this.context.userConfirmed !== undefined) { + return this.handleConfirmation(); + } + + // Branches B & C -- First call + return this.handleFirstCall(); + } + + private async handleConfirmation(): Promise { + return this.handleConfirmationFlow( + 'update-record', + async execution => { + const { selectedRecordRef, pendingData } = execution; + const target: UpdateTarget = { + selectedRecordRef, + ...(pendingData as FieldRef & { value: string }), + }; + + return this.resolveAndUpdate(target, execution); + }, + ); + } + + private async handleFirstCall(): Promise { + const { stepDefinition: step } = this.context; + const records = await this.getAvailableRecordRefs(); + + const selectedRecordRef = await this.selectRecordRef(records, step.prompt); + const schema = await this.getCollectionSchema(selectedRecordRef.collectionName); + const args = await this.selectFieldAndValue(schema, step.prompt); + const name = this.resolveFieldName(schema, args.fieldName); + const target: UpdateTarget = { + selectedRecordRef, + displayName: args.fieldName, + name, + value: args.value, + }; + + // Branch B -- automaticExecution + if (step.automaticExecution) { + return this.resolveAndUpdate(target); + } + + // Branch C -- Awaiting confirmation + await this.context.runStore.saveStepExecution(this.context.runId, { + type: 'update-record', + stepIndex: this.context.stepIndex, + pendingData: { + displayName: target.displayName, + name: target.name, + value: target.value, + }, + selectedRecordRef: target.selectedRecordRef, + }); + + return this.buildOutcomeResult({ status: 'awaiting-input' }); + } + + /** + * Resolves the field name, 
calls updateRecord, and persists execution data. + * When `existingExecution` is provided (confirmation flow), it is spread into the + * saved execution to preserve pendingData for traceability. + */ + private async resolveAndUpdate( + target: UpdateTarget, + existingExecution?: UpdateRecordStepExecutionData, + ): Promise { + const { selectedRecordRef, displayName, name, value } = target; + + const updated = await this.agentPort.updateRecord({ + collection: selectedRecordRef.collectionName, + id: selectedRecordRef.recordId, + values: { [name]: value }, + }); + + try { + await this.context.runStore.saveStepExecution(this.context.runId, { + ...existingExecution, + type: 'update-record', + stepIndex: this.context.stepIndex, + executionParams: { displayName, name, value }, + executionResult: { updatedValues: updated.values }, + selectedRecordRef, + }); + } catch (cause) { + throw new StepPersistenceError( + `Record update persisted but step state could not be saved ` + + `(run "${this.context.runId}", step ${this.context.stepIndex})`, + cause, + ); + } + + return this.buildOutcomeResult({ status: 'success' }); + } + + private async selectFieldAndValue( + schema: CollectionSchema, + prompt: string | undefined, + ): Promise<{ fieldName: string; value: string; reasoning: string }> { + const tool = this.buildUpdateFieldTool(schema); + const messages = [ + ...(await this.buildPreviousStepsMessages()), + new SystemMessage(UPDATE_RECORD_SYSTEM_PROMPT), + new SystemMessage( + `The selected record belongs to the "${schema.collectionDisplayName}" collection.`, + ), + new HumanMessage(`**Request**: ${prompt ?? 
'Update the relevant field.'}`), + ]; + + return this.invokeWithTool<{ fieldName: string; value: string; reasoning: string }>( + messages, + tool, + ); + } + + private buildUpdateFieldTool(schema: CollectionSchema): DynamicStructuredTool { + const nonRelationFields = schema.fields.filter(f => !f.isRelationship); + + if (nonRelationFields.length === 0) { + throw new NoWritableFieldsError(schema.collectionName); + } + + const displayNames = nonRelationFields.map(f => f.displayName) as [string, ...string[]]; + + return new DynamicStructuredTool({ + name: 'update-record-field', + description: 'Update a field on the selected record.', + schema: z.object({ + fieldName: z.enum(displayNames), + // z.string() intentionally: the value is always transmitted as string + // to updateRecord; data typing is handled by the agent/datasource layer. + value: z.string().describe('The new value for the field'), + reasoning: z.string().describe('Why this field and value were chosen'), + }), + func: undefined, + }); + } + + private resolveFieldName(schema: CollectionSchema, displayName: string): string { + const field = this.findField(schema, displayName); + + if (!field) { + throw new FieldNotFoundError(displayName, schema.collectionName); + } + + return field.fieldName; + } +} diff --git a/packages/workflow-executor/src/http/executor-http-server.ts b/packages/workflow-executor/src/http/executor-http-server.ts index 72ea42936e..10062deddd 100644 --- a/packages/workflow-executor/src/http/executor-http-server.ts +++ b/packages/workflow-executor/src/http/executor-http-server.ts @@ -1,3 +1,4 @@ +import type { Logger } from '../ports/logger-port'; import type { RunStore } from '../ports/run-store'; import type Runner from '../runner'; import type { Server } from 'http'; @@ -6,10 +7,13 @@ import Router from '@koa/router'; import http from 'http'; import Koa from 'koa'; +import { RunNotFoundError } from '../errors'; + export interface ExecutorHttpServerOptions { port: number; runStore: 
RunStore; runner: Runner; + logger?: Logger; } export default class ExecutorHttpServer { @@ -26,8 +30,14 @@ export default class ExecutorHttpServer { try { await next(); } catch (err: unknown) { + this.options.logger?.error('Unhandled HTTP error', { + method: ctx.method, + path: ctx.path, + error: err instanceof Error ? err.message : String(err), + stack: err instanceof Error ? err.stack : undefined, + }); ctx.status = 500; - ctx.body = { error: err instanceof Error ? err.message : 'Internal server error' }; + ctx.body = { error: 'Internal server error' }; } }); @@ -80,7 +90,18 @@ export default class ExecutorHttpServer { private async handleTrigger(ctx: Koa.Context): Promise { const { runId } = ctx.params; - await this.options.runner.triggerPoll(runId); + try { + await this.options.runner.triggerPoll(runId); + } catch (err) { + if (err instanceof RunNotFoundError) { + ctx.status = 404; + ctx.body = { error: 'Run not found or unavailable' }; + + return; + } + + throw err; + } ctx.status = 200; ctx.body = { triggered: true }; diff --git a/packages/workflow-executor/src/index.ts b/packages/workflow-executor/src/index.ts index 916bbc0751..3075c5f418 100644 --- a/packages/workflow-executor/src/index.ts +++ b/packages/workflow-executor/src/index.ts @@ -1,14 +1,16 @@ export { StepType } from './types/step-definition'; export type { ConditionStepDefinition, - AiTaskStepDefinition, + RecordTaskStepDefinition, + McpTaskStepDefinition, StepDefinition, } from './types/step-definition'; export type { StepStatus, ConditionStepOutcome, - AiTaskStepOutcome, + RecordTaskStepOutcome, + McpTaskStepOutcome, StepOutcome, } from './types/step-outcome'; @@ -16,16 +18,23 @@ export type { FieldReadSuccess, FieldReadError, FieldReadResult, + ActionRef, + RelationRef, + FieldRef, ConditionStepExecutionData, ReadRecordStepExecutionData, - AiTaskStepExecutionData, + UpdateRecordStepExecutionData, + TriggerRecordActionStepExecutionData, + RecordTaskStepExecutionData, + 
LoadRelatedRecordPendingData, LoadRelatedRecordStepExecutionData, + McpToolRef, + McpToolCall, + McpTaskStepExecutionData, ExecutedStepExecutionData, StepExecutionData, } from './types/step-execution-data'; -export { isExecutedStepOnExecutor } from './types/step-execution-data'; - export type { FieldSchema, ActionSchema, @@ -36,15 +45,24 @@ export type { export type { Step, - UserInput, PendingStepExecution, StepExecutionResult, ExecutionContext, } from './types/execution'; -export type { AgentPort } from './ports/agent-port'; +export type { + AgentPort, + ExecuteActionQuery, + GetRecordQuery, + GetRelatedDataQuery, + Id, + Limit, + UpdateRecordQuery, +} from './ports/agent-port'; export type { McpConfiguration, WorkflowPort } from './ports/workflow-port'; export type { RunStore } from './ports/run-store'; +export type { Logger } from './ports/logger-port'; +export { default as ConsoleLogger } from './adapters/console-logger'; export { WorkflowExecutorError, @@ -54,10 +72,28 @@ export { NoRecordsError, NoReadableFieldsError, NoResolvedFieldsError, + NoWritableFieldsError, + NoActionsError, + StepPersistenceError, + NoRelationshipFieldsError, + RelatedRecordNotFoundError, + InvalidAIResponseError, + RelationNotFoundError, + FieldNotFoundError, + ActionNotFoundError, + StepStateError, + NoMcpToolsError, + McpToolNotFoundError, + McpToolInvocationError, + AgentPortError, } from './errors'; export { default as BaseStepExecutor } from './executors/base-step-executor'; export { default as ConditionStepExecutor } from './executors/condition-step-executor'; export { default as ReadRecordStepExecutor } from './executors/read-record-step-executor'; +export { default as UpdateRecordStepExecutor } from './executors/update-record-step-executor'; +export { default as TriggerRecordActionStepExecutor } from './executors/trigger-record-action-step-executor'; +export { default as LoadRelatedRecordStepExecutor } from './executors/load-related-record-step-executor'; +export { default 
as McpTaskStepExecutor } from './executors/mcp-task-step-executor'; export { default as AgentClientAgentPort } from './adapters/agent-client-agent-port'; export { default as ForestServerWorkflowPort } from './adapters/forest-server-workflow-port'; export { default as ExecutorHttpServer } from './http/executor-http-server'; diff --git a/packages/workflow-executor/src/ports/agent-port.ts b/packages/workflow-executor/src/ports/agent-port.ts index a0964e250f..4a95c92cdf 100644 --- a/packages/workflow-executor/src/ports/agent-port.ts +++ b/packages/workflow-executor/src/ports/agent-port.ts @@ -2,25 +2,26 @@ import type { RecordData } from '../types/record'; +export type Id = string | number; + +export type Limit = { limit: number } | { limit: null }; + +export type GetRecordQuery = { collection: string; id: Id[]; fields?: string[] }; + +export type UpdateRecordQuery = { collection: string; id: Id[]; values: Record }; + +export type GetRelatedDataQuery = { + collection: string; + id: Id[]; + relation: string; + fields?: string[]; +} & Limit; + +export type ExecuteActionQuery = { collection: string; action: string; id?: Id[] }; + export interface AgentPort { - getRecord( - collectionName: string, - recordId: Array, - fieldNames?: string[], - ): Promise; - updateRecord( - collectionName: string, - recordId: Array, - values: Record, - ): Promise; - getRelatedData( - collectionName: string, - recordId: Array, - relationName: string, - ): Promise; - executeAction( - collectionName: string, - actionName: string, - recordIds: Array[], - ): Promise; + getRecord(query: GetRecordQuery): Promise; + updateRecord(query: UpdateRecordQuery): Promise; + getRelatedData(query: GetRelatedDataQuery): Promise; + executeAction(query: ExecuteActionQuery): Promise; } diff --git a/packages/workflow-executor/src/ports/logger-port.ts b/packages/workflow-executor/src/ports/logger-port.ts new file mode 100644 index 0000000000..017f8742ab --- /dev/null +++ 
b/packages/workflow-executor/src/ports/logger-port.ts @@ -0,0 +1,3 @@ +export interface Logger { + error(message: string, context: Record): void; +} diff --git a/packages/workflow-executor/src/ports/workflow-port.ts b/packages/workflow-executor/src/ports/workflow-port.ts index 392473a95d..223123b756 100644 --- a/packages/workflow-executor/src/ports/workflow-port.ts +++ b/packages/workflow-executor/src/ports/workflow-port.ts @@ -3,12 +3,13 @@ import type { PendingStepExecution } from '../types/execution'; import type { CollectionSchema } from '../types/record'; import type { StepOutcome } from '../types/step-outcome'; +import type { McpConfiguration } from '@forestadmin/ai-proxy'; -/** Placeholder -- will be typed as McpConfiguration from @forestadmin/ai-proxy/mcp-client once added as dependency. */ -export type McpConfiguration = unknown; +export type { McpConfiguration }; export interface WorkflowPort { getPendingStepExecutions(): Promise; + getPendingStepExecutionsForRun(runId: string): Promise; updateStepExecution(runId: string, stepOutcome: StepOutcome): Promise; getCollectionSchema(collectionName: string): Promise; getMcpServerConfigs(): Promise; diff --git a/packages/workflow-executor/src/runner.ts b/packages/workflow-executor/src/runner.ts index 652772c71c..b97a8260aa 100644 --- a/packages/workflow-executor/src/runner.ts +++ b/packages/workflow-executor/src/runner.ts @@ -1,9 +1,14 @@ -// TODO: implement polling loop, execution dispatch, AI wiring (see spec section 4.1) - +import type { StepContextConfig } from './executors/step-executor-factory'; import type { AgentPort } from './ports/agent-port'; +import type { Logger } from './ports/logger-port'; import type { RunStore } from './ports/run-store'; -import type { WorkflowPort } from './ports/workflow-port'; +import type { McpConfiguration, WorkflowPort } from './ports/workflow-port'; +import type { PendingStepExecution, StepExecutionResult } from './types/execution'; +import type { AiClient, RemoteTool } 
from '@forestadmin/ai-proxy'; +import ConsoleLogger from './adapters/console-logger'; +import { RunNotFoundError, causeMessage } from './errors'; +import StepExecutorFactory from './executors/step-executor-factory'; import ExecutorHttpServer from './http/executor-http-server'; export interface RunnerConfig { @@ -11,42 +16,169 @@ export interface RunnerConfig { workflowPort: WorkflowPort; runStore: RunStore; pollingIntervalMs: number; + aiClient: AiClient; + logger?: Logger; httpPort?: number; } export default class Runner { private readonly config: RunnerConfig; private httpServer: ExecutorHttpServer | null = null; + private pollingTimer: NodeJS.Timeout | null = null; + private readonly inFlightSteps = new Set(); + private isRunning = false; + private readonly logger: Logger; + + private static once(fn: () => Promise): () => Promise { + let cached: Promise | undefined; + + return () => { + cached ??= fn(); + + return cached; + }; + } + + private static stepKey(step: PendingStepExecution): string { + return `${step.runId}:${step.stepId}`; + } constructor(config: RunnerConfig) { this.config = config; + this.logger = config.logger ?? 
new ConsoleLogger(); } async start(): Promise { - if (this.config.httpPort !== undefined && !this.httpServer) { - const server = new ExecutorHttpServer({ - port: this.config.httpPort, - runStore: this.config.runStore, - runner: this, - }); - await server.start(); - this.httpServer = server; + if (this.isRunning) return; + this.isRunning = true; + + try { + if (this.config.httpPort !== undefined && !this.httpServer) { + const server = new ExecutorHttpServer({ + port: this.config.httpPort, + runStore: this.config.runStore, + runner: this, + }); + await server.start(); + this.httpServer = server; + } + } catch (error) { + this.isRunning = false; + throw error; } - // TODO: start polling loop + this.schedulePoll(); } async stop(): Promise { + this.isRunning = false; + + if (this.pollingTimer !== null) { + clearTimeout(this.pollingTimer); + this.pollingTimer = null; + } + if (this.httpServer) { await this.httpServer.stop(); this.httpServer = null; } - // TODO: stop polling loop, close connections + await this.config.aiClient.closeConnections(); + + // TODO: graceful drain of in-flight steps (out of scope PRD-223) + } + + async triggerPoll(runId: string): Promise { + const step = await this.config.workflowPort.getPendingStepExecutionsForRun(runId); + + if (!step) throw new RunNotFoundError(runId); + + if (this.inFlightSteps.has(Runner.stepKey(step))) return; + + const loadTools = Runner.once(() => this.fetchRemoteTools()); + await this.executeStep(step, loadTools); + } + + private schedulePoll(): void { + if (!this.isRunning) return; + this.pollingTimer = setTimeout(() => this.runPollCycle(), this.config.pollingIntervalMs); + } + + private async runPollCycle(): Promise { + try { + const steps = await this.config.workflowPort.getPendingStepExecutions(); + const pending = steps.filter(s => !this.inFlightSteps.has(Runner.stepKey(s))); + const loadTools = Runner.once(() => this.fetchRemoteTools()); + await Promise.allSettled(pending.map(s => this.executeStep(s, loadTools))); 
+ } catch (error) { + this.logger.error('Poll cycle failed', { + error: error instanceof Error ? error.message : String(error), + stack: error instanceof Error ? error.stack : undefined, + }); + } finally { + this.schedulePoll(); + } + } + + private async fetchRemoteTools(): Promise { + const configs = await this.config.workflowPort.getMcpServerConfigs(); + if (configs.length === 0) return []; + + const mergedConfig: McpConfiguration = { + ...configs[0], + configs: Object.assign({}, ...configs.map(c => c.configs)), + }; + + return this.config.aiClient.loadRemoteTools(mergedConfig); + } + + private async executeStep( + step: PendingStepExecution, + loadTools: () => Promise, + ): Promise { + const key = Runner.stepKey(step); + this.inFlightSteps.add(key); + + let result: StepExecutionResult; + + try { + const executor = await StepExecutorFactory.create(step, this.contextConfig, loadTools); + result = await executor.execute(); + } catch (error) { + // This block should never execute: the factory and executor contracts guarantee no rejection. + // It guards against future regressions. + this.logger.error('FATAL: executor contract violated — step outcome not reported', { + runId: step.runId, + stepId: step.stepId, + error: error instanceof Error ? error.message : String(error), + }); + + return; // Cannot report an outcome: the orchestrator will timeout on this step + } finally { + this.inFlightSteps.delete(key); + } + + try { + await this.config.workflowPort.updateStepExecution(step.runId, result.stepOutcome); + } catch (error) { + this.logger.error('Failed to report step outcome', { + runId: step.runId, + stepId: step.stepId, + stepIndex: step.stepIndex, + error: error instanceof Error ? error.message : String(error), + cause: causeMessage(error), + stack: error instanceof Error ? 
error.stack : undefined, + }); + } } - // eslint-disable-next-line class-methods-use-this, @typescript-eslint/no-unused-vars - async triggerPoll(_runId: string): Promise { - // TODO: trigger immediate poll cycle for this runId + private get contextConfig(): StepContextConfig { + return { + aiClient: this.config.aiClient, + agentPort: this.config.agentPort, + workflowPort: this.config.workflowPort, + runStore: this.config.runStore, + logger: this.logger, + }; } } diff --git a/packages/workflow-executor/src/types/execution.ts b/packages/workflow-executor/src/types/execution.ts index 406d1e4f0f..3ec08b3345 100644 --- a/packages/workflow-executor/src/types/execution.ts +++ b/packages/workflow-executor/src/types/execution.ts @@ -4,17 +4,16 @@ import type { RecordRef } from './record'; import type { StepDefinition } from './step-definition'; import type { StepOutcome } from './step-outcome'; import type { AgentPort } from '../ports/agent-port'; +import type { Logger } from '../ports/logger-port'; import type { RunStore } from '../ports/run-store'; import type { WorkflowPort } from '../ports/workflow-port'; -import type { BaseChatModel } from '@langchain/core/language_models/chat_models'; +import type { BaseChatModel } from '@forestadmin/ai-proxy'; export interface Step { stepDefinition: StepDefinition; stepOutcome: StepOutcome; } -export type UserInput = { type: 'confirmation'; confirmed: boolean }; - export interface PendingStepExecution { readonly runId: string; readonly stepId: string; @@ -22,13 +21,17 @@ export interface PendingStepExecution { readonly baseRecordRef: RecordRef; readonly stepDefinition: StepDefinition; readonly previousSteps: ReadonlyArray; - readonly userInput?: UserInput; + readonly userConfirmed?: boolean; } export interface StepExecutionResult { stepOutcome: StepOutcome; } +export interface IStepExecutor { + execute(): Promise; +} + export interface ExecutionContext { readonly runId: string; readonly stepId: string; @@ -39,6 +42,7 @@ export 
interface ExecutionContext readonly agentPort: AgentPort; readonly workflowPort: WorkflowPort; readonly runStore: RunStore; - readonly history: ReadonlyArray>; - readonly remoteTools: readonly unknown[]; + readonly previousSteps: ReadonlyArray>; + readonly userConfirmed?: boolean; + readonly logger: Logger; } diff --git a/packages/workflow-executor/src/types/record.ts b/packages/workflow-executor/src/types/record.ts index b5070c39f4..2237600fb7 100644 --- a/packages/workflow-executor/src/types/record.ts +++ b/packages/workflow-executor/src/types/record.ts @@ -6,6 +6,8 @@ export interface FieldSchema { fieldName: string; displayName: string; isRelationship: boolean; + /** Cardinality of the relation. Absent for non-relationship fields. */ + relationType?: 'BelongsTo' | 'HasMany' | 'HasOne'; } export interface ActionSchema { diff --git a/packages/workflow-executor/src/types/step-definition.ts b/packages/workflow-executor/src/types/step-definition.ts index ca23e5b413..e2a324618a 100644 --- a/packages/workflow-executor/src/types/step-definition.ts +++ b/packages/workflow-executor/src/types/step-definition.ts @@ -6,6 +6,7 @@ export enum StepType { UpdateRecord = 'update-record', TriggerAction = 'trigger-action', LoadRelatedRecord = 'load-related-record', + McpTask = 'mcp-task', } interface BaseStepDefinition { @@ -19,12 +20,18 @@ export interface ConditionStepDefinition extends BaseStepDefinition { options: [string, ...string[]]; } -export interface AiTaskStepDefinition extends BaseStepDefinition { - type: Exclude; - recordSourceStepId?: string; - automaticCompletion?: boolean; - allowedTools?: string[]; - remoteToolsSourceId?: string; +export interface RecordTaskStepDefinition extends BaseStepDefinition { + type: Exclude; + automaticExecution?: boolean; } -export type StepDefinition = ConditionStepDefinition | AiTaskStepDefinition; +export interface McpTaskStepDefinition extends BaseStepDefinition { + type: StepType.McpTask; + mcpServerId?: string; + 
automaticExecution?: boolean; +} + +export type StepDefinition = + | ConditionStepDefinition + | RecordTaskStepDefinition + | McpTaskStepDefinition; diff --git a/packages/workflow-executor/src/types/step-execution-data.ts b/packages/workflow-executor/src/types/step-execution-data.ts index eb022a273c..edd5f8df02 100644 --- a/packages/workflow-executor/src/types/step-execution-data.ts +++ b/packages/workflow-executor/src/types/step-execution-data.ts @@ -13,21 +13,23 @@ interface BaseStepExecutionData { export interface ConditionStepExecutionData extends BaseStepExecutionData { type: 'condition'; executionParams: { answer: string | null; reasoning?: string }; - executionResult: { answer: string }; + executionResult?: { answer: string }; } -// -- Read Record -- +// -- Shared -- -interface FieldReadBase { - fieldName: string; +export interface FieldRef { + name: string; displayName: string; } -export interface FieldReadSuccess extends FieldReadBase { +// -- Read Record -- + +export interface FieldReadSuccess extends FieldRef { value: unknown; } -export interface FieldReadError extends FieldReadBase { +export interface FieldReadError extends FieldRef { error: string; } @@ -35,15 +37,72 @@ export type FieldReadResult = FieldReadSuccess | FieldReadError; export interface ReadRecordStepExecutionData extends BaseStepExecutionData { type: 'read-record'; - executionParams: { fieldNames: string[] }; + executionParams: { fields: FieldRef[] }; executionResult: { fields: FieldReadResult[] }; selectedRecordRef: RecordRef; } +// -- Update Record -- + +export interface UpdateRecordStepExecutionData extends BaseStepExecutionData { + type: 'update-record'; + executionParams?: FieldRef & { value: string }; + /** User confirmed → values returned by updateRecord. User rejected → skipped. */ + executionResult?: { updatedValues: Record } | { skipped: true }; + /** AI-selected field and value awaiting user confirmation. Used in the confirmation flow only. 
*/ + pendingData?: FieldRef & { value: string }; + selectedRecordRef: RecordRef; +} + +// -- Trigger Action -- + +export interface ActionRef { + name: string; + displayName: string; +} + +// Intentionally separate from ActionRef/FieldRef: expected to gain relation-specific +// fields (e.g. relationType) in a future iteration. +export interface RelationRef { + name: string; + displayName: string; +} + +export interface TriggerRecordActionStepExecutionData extends BaseStepExecutionData { + type: 'trigger-action'; + /** Display name and technical name of the executed action. */ + executionParams?: ActionRef; + executionResult?: { success: true; actionResult: unknown } | { skipped: true }; + /** AI-selected action awaiting user confirmation. Used in the confirmation flow only. */ + pendingData?: ActionRef; + selectedRecordRef: RecordRef; +} + +// -- Mcp Task -- + +/** Reference to an MCP tool by its sanitized name (OpenAI-safe, alphanumeric + underscores/hyphens). */ +export interface McpToolRef { + name: string; +} + +/** A resolved tool call: sanitized tool name + input parameters sent to the tool. 
*/ +export interface McpToolCall extends McpToolRef { + input: Record; +} + +export interface McpTaskStepExecutionData extends BaseStepExecutionData { + type: 'mcp-task'; + executionParams?: McpToolCall; + executionResult?: + | { success: true; toolResult: unknown; formattedResponse?: string } + | { skipped: true }; + pendingData?: McpToolCall; +} + // -- Generic AI Task (fallback for untyped steps) -- -export interface AiTaskStepExecutionData extends BaseStepExecutionData { - type: 'ai-task'; +export interface RecordTaskStepExecutionData extends BaseStepExecutionData { + type: 'record-task'; executionParams?: Record; executionResult?: Record; toolConfirmationInterruption?: Record; @@ -51,9 +110,30 @@ export interface AiTaskStepExecutionData extends BaseStepExecutionData { // -- Load Related Record -- +export interface LoadRelatedRecordPendingData extends RelationRef { + /** Collection name of the related records — needed to build RecordRef in Branch A. */ + relatedCollectionName: string; + /** AI-selected fields suggested for display on the frontend. undefined = not computed (no non-relation fields). */ + suggestedFields?: string[]; + /** + * The record id to load. Initially set by the AI; overwritten by the frontend via + * PATCH /runs/:runId/steps/:stepIndex/pending-data (not yet implemented). + */ + selectedRecordId: Array; +} + export interface LoadRelatedRecordStepExecutionData extends BaseStepExecutionData { type: 'load-related-record'; - record: RecordRef; + /** AI-selected relation with pre-fetched candidates awaiting user confirmation. */ + pendingData?: LoadRelatedRecordPendingData; + /** The record ref used to load the relation. Required for handleConfirmationFlow. */ + selectedRecordRef: RecordRef; + executionParams?: RelationRef; + /** + * Navigation path captured at execution time — used by StepSummaryBuilder for AI context. + * Source is not repeated here — it is always selectedRecordRef, consistent with other step types. 
+ */ + executionResult?: { relation: RelationRef; record: RecordRef } | { skipped: true }; } // -- Union -- @@ -61,18 +141,11 @@ export interface LoadRelatedRecordStepExecutionData extends BaseStepExecutionDat export type StepExecutionData = | ConditionStepExecutionData | ReadRecordStepExecutionData - | AiTaskStepExecutionData - | LoadRelatedRecordStepExecutionData; - -export type ExecutedStepExecutionData = - | ConditionStepExecutionData - | ReadRecordStepExecutionData - | AiTaskStepExecutionData; - -// TODO: this condition should change when load-related-record gets its own executor -// and produces executionParams/executionResult like other steps. -export function isExecutedStepOnExecutor( - data: StepExecutionData | undefined, -): data is ExecutedStepExecutionData { - return !!data && data.type !== 'load-related-record'; -} + | UpdateRecordStepExecutionData + | TriggerRecordActionStepExecutionData + | RecordTaskStepExecutionData + | LoadRelatedRecordStepExecutionData + | McpTaskStepExecutionData; + +/** Alias for StepExecutionData — kept for backwards-compatible consumption at the call sites. */ +export type ExecutedStepExecutionData = StepExecutionData; diff --git a/packages/workflow-executor/src/types/step-outcome.ts b/packages/workflow-executor/src/types/step-outcome.ts index 9a564748eb..3421b60176 100644 --- a/packages/workflow-executor/src/types/step-outcome.ts +++ b/packages/workflow-executor/src/types/step-outcome.ts @@ -1,15 +1,17 @@ /** @draft Types derived from the workflow-executor spec -- subject to change. */ -type BaseStepStatus = 'success' | 'error'; +import { StepType } from './step-definition'; + +export type BaseStepStatus = 'success' | 'error'; /** Condition steps can fall back to human decision when the AI is uncertain. */ export type ConditionStepStatus = BaseStepStatus | 'manual-decision'; -/** AI task steps can pause mid-execution to await user input (e.g. tool confirmation). 
*/ -export type AiTaskStepStatus = BaseStepStatus | 'awaiting-input'; +/** AI task steps can pause mid-execution to await user input (e.g. awaiting-input). */ +export type RecordTaskStepStatus = BaseStepStatus | 'awaiting-input'; /** Union of all step statuses. */ -export type StepStatus = ConditionStepStatus | AiTaskStepStatus; +export type StepStatus = ConditionStepStatus | RecordTaskStepStatus; /** * StepOutcome is sent to the orchestrator — it must NEVER contain client data. @@ -30,9 +32,21 @@ export interface ConditionStepOutcome extends BaseStepOutcome { selectedOption?: string; } -export interface AiTaskStepOutcome extends BaseStepOutcome { - type: 'ai-task'; - status: AiTaskStepStatus; +export interface RecordTaskStepOutcome extends BaseStepOutcome { + type: 'record-task'; + status: RecordTaskStepStatus; +} + +export interface McpTaskStepOutcome extends BaseStepOutcome { + type: 'mcp-task'; + status: RecordTaskStepStatus; } -export type StepOutcome = ConditionStepOutcome | AiTaskStepOutcome; +export type StepOutcome = ConditionStepOutcome | RecordTaskStepOutcome | McpTaskStepOutcome; + +export function stepTypeToOutcomeType(type: StepType): 'condition' | 'record-task' | 'mcp-task' { + if (type === StepType.Condition) return 'condition'; + if (type === StepType.McpTask) return 'mcp-task'; + + return 'record-task'; +} diff --git a/packages/workflow-executor/test/adapters/agent-client-agent-port.test.ts b/packages/workflow-executor/test/adapters/agent-client-agent-port.test.ts index b564eeaf5e..cca7c3b4f9 100644 --- a/packages/workflow-executor/test/adapters/agent-client-agent-port.test.ts +++ b/packages/workflow-executor/test/adapters/agent-client-agent-port.test.ts @@ -77,7 +77,7 @@ describe('AgentClientAgentPort', () => { it('should return a RecordData for a simple PK', async () => { mockCollection.list.mockResolvedValue([{ id: 42, name: 'Alice' }]); - const result = await port.getRecord('users', [42]); + const result = await port.getRecord({ collection: 
'users', id: [42] }); expect(mockCollection.list).toHaveBeenCalledWith({ filters: { field: 'id', operator: 'Equal', value: 42 }, @@ -93,7 +93,7 @@ describe('AgentClientAgentPort', () => { it('should build a composite And filter for composite PKs', async () => { mockCollection.list.mockResolvedValue([{ tenantId: 1, orderId: 2 }]); - await port.getRecord('orders', [1, 2]); + await port.getRecord({ collection: 'orders', id: [1, 2] }); expect(mockCollection.list).toHaveBeenCalledWith({ filters: { @@ -110,13 +110,15 @@ describe('AgentClientAgentPort', () => { it('should throw a RecordNotFoundError when no record is found', async () => { mockCollection.list.mockResolvedValue([]); - await expect(port.getRecord('users', [999])).rejects.toThrow(RecordNotFoundError); + await expect(port.getRecord({ collection: 'users', id: [999] })).rejects.toThrow( + RecordNotFoundError, + ); }); - it('should pass fields to list when fieldNames is provided', async () => { + it('should pass fields to list when fields is provided', async () => { mockCollection.list.mockResolvedValue([{ id: 42, name: 'Alice' }]); - await port.getRecord('users', [42], ['id', 'name']); + await port.getRecord({ collection: 'users', id: [42], fields: ['id', 'name'] }); expect(mockCollection.list).toHaveBeenCalledWith({ filters: { field: 'id', operator: 'Equal', value: 42 }, @@ -125,10 +127,10 @@ describe('AgentClientAgentPort', () => { }); }); - it('should not pass fields to list when fieldNames is an empty array', async () => { + it('should not pass fields to list when fields is an empty array', async () => { mockCollection.list.mockResolvedValue([{ id: 42, name: 'Alice' }]); - await port.getRecord('users', [42], []); + await port.getRecord({ collection: 'users', id: [42], fields: [] }); expect(mockCollection.list).toHaveBeenCalledWith({ filters: { field: 'id', operator: 'Equal', value: 42 }, @@ -136,10 +138,10 @@ describe('AgentClientAgentPort', () => { }); }); - it('should not pass fields to list when 
fieldNames is undefined', async () => { + it('should not pass fields to list when fields is undefined', async () => { mockCollection.list.mockResolvedValue([{ id: 42, name: 'Alice' }]); - await port.getRecord('users', [42]); + await port.getRecord({ collection: 'users', id: [42] }); expect(mockCollection.list).toHaveBeenCalledWith({ filters: { field: 'id', operator: 'Equal', value: 42 }, @@ -150,7 +152,7 @@ describe('AgentClientAgentPort', () => { it('should fallback to pk field "id" when collection is unknown', async () => { mockCollection.list.mockResolvedValue([{ id: 1 }]); - const result = await port.getRecord('unknown', [1]); + const result = await port.getRecord({ collection: 'unknown', id: [1] }); expect(mockCollection.list).toHaveBeenCalledWith( expect.objectContaining({ @@ -165,7 +167,11 @@ describe('AgentClientAgentPort', () => { it('should call update with pipe-encoded id and return a RecordData', async () => { mockCollection.update.mockResolvedValue({ id: 42, name: 'Bob' }); - const result = await port.updateRecord('users', [42], { name: 'Bob' }); + const result = await port.updateRecord({ + collection: 'users', + id: [42], + values: { name: 'Bob' }, + }); expect(mockCollection.update).toHaveBeenCalledWith('42', { name: 'Bob' }); expect(result).toEqual({ @@ -178,7 +184,7 @@ describe('AgentClientAgentPort', () => { it('should encode composite PK to pipe format for update', async () => { mockCollection.update.mockResolvedValue({ tenantId: 1, orderId: 2 }); - await port.updateRecord('orders', [1, 2], { status: 'done' }); + await port.updateRecord({ collection: 'orders', id: [1, 2], values: { status: 'done' } }); expect(mockCollection.update).toHaveBeenCalledWith('1|2', { status: 'done' }); }); @@ -191,7 +197,12 @@ describe('AgentClientAgentPort', () => { { id: 11, title: 'Post B' }, ]); - const result = await port.getRelatedData('users', [42], 'posts'); + const result = await port.getRelatedData({ + collection: 'users', + id: [42], + relation: 'posts', + 
limit: null, + }); expect(mockCollection.relation).toHaveBeenCalledWith('posts', '42'); expect(result).toEqual([ @@ -208,10 +219,33 @@ describe('AgentClientAgentPort', () => { ]); }); + it('should apply pagination when limit is a number', async () => { + mockRelation.list.mockResolvedValue([{ id: 10, title: 'Post A' }]); + + await port.getRelatedData({ collection: 'users', id: [42], relation: 'posts', limit: 5 }); + + expect(mockRelation.list).toHaveBeenCalledWith( + expect.objectContaining({ pagination: { size: 5, number: 1 } }), + ); + }); + + it('should not apply pagination when limit is null', async () => { + mockRelation.list.mockResolvedValue([]); + + await port.getRelatedData({ collection: 'users', id: [42], relation: 'posts', limit: null }); + + expect(mockRelation.list).toHaveBeenCalledWith({}); + }); + it('should fallback to relationName when no CollectionSchema exists', async () => { mockRelation.list.mockResolvedValue([{ id: 1 }]); - const result = await port.getRelatedData('users', [42], 'unknownRelation'); + const result = await port.getRelatedData({ + collection: 'users', + id: [42], + relation: 'unknownRelation', + limit: null, + }); expect(result[0].collectionName).toBe('unknownRelation'); expect(result[0].recordId).toEqual([1]); @@ -220,26 +254,72 @@ describe('AgentClientAgentPort', () => { it('should return an empty array when no related data exists', async () => { mockRelation.list.mockResolvedValue([]); - expect(await port.getRelatedData('users', [42], 'posts')).toEqual([]); + expect( + await port.getRelatedData({ + collection: 'users', + id: [42], + relation: 'posts', + limit: null, + }), + ).toEqual([]); + }); + + it('should forward fields to the list call when provided', async () => { + mockRelation.list.mockResolvedValue([{ id: 10, title: 'Post A' }]); + + await port.getRelatedData({ + collection: 'users', + id: [42], + relation: 'posts', + limit: null, + fields: ['title'], + }); + + expect(mockRelation.list).toHaveBeenCalledWith( + 
expect.objectContaining({ fields: ['title'] }), + ); + }); + + it('should omit fields from the list call when not provided', async () => { + mockRelation.list.mockResolvedValue([{ id: 10 }]); + + await port.getRelatedData({ collection: 'users', id: [42], relation: 'posts', limit: null }); + + expect(mockRelation.list).toHaveBeenCalledWith( + expect.not.objectContaining({ fields: expect.anything() }), + ); }); }); describe('executeAction', () => { - it('should encode recordIds to pipe format and call execute', async () => { + it('should encode ids to pipe format and call execute', async () => { mockAction.execute.mockResolvedValue({ success: 'done' }); - const result = await port.executeAction('users', 'sendEmail', [[1], [2]]); + const result = await port.executeAction({ + collection: 'users', + action: 'sendEmail', + id: [1], + }); - expect(mockCollection.action).toHaveBeenCalledWith('sendEmail', { recordIds: ['1', '2'] }); + expect(mockCollection.action).toHaveBeenCalledWith('sendEmail', { recordIds: ['1'] }); expect(result).toEqual({ success: 'done' }); }); + it('should call execute with empty recordIds when ids is not provided', async () => { + mockAction.execute.mockResolvedValue(undefined); + + await port.executeAction({ collection: 'users', action: 'archive' }); + + expect(mockCollection.action).toHaveBeenCalledWith('archive', { recordIds: [] }); + expect(mockAction.execute).toHaveBeenCalled(); + }); + it('should propagate errors from action execution', async () => { mockAction.execute.mockRejectedValue(new Error('Action failed')); - await expect(port.executeAction('users', 'sendEmail', [[1]])).rejects.toThrow( - 'Action failed', - ); + await expect( + port.executeAction({ collection: 'users', action: 'sendEmail', id: [1] }), + ).rejects.toThrow('Action failed'); }); }); }); diff --git a/packages/workflow-executor/test/adapters/forest-server-workflow-port.test.ts b/packages/workflow-executor/test/adapters/forest-server-workflow-port.test.ts index 
9e69a04eaf..8b38812dff 100644 --- a/packages/workflow-executor/test/adapters/forest-server-workflow-port.test.ts +++ b/packages/workflow-executor/test/adapters/forest-server-workflow-port.test.ts @@ -38,6 +38,34 @@ describe('ForestServerWorkflowPort', () => { }); }); + describe('getPendingStepExecutionsForRun', () => { + it('calls the pending step execution route with the runId query param', async () => { + const step = { runId: 'run-42' } as PendingStepExecution; + mockQuery.mockResolvedValue(step); + + const result = await port.getPendingStepExecutionsForRun('run-42'); + + expect(mockQuery).toHaveBeenCalledWith( + options, + 'get', + '/liana/v1/workflow-step-executions/pending?runId=run-42', + ); + expect(result).toBe(step); + }); + + it('encodes special characters in the runId', async () => { + mockQuery.mockResolvedValue({} as PendingStepExecution); + + await port.getPendingStepExecutionsForRun('run/42 special'); + + expect(mockQuery).toHaveBeenCalledWith( + options, + 'get', + '/liana/v1/workflow-step-executions/pending?runId=run%2F42%20special', + ); + }); + }); + describe('updateStepExecution', () => { it('should post step outcome to the complete route', async () => { mockQuery.mockResolvedValue(undefined); @@ -101,5 +129,11 @@ describe('ForestServerWorkflowPort', () => { await expect(port.getPendingStepExecutions()).rejects.toThrow('Network error'); }); + + it('should propagate errors from getPendingStepExecutionsForRun', async () => { + mockQuery.mockRejectedValue(new Error('Network error')); + + await expect(port.getPendingStepExecutionsForRun('run-1')).rejects.toThrow('Network error'); + }); }); }); diff --git a/packages/workflow-executor/test/executors/base-step-executor.test.ts b/packages/workflow-executor/test/executors/base-step-executor.test.ts index 86491fbb8f..8c3f5c2543 100644 --- a/packages/workflow-executor/test/executors/base-step-executor.test.ts +++ b/packages/workflow-executor/test/executors/base-step-executor.test.ts @@ -1,20 +1,49 @@ +/* 
eslint-disable max-classes-per-file */ +import type { Logger } from '../../src/ports/logger-port'; import type { RunStore } from '../../src/ports/run-store'; import type { ExecutionContext, StepExecutionResult } from '../../src/types/execution'; import type { RecordRef } from '../../src/types/record'; import type { StepDefinition } from '../../src/types/step-definition'; import type { StepExecutionData } from '../../src/types/step-execution-data'; -import type { StepOutcome } from '../../src/types/step-outcome'; -import type { BaseMessage, SystemMessage } from '@langchain/core/messages'; -import type { DynamicStructuredTool } from '@langchain/core/tools'; +import type { BaseStepStatus, StepOutcome } from '../../src/types/step-outcome'; +import type { BaseMessage, DynamicStructuredTool } from '@forestadmin/ai-proxy'; -import { MalformedToolCallError, MissingToolCallError } from '../../src/errors'; +import { SystemMessage } from '@forestadmin/ai-proxy'; + +import { + MalformedToolCallError, + MissingToolCallError, + NoRecordsError, + StepPersistenceError, +} from '../../src/errors'; import BaseStepExecutor from '../../src/executors/base-step-executor'; import { StepType } from '../../src/types/step-definition'; /** Concrete subclass that exposes protected methods for testing. 
*/ class TestableExecutor extends BaseStepExecutor { - async execute(): Promise { - throw new Error('not used'); + constructor(context: ExecutionContext, private readonly errorToThrow?: unknown) { + super(context); + } + + protected async doExecute(): Promise { + if (this.errorToThrow !== undefined) throw this.errorToThrow; + + return this.buildOutcomeResult({ status: 'success' }); + } + + protected buildOutcomeResult(outcome: { + status: BaseStepStatus; + error?: string; + }): StepExecutionResult { + return { + stepOutcome: { + type: 'record-task', + stepId: this.context.stepId, + stepIndex: this.context.stepIndex, + status: outcome.status, + ...(outcome.error !== undefined && { error: outcome.error }), + }, + }; } override buildPreviousStepsMessages(): Promise { @@ -54,6 +83,10 @@ function makeMockRunStore(stepExecutions: StepExecutionData[] = []): RunStore { }; } +function makeMockLogger(): Logger { + return { error: jest.fn() }; +} + function makeContext(overrides: Partial = {}): ExecutionContext { return { runId: 'run-1', @@ -73,8 +106,8 @@ function makeContext(overrides: Partial = {}): ExecutionContex agentPort: {} as ExecutionContext['agentPort'], workflowPort: {} as ExecutionContext['workflowPort'], runStore: makeMockRunStore(), - history: [], - remoteTools: [], + previousSteps: [], + logger: makeMockLogger(), ...overrides, }; } @@ -87,7 +120,7 @@ describe('BaseStepExecutor', () => { expect(await executor.buildPreviousStepsMessages()).toEqual([]); }); - it('includes prompt and executionParams from previous steps', async () => { + it('calls getStepExecutions with runId and returns a SystemMessage with step content', async () => { const runStore = makeMockRunStore([ { type: 'condition', @@ -98,286 +131,140 @@ describe('BaseStepExecutor', () => { ]); const executor = new TestableExecutor( makeContext({ - history: [makeHistoryEntry({ stepId: 'cond-1', stepIndex: 0, prompt: 'Approve?' 
})], + previousSteps: [makeHistoryEntry({ stepId: 'cond-1', stepIndex: 0, prompt: 'Approve?' })], runStore, }), ); - const result = await executor - .buildPreviousStepsMessages() - .then(msgs => msgs[0]?.content ?? ''); + const messages = await executor.buildPreviousStepsMessages(); - expect(result).toContain('Step "cond-1"'); - expect(result).toContain('Prompt: Approve?'); - expect(result).toContain('Input: {"answer":"Yes","reasoning":"Order is valid"}'); - expect(result).toContain('Output: {"answer":"Yes"}'); expect(runStore.getStepExecutions).toHaveBeenCalledWith('run-1'); + expect(messages).toHaveLength(1); + expect(messages[0]).toBeInstanceOf(SystemMessage); + expect(messages[0].content).toContain('Step "cond-1"'); + expect(messages[0].content).toContain('Prompt: Approve?'); }); - it('uses Input for matched steps and History for unmatched steps', async () => { + it('separates multiple previous steps with a blank line', async () => { + const runStore = makeMockRunStore([ + { + type: 'condition', + stepIndex: 0, + executionParams: { answer: 'Yes', reasoning: 'Valid' }, + executionResult: { answer: 'Yes' }, + }, + { + type: 'condition', + stepIndex: 1, + executionParams: { answer: 'No', reasoning: 'Wrong' }, + executionResult: { answer: 'No' }, + }, + ]); const executor = new TestableExecutor( makeContext({ - history: [ - makeHistoryEntry({ stepId: 'cond-1', stepIndex: 0 }), + previousSteps: [ + makeHistoryEntry({ stepId: 'cond-1', stepIndex: 0, prompt: 'First?' }), makeHistoryEntry({ stepId: 'cond-2', stepIndex: 1, prompt: 'Second?' }), ], - // Only step 1 has an execution entry — step 0 has no match - runStore: makeMockRunStore([ - { - type: 'condition', - stepIndex: 1, - executionParams: { answer: 'No', reasoning: 'Clearly no' }, - executionResult: { answer: 'No' }, - }, - ]), + runStore, }), ); - const result = await executor - .buildPreviousStepsMessages() - .then(msgs => msgs[0]?.content ?? 
''); + const messages = await executor.buildPreviousStepsMessages(); + const content = messages[0].content as string; - expect(result).toContain('Step "cond-1"'); - expect(result).toContain('History: {"status":"success"}'); - expect(result).toContain('Step "cond-2"'); - expect(result).toContain('Input: {"answer":"No","reasoning":"Clearly no"}'); - expect(result).toContain('Output: {"answer":"No"}'); + expect(content).toContain('Step "cond-1"'); + expect(content).toContain('Step "cond-2"'); + expect(content).toContain('\n\nStep "cond-2"'); }); + }); - it('falls back to History when no matching step execution in RunStore', async () => { - const executor = new TestableExecutor( - makeContext({ - history: [ - makeHistoryEntry({ stepId: 'orphan', stepIndex: 5, prompt: 'Orphan step' }), - makeHistoryEntry({ stepId: 'matched', stepIndex: 1, prompt: 'Matched step' }), - ], - runStore: makeMockRunStore([ - { - type: 'condition', - stepIndex: 1, - executionParams: { answer: 'B', reasoning: 'Option B fits' }, - executionResult: { answer: 'B' }, - }, - ]), - }), - ); + describe('execute error handling', () => { + it('converts NoRecordsError to error outcome', async () => { + const executor = new TestableExecutor(makeContext(), new NoRecordsError()); - const result = await executor - .buildPreviousStepsMessages() - .then(msgs => msgs[0]?.content ?? 
''); + const result = await executor.execute(); - expect(result).toContain('Step "orphan"'); - expect(result).toContain('History: {"status":"success"}'); - expect(result).toContain('Step "matched"'); - expect(result).toContain('Input: {"answer":"B","reasoning":"Option B fits"}'); - expect(result).toContain('Output: {"answer":"B"}'); + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe('No records available'); }); - it('includes selectedOption in History for condition steps', async () => { - const entry = makeHistoryEntry({ - stepId: 'cond-approval', - stepIndex: 0, - prompt: 'Approved?', + describe('unexpected error handling', () => { + it('returns error outcome instead of rethrowing', async () => { + const executor = new TestableExecutor(makeContext(), new Error('db connection refused')); + const result = await executor.execute(); + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe('Unexpected error during step execution'); }); - (entry.stepOutcome as { selectedOption?: string }).selectedOption = 'Yes'; - - const executor = new TestableExecutor( - makeContext({ - history: [entry], - runStore: makeMockRunStore([]), - }), - ); - - const result = await executor - .buildPreviousStepsMessages() - .then(msgs => msgs[0]?.content ?? 
''); - expect(result).toContain('Step "cond-approval"'); - expect(result).toContain('"selectedOption":"Yes"'); - }); - - it('includes error in History for failed steps', async () => { - const entry = makeHistoryEntry({ - stepId: 'failing-step', - stepIndex: 0, - prompt: 'Do something', + it('logs the full error context when logger is provided', async () => { + const logger = makeMockLogger(); + const executor = new TestableExecutor( + makeContext({ logger }), + new Error('db connection refused'), + ); + await executor.execute(); + expect(logger.error).toHaveBeenCalledWith( + 'Unexpected error during step execution', + expect.objectContaining({ + runId: 'run-1', + stepId: 'step-0', + stepIndex: 0, + error: 'db connection refused', + }), + ); }); - entry.stepOutcome.status = 'error'; - (entry.stepOutcome as { error?: string }).error = 'AI could not match an option'; - - const executor = new TestableExecutor( - makeContext({ - history: [entry], - runStore: makeMockRunStore([]), - }), - ); - - const result = await executor - .buildPreviousStepsMessages() - .then(msgs => msgs[0]?.content ?? ''); - - expect(result).toContain('"status":"error"'); - expect(result).toContain('"error":"AI could not match an option"'); - }); - - it('includes status in History for ai-task steps without RunStore data', async () => { - const entry: { stepDefinition: StepDefinition; stepOutcome: StepOutcome } = { - stepDefinition: { - type: StepType.ReadRecord, - prompt: 'Run task', - }, - stepOutcome: { - type: 'ai-task', - stepId: 'ai-step', - stepIndex: 0, - status: 'awaiting-input', - }, - }; - - const executor = new TestableExecutor( - makeContext({ - history: [entry], - runStore: makeMockRunStore([]), - }), - ); - const result = await executor - .buildPreviousStepsMessages() - .then(msgs => msgs[0]?.content ?? 
''); - - expect(result).toContain('Step "ai-step"'); - expect(result).toContain('History: {"status":"awaiting-input"}'); - }); - - it('uses Input when RunStore has executionParams, History otherwise', async () => { - const condEntry = makeHistoryEntry({ - stepId: 'cond-1', - stepIndex: 0, - prompt: 'Approved?', + it('includes stack trace in log context', async () => { + const logger = makeMockLogger(); + const err = new Error('db connection refused'); + const executor = new TestableExecutor(makeContext({ logger }), err); + await executor.execute(); + expect(logger.error).toHaveBeenCalledWith( + 'Unexpected error during step execution', + expect.objectContaining({ stack: err.stack }), + ); }); - (condEntry.stepOutcome as { selectedOption?: string }).selectedOption = 'Yes'; - - const aiEntry: { stepDefinition: StepDefinition; stepOutcome: StepOutcome } = { - stepDefinition: { - type: StepType.ReadRecord, - prompt: 'Read name', - }, - stepOutcome: { - type: 'ai-task', - stepId: 'read-customer', - stepIndex: 1, - status: 'success', - }, - }; - - const executor = new TestableExecutor( - makeContext({ - history: [condEntry, aiEntry], - runStore: makeMockRunStore([ - { - type: 'ai-task', - stepIndex: 1, - executionParams: { answer: 'John Doe' }, - }, - ]), - }), - ); - - const result = await executor - .buildPreviousStepsMessages() - .then(msgs => msgs[0]?.content ?? 
''); - - expect(result).toContain('Step "cond-1"'); - expect(result).toContain('History: {"status":"success","selectedOption":"Yes"}'); - expect(result).toContain('Step "read-customer"'); - expect(result).toContain('Input: {"answer":"John Doe"}'); - }); - - it('prefers RunStore execution data over History fallback', async () => { - const entry = makeHistoryEntry({ stepId: 'cond-1', stepIndex: 0, prompt: 'Pick one' }); - (entry.stepOutcome as { selectedOption?: string }).selectedOption = 'A'; - const executor = new TestableExecutor( - makeContext({ - history: [entry], - runStore: makeMockRunStore([ - { - type: 'condition', - stepIndex: 0, - executionParams: { answer: 'A', reasoning: 'Best fit' }, - executionResult: { answer: 'A' }, - }, - ]), - }), - ); - - const result = await executor - .buildPreviousStepsMessages() - .then(msgs => msgs[0]?.content ?? ''); + it('handles non-Error throwables without crashing', async () => { + const executor = new TestableExecutor(makeContext(), 'a raw string thrown'); + const result = await executor.execute(); + expect(result.stepOutcome.status).toBe('error'); + }); - expect(result).toContain('Input: {"answer":"A","reasoning":"Best fit"}'); - expect(result).toContain('Output: {"answer":"A"}'); - expect(result).not.toContain('History:'); + it('includes cause in log when non-WorkflowExecutorError has a cause', async () => { + const logger = makeMockLogger(); + const cause = new Error('root cause'); + const error = Object.assign(new Error('wrapper error'), { cause }); + const executor = new TestableExecutor(makeContext({ logger }), error); + await executor.execute(); + expect(logger.error).toHaveBeenCalledWith( + 'Unexpected error during step execution', + expect.objectContaining({ cause: 'root cause' }), + ); + }); }); - it('omits Input line when executionParams is undefined', async () => { - const entry: { stepDefinition: StepDefinition; stepOutcome: StepOutcome } = { - stepDefinition: { - type: StepType.ReadRecord, - prompt: 'Do 
something', - }, - stepOutcome: { - type: 'ai-task', - stepId: 'ai-step', - stepIndex: 0, - status: 'success', - }, - }; - - const executor = new TestableExecutor( - makeContext({ - history: [entry], - runStore: makeMockRunStore([ - { - type: 'ai-task', - stepIndex: 0, - }, - ]), + it('logs cause when WorkflowExecutorError has a cause', async () => { + const logger = makeMockLogger(); + const cause = new Error('db timeout'); + const error = new StepPersistenceError('write failed', cause); + const executor = new TestableExecutor(makeContext({ logger }), error); + await executor.execute(); + expect(logger.error).toHaveBeenCalledWith( + 'write failed', + expect.objectContaining({ + cause: 'db timeout', + stack: cause.stack, }), ); - - const result = await executor - .buildPreviousStepsMessages() - .then(msgs => msgs[0]?.content ?? ''); - - expect(result).toContain('Step "ai-step"'); - expect(result).toContain('Prompt: Do something'); - expect(result).not.toContain('Input:'); }); - it('shows "(no prompt)" when step has no prompt', async () => { - const entry = makeHistoryEntry({ stepIndex: 0 }); - entry.stepDefinition.prompt = undefined; - - const executor = new TestableExecutor( - makeContext({ - history: [entry], - runStore: makeMockRunStore([ - { - type: 'condition', - stepIndex: 0, - executionParams: { answer: 'A', reasoning: 'Only option' }, - executionResult: { answer: 'A' }, - }, - ]), - }), - ); - - const result = await executor - .buildPreviousStepsMessages() - .then(msgs => msgs[0]?.content ?? 
''); - - expect(result).toContain('Prompt: (no prompt)'); + it('does not log when WorkflowExecutorError has no cause', async () => { + const logger = makeMockLogger(); + const executor = new TestableExecutor(makeContext({ logger }), new MissingToolCallError()); + await executor.execute(); + expect(logger.error).not.toHaveBeenCalled(); }); }); diff --git a/packages/workflow-executor/test/executors/condition-step-executor.test.ts b/packages/workflow-executor/test/executors/condition-step-executor.test.ts index 23eb6c8365..696b200ab4 100644 --- a/packages/workflow-executor/test/executors/condition-step-executor.test.ts +++ b/packages/workflow-executor/test/executors/condition-step-executor.test.ts @@ -53,8 +53,8 @@ function makeContext( agentPort: {} as ExecutionContext['agentPort'], workflowPort: {} as ExecutionContext['workflowPort'], runStore: makeMockRunStore(), - history: [], - remoteTools: [], + previousSteps: [], + logger: { error: jest.fn() }, ...overrides, }; } @@ -175,7 +175,7 @@ describe('ConditionStepExecutor', () => { const context = makeContext({ model: mockModel.model, runStore, - history: [ + previousSteps: [ { stepDefinition: { type: StepType.Condition, @@ -252,30 +252,32 @@ describe('ConditionStepExecutor', () => { const result = await executor.execute(); + expect(result.stepOutcome.type).toBe('condition'); expect(result.stepOutcome.status).toBe('error'); expect(result.stepOutcome.error).toBe( - 'AI returned a malformed tool call for "choose-gateway-option": JSON parse error', + "The AI returned an unexpected response. 
Try rephrasing the step's prompt.", ); expect(runStore.saveStepExecution).not.toHaveBeenCalled(); }); }); describe('error propagation', () => { - it('returns error status when model invocation fails', async () => { + it('returns error outcome for infrastructure errors', async () => { const invoke = jest.fn().mockRejectedValue(new Error('API timeout')); const bindTools = jest.fn().mockReturnValue({ invoke }); + const runStore = makeMockRunStore(); const context = makeContext({ model: { bindTools } as unknown as ExecutionContext['model'], + runStore, }); const executor = new ConditionStepExecutor(context); const result = await executor.execute(); - expect(result.stepOutcome.status).toBe('error'); - expect(result.stepOutcome.error).toBe('API timeout'); + expect(runStore.saveStepExecution).not.toHaveBeenCalled(); }); - it('lets run store errors propagate', async () => { + it('returns error outcome when run store save fails with context', async () => { const mockModel = makeMockModel({ option: 'Approve', reasoning: 'OK', @@ -286,7 +288,10 @@ describe('ConditionStepExecutor', () => { }); const executor = new ConditionStepExecutor(makeContext({ model: mockModel.model, runStore })); - await expect(executor.execute()).rejects.toThrow('Storage full'); + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe('The step result could not be saved. 
Please retry.'); }); }); }); diff --git a/packages/workflow-executor/test/executors/load-related-record-step-executor.test.ts b/packages/workflow-executor/test/executors/load-related-record-step-executor.test.ts new file mode 100644 index 0000000000..46910f72db --- /dev/null +++ b/packages/workflow-executor/test/executors/load-related-record-step-executor.test.ts @@ -0,0 +1,1497 @@ +import type { AgentPort } from '../../src/ports/agent-port'; +import type { RunStore } from '../../src/ports/run-store'; +import type { WorkflowPort } from '../../src/ports/workflow-port'; +import type { ExecutionContext } from '../../src/types/execution'; +import type { CollectionSchema, RecordData, RecordRef } from '../../src/types/record'; +import type { RecordTaskStepDefinition } from '../../src/types/step-definition'; +import type { LoadRelatedRecordStepExecutionData } from '../../src/types/step-execution-data'; + +import LoadRelatedRecordStepExecutor from '../../src/executors/load-related-record-step-executor'; +import { StepType } from '../../src/types/step-definition'; + +function makeStep(overrides: Partial = {}): RecordTaskStepDefinition { + return { + type: StepType.LoadRelatedRecord, + prompt: 'Load the related order for this customer', + ...overrides, + }; +} + +function makeRecordRef(overrides: Partial = {}): RecordRef { + return { + collectionName: 'customers', + recordId: [42], + stepIndex: 0, + ...overrides, + }; +} + +function makeRelatedRecordData(overrides: Partial = {}): RecordData { + return { + collectionName: 'orders', + recordId: [99], + values: { total: 150 }, + ...overrides, + }; +} + +function makeMockAgentPort(relatedData: RecordData[] = [makeRelatedRecordData()]): AgentPort { + return { + getRecord: jest.fn(), + updateRecord: jest.fn(), + getRelatedData: jest.fn().mockResolvedValue(relatedData), + executeAction: jest.fn(), + } as unknown as AgentPort; +} + +/** Default schema: 'Order' is BelongsTo (single record), 'Address' is HasMany. 
*/ +function makeCollectionSchema(overrides: Partial = {}): CollectionSchema { + return { + collectionName: 'customers', + collectionDisplayName: 'Customers', + primaryKeyFields: ['id'], + fields: [ + { fieldName: 'email', displayName: 'Email', isRelationship: false }, + { fieldName: 'order', displayName: 'Order', isRelationship: true, relationType: 'BelongsTo' }, + { + fieldName: 'address', + displayName: 'Address', + isRelationship: true, + relationType: 'HasMany', + }, + ], + actions: [], + ...overrides, + }; +} + +function makeMockRunStore(overrides: Partial = {}): RunStore { + return { + getStepExecutions: jest.fn().mockResolvedValue([]), + saveStepExecution: jest.fn().mockResolvedValue(undefined), + ...overrides, + }; +} + +function makeMockWorkflowPort( + schemasByCollection: Record = { + customers: makeCollectionSchema(), + }, +): WorkflowPort { + return { + getPendingStepExecutions: jest.fn().mockResolvedValue([]), + getPendingStepExecutionsForRun: jest.fn().mockResolvedValue(null), + updateStepExecution: jest.fn().mockResolvedValue(undefined), + getCollectionSchema: jest + .fn() + .mockImplementation((name: string) => + Promise.resolve( + schemasByCollection[name] ?? makeCollectionSchema({ collectionName: name }), + ), + ), + getMcpServerConfigs: jest.fn().mockResolvedValue([]), + }; +} + +function makeMockModel(toolCallArgs?: Record, toolName = 'select-relation') { + const invoke = jest.fn().mockResolvedValue({ + tool_calls: toolCallArgs ? 
[{ name: toolName, args: toolCallArgs, id: 'call_1' }] : undefined, + }); + const bindTools = jest.fn().mockReturnValue({ invoke }); + const model = { bindTools } as unknown as ExecutionContext['model']; + + return { model, bindTools, invoke }; +} + +function makeContext( + overrides: Partial> = {}, +): ExecutionContext { + return { + runId: 'run-1', + stepId: 'load-1', + stepIndex: 0, + baseRecordRef: makeRecordRef(), + stepDefinition: makeStep(), + model: makeMockModel({ relationName: 'Order', reasoning: 'User requested order' }).model, + agentPort: makeMockAgentPort(), + workflowPort: makeMockWorkflowPort(), + runStore: makeMockRunStore(), + previousSteps: [], + logger: { error: jest.fn() }, + ...overrides, + }; +} + +/** Builds a valid pending execution for Branch A tests. */ +function makePendingExecution( + overrides: Partial = {}, +): LoadRelatedRecordStepExecutionData { + return { + type: 'load-related-record', + stepIndex: 0, + pendingData: { + displayName: 'Order', + name: 'order', + relatedCollectionName: 'orders', + selectedRecordId: [99], + suggestedFields: ['status', 'amount'], + }, + selectedRecordRef: makeRecordRef(), + ...overrides, + }; +} + +describe('LoadRelatedRecordStepExecutor', () => { + describe('automaticExecution: BelongsTo — load direct (Branch B)', () => { + it('fetches 1 related record and returns success', async () => { + const agentPort = makeMockAgentPort(); + const mockModel = makeMockModel({ relationName: 'Order', reasoning: 'User requested order' }); + const runStore = makeMockRunStore(); + const context = makeContext({ + model: mockModel.model, + agentPort, + runStore, + stepDefinition: makeStep({ automaticExecution: true }), + }); + const executor = new LoadRelatedRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('success'); + expect(agentPort.getRelatedData).toHaveBeenCalledWith({ + collection: 'customers', + id: [42], + relation: 'order', + limit: 1, + }); + 
expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ + type: 'load-related-record', + stepIndex: 0, + executionParams: { displayName: 'Order', name: 'order' }, + executionResult: expect.objectContaining({ + record: expect.objectContaining({ + collectionName: 'orders', + recordId: [99], + stepIndex: 0, + }), + }), + selectedRecordRef: expect.objectContaining({ + collectionName: 'customers', + recordId: [42], + }), + }), + ); + }); + }); + + describe('automaticExecution: HasMany — 2 AI calls (Branch B)', () => { + it('runs selectRelevantFields + selectBestRecord to pick the best candidate', async () => { + const hasManySchema = makeCollectionSchema({ + fields: [ + { fieldName: 'name', displayName: 'Name', isRelationship: false }, + { + fieldName: 'address', + displayName: 'Address', + isRelationship: true, + relationType: 'HasMany', + }, + ], + }); + + const relatedData: RecordData[] = [ + { collectionName: 'addresses', recordId: [1], values: { city: 'Paris' } }, + { collectionName: 'addresses', recordId: [2], values: { city: 'Lyon' } }, + ]; + const agentPort = makeMockAgentPort(relatedData); + + const addressSchema = makeCollectionSchema({ + collectionName: 'addresses', + collectionDisplayName: 'Addresses', + fields: [ + { fieldName: 'city', displayName: 'City', isRelationship: false }, + { fieldName: 'zip', displayName: 'Zip', isRelationship: false }, + ], + }); + + // Call 1: select-relation → Address; Call 2: select-fields → ['City'] (displayName); + // Call 3: select-record-by-content → index 1 (Lyon) + const invoke = jest + .fn() + .mockResolvedValueOnce({ + tool_calls: [ + { + name: 'select-relation', + args: { relationName: 'Address', reasoning: 'Load addresses' }, + id: 'c1', + }, + ], + }) + .mockResolvedValueOnce({ + tool_calls: [{ name: 'select-fields', args: { fieldNames: ['City'] }, id: 'c2' }], + }) + .mockResolvedValueOnce({ + tool_calls: [ + { + name: 'select-record-by-content', + args: { recordIndex: 1, 
reasoning: 'Lyon is relevant' }, + id: 'c3', + }, + ], + }); + const bindTools = jest.fn().mockReturnValue({ invoke }); + const model = { bindTools } as unknown as ExecutionContext['model']; + + const runStore = makeMockRunStore(); + const context = makeContext({ + model, + agentPort, + runStore, + workflowPort: makeMockWorkflowPort({ + customers: hasManySchema, + addresses: addressSchema, + }), + stepDefinition: makeStep({ automaticExecution: true }), + }); + const executor = new LoadRelatedRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('success'); + expect(bindTools).toHaveBeenCalledTimes(3); + expect(bindTools.mock.calls[0][0][0].name).toBe('select-relation'); + expect(bindTools.mock.calls[1][0][0].name).toBe('select-fields'); + expect(bindTools.mock.calls[2][0][0].name).toBe('select-record-by-content'); + + // Fetches 50 candidates (HasMany) + expect(agentPort.getRelatedData).toHaveBeenCalledWith({ + collection: 'customers', + id: [42], + relation: 'address', + limit: 50, + }); + + expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ + executionResult: expect.objectContaining({ + record: expect.objectContaining({ collectionName: 'addresses', recordId: [2] }), + }), + }), + ); + }); + + it('skips field-selection AI call when related collection has no non-relation fields', async () => { + const hasManySchema = makeCollectionSchema({ + fields: [ + { + fieldName: 'address', + displayName: 'Address', + isRelationship: true, + relationType: 'HasMany', + }, + ], + }); + const relatedData: RecordData[] = [ + { collectionName: 'addresses', recordId: [1], values: {} }, + { collectionName: 'addresses', recordId: [2], values: {} }, + ]; + const agentPort = makeMockAgentPort(relatedData); + const addressSchema = makeCollectionSchema({ + collectionName: 'addresses', + collectionDisplayName: 'Addresses', + fields: [], + }); + + // Call 1: select-relation; Call 2: 
select-record-by-content (no select-fields) + const invoke = jest + .fn() + .mockResolvedValueOnce({ + tool_calls: [ + { + name: 'select-relation', + args: { relationName: 'Address', reasoning: 'Load addresses' }, + id: 'c1', + }, + ], + }) + .mockResolvedValueOnce({ + tool_calls: [ + { + name: 'select-record-by-content', + args: { recordIndex: 0, reasoning: 'First is best' }, + id: 'c2', + }, + ], + }); + const bindTools = jest.fn().mockReturnValue({ invoke }); + const model = { bindTools } as unknown as ExecutionContext['model']; + + const context = makeContext({ + model, + agentPort, + workflowPort: makeMockWorkflowPort({ customers: hasManySchema, addresses: addressSchema }), + stepDefinition: makeStep({ automaticExecution: true }), + }); + const executor = new LoadRelatedRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('success'); + expect(bindTools).toHaveBeenCalledTimes(2); + expect(bindTools.mock.calls[0][0][0].name).toBe('select-relation'); + expect(bindTools.mock.calls[1][0][0].name).toBe('select-record-by-content'); + }); + + it('takes the single candidate directly without AI record-selection calls', async () => { + const hasManySchema = makeCollectionSchema({ + fields: [ + { + fieldName: 'address', + displayName: 'Address', + isRelationship: true, + relationType: 'HasMany', + }, + ], + }); + const agentPort = makeMockAgentPort([ + { collectionName: 'addresses', recordId: [1], values: { city: 'Paris' } }, + ]); + + const invoke = jest.fn().mockResolvedValueOnce({ + tool_calls: [ + { + name: 'select-relation', + args: { relationName: 'Address', reasoning: 'Load address' }, + id: 'c1', + }, + ], + }); + const bindTools = jest.fn().mockReturnValue({ invoke }); + const model = { bindTools } as unknown as ExecutionContext['model']; + + const context = makeContext({ + model, + agentPort, + workflowPort: makeMockWorkflowPort({ customers: hasManySchema }), + stepDefinition: makeStep({ 
automaticExecution: true }), + }); + const executor = new LoadRelatedRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('success'); + // Only select-relation was called — no field/record AI calls for single candidate + expect(bindTools).toHaveBeenCalledTimes(1); + }); + + it('returns error outcome when AI selects an out-of-range record index', async () => { + const hasManySchema = makeCollectionSchema({ + fields: [ + { + fieldName: 'address', + displayName: 'Address', + isRelationship: true, + relationType: 'HasMany', + }, + ], + }); + const relatedData: RecordData[] = [ + { collectionName: 'addresses', recordId: [1], values: { city: 'Paris' } }, + { collectionName: 'addresses', recordId: [2], values: { city: 'Lyon' } }, + ]; + const agentPort = makeMockAgentPort(relatedData); + const addressSchema = makeCollectionSchema({ + collectionName: 'addresses', + collectionDisplayName: 'Addresses', + fields: [{ fieldName: 'city', displayName: 'City', isRelationship: false }], + }); + + // Call 1: select-relation; Call 2: select-fields; Call 3: out-of-range index 999 + const invoke = jest + .fn() + .mockResolvedValueOnce({ + tool_calls: [ + { + name: 'select-relation', + args: { relationName: 'Address', reasoning: 'Load addresses' }, + id: 'c1', + }, + ], + }) + .mockResolvedValueOnce({ + tool_calls: [{ name: 'select-fields', args: { fieldNames: ['city'] }, id: 'c2' }], + }) + .mockResolvedValueOnce({ + tool_calls: [ + { + name: 'select-record-by-content', + args: { recordIndex: 999, reasoning: 'Out of range' }, + id: 'c3', + }, + ], + }); + const bindTools = jest.fn().mockReturnValue({ invoke }); + const model = { bindTools } as unknown as ExecutionContext['model']; + + const runStore = makeMockRunStore(); + const context = makeContext({ + model, + agentPort, + runStore, + workflowPort: makeMockWorkflowPort({ customers: hasManySchema, addresses: addressSchema }), + stepDefinition: makeStep({ 
automaticExecution: true }), + }); + const executor = new LoadRelatedRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe( + "The AI made an unexpected choice. Try rephrasing the step's prompt.", + ); + expect(runStore.saveStepExecution).not.toHaveBeenCalled(); + }); + + it('returns error when AI returns empty fieldNames violating the min:1 constraint', async () => { + const hasManySchema = makeCollectionSchema({ + fields: [ + { + fieldName: 'address', + displayName: 'Address', + isRelationship: true, + relationType: 'HasMany', + }, + ], + }); + const relatedData: RecordData[] = [ + { collectionName: 'addresses', recordId: [1], values: { city: 'Paris' } }, + { collectionName: 'addresses', recordId: [2], values: { city: 'Lyon' } }, + ]; + const agentPort = makeMockAgentPort(relatedData); + const addressSchema = makeCollectionSchema({ + collectionName: 'addresses', + collectionDisplayName: 'Addresses', + fields: [{ fieldName: 'city', displayName: 'City', isRelationship: false }], + }); + + // Call 1: select-relation; Call 2: select-fields returns empty array (AI violation) + const invoke = jest + .fn() + .mockResolvedValueOnce({ + tool_calls: [ + { + name: 'select-relation', + args: { relationName: 'Address', reasoning: 'Load addresses' }, + id: 'c1', + }, + ], + }) + .mockResolvedValueOnce({ + tool_calls: [{ name: 'select-fields', args: { fieldNames: [] }, id: 'c2' }], + }); + const bindTools = jest.fn().mockReturnValue({ invoke }); + const model = { bindTools } as unknown as ExecutionContext['model']; + + const runStore = makeMockRunStore(); + const context = makeContext({ + model, + agentPort, + runStore, + workflowPort: makeMockWorkflowPort({ customers: hasManySchema, addresses: addressSchema }), + stepDefinition: makeStep({ automaticExecution: true }), + }); + const executor = new LoadRelatedRecordStepExecutor(context); + + const result = await 
executor.execute(); + + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe( + "The AI made an unexpected choice. Try rephrasing the step's prompt.", + ); + expect(runStore.saveStepExecution).not.toHaveBeenCalled(); + }); + }); + + describe('automaticExecution: HasOne — load direct (Branch B)', () => { + it('fetches 1 related record (same path as BelongsTo) and returns success', async () => { + const hasOneSchema = makeCollectionSchema({ + fields: [ + { + fieldName: 'profile', + displayName: 'Profile', + isRelationship: true, + relationType: 'HasOne', + }, + ], + }); + const agentPort = makeMockAgentPort([ + { collectionName: 'profiles', recordId: [5], values: {} }, + ]); + const mockModel = makeMockModel({ relationName: 'Profile', reasoning: 'Load profile' }); + const runStore = makeMockRunStore(); + const context = makeContext({ + model: mockModel.model, + agentPort, + runStore, + workflowPort: makeMockWorkflowPort({ customers: hasOneSchema }), + stepDefinition: makeStep({ automaticExecution: true }), + }); + const executor = new LoadRelatedRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('success'); + // HasOne uses the same fetchFirstCandidate path as BelongsTo — limit: 1 + expect(agentPort.getRelatedData).toHaveBeenCalledWith({ + collection: 'customers', + id: [42], + relation: 'profile', + limit: 1, + }); + expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ + executionResult: expect.objectContaining({ + record: expect.objectContaining({ collectionName: 'profiles', recordId: [5] }), + }), + }), + ); + }); + }); + + describe('without automaticExecution: awaiting-input (Branch C)', () => { + it('saves AI suggestion in pendingData and returns awaiting-input (single record — no field/record AI calls)', async () => { + const agentPort = makeMockAgentPort(); // returns 1 record: orders #99 + const mockModel = 
makeMockModel({ relationName: 'Order', reasoning: 'User requested order' }); + const runStore = makeMockRunStore(); + const context = makeContext({ model: mockModel.model, agentPort, runStore }); + const executor = new LoadRelatedRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('awaiting-input'); + expect(agentPort.getRelatedData).toHaveBeenCalledWith({ + collection: 'customers', + id: [42], + relation: 'order', + limit: 50, + }); + // Single record → only select-relation AI call + expect(mockModel.bindTools).toHaveBeenCalledTimes(1); + expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ + type: 'load-related-record', + stepIndex: 0, + pendingData: { + displayName: 'Order', + name: 'order', + relatedCollectionName: 'orders', + selectedRecordId: [99], + suggestedFields: [], + }, + selectedRecordRef: expect.objectContaining({ + collectionName: 'customers', + recordId: [42], + }), + }), + ); + }); + + it('runs field-selection + record-selection AI calls when multiple related records exist', async () => { + const relatedData: RecordData[] = [ + { collectionName: 'orders', recordId: [1], values: { status: 'pending' } }, + { collectionName: 'orders', recordId: [2], values: { status: 'completed' } }, + ]; + const agentPort = makeMockAgentPort(relatedData); + + const ordersSchema = makeCollectionSchema({ + collectionName: 'orders', + collectionDisplayName: 'Orders', + fields: [{ fieldName: 'status', displayName: 'Status', isRelationship: false }], + }); + + const invoke = jest + .fn() + .mockResolvedValueOnce({ + tool_calls: [ + { + name: 'select-relation', + args: { relationName: 'Order', reasoning: 'Load order' }, + id: 'c1', + }, + ], + }) + .mockResolvedValueOnce({ + tool_calls: [{ name: 'select-fields', args: { fieldNames: ['Status'] }, id: 'c2' }], + }) + .mockResolvedValueOnce({ + tool_calls: [ + { + name: 'select-record-by-content', + args: { recordIndex: 
1, reasoning: 'Completed is best' }, + id: 'c3', + }, + ], + }); + const bindTools = jest.fn().mockReturnValue({ invoke }); + const model = { bindTools } as unknown as ExecutionContext['model']; + + const runStore = makeMockRunStore(); + const context = makeContext({ + model, + agentPort, + runStore, + workflowPort: makeMockWorkflowPort({ + customers: makeCollectionSchema(), + orders: ordersSchema, + }), + }); + const executor = new LoadRelatedRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('awaiting-input'); + expect(bindTools).toHaveBeenCalledTimes(3); + expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ + pendingData: { + displayName: 'Order', + name: 'order', + relatedCollectionName: 'orders', + selectedRecordId: [2], // record at index 1 + suggestedFields: ['status'], + }, + }), + ); + }); + + it('skips field-selection AI call when related collection has no non-relation fields', async () => { + const relatedData: RecordData[] = [ + { collectionName: 'orders', recordId: [1], values: {} }, + { collectionName: 'orders', recordId: [2], values: {} }, + ]; + const agentPort = makeMockAgentPort(relatedData); + + const ordersSchema = makeCollectionSchema({ + collectionName: 'orders', + collectionDisplayName: 'Orders', + fields: [], + }); + + const invoke = jest + .fn() + .mockResolvedValueOnce({ + tool_calls: [ + { + name: 'select-relation', + args: { relationName: 'Order', reasoning: 'Load order' }, + id: 'c1', + }, + ], + }) + .mockResolvedValueOnce({ + tool_calls: [ + { + name: 'select-record-by-content', + args: { recordIndex: 0, reasoning: 'First' }, + id: 'c2', + }, + ], + }); + const bindTools = jest.fn().mockReturnValue({ invoke }); + const model = { bindTools } as unknown as ExecutionContext['model']; + + const runStore = makeMockRunStore(); + const context = makeContext({ + model, + agentPort, + runStore, + workflowPort: makeMockWorkflowPort({ + 
customers: makeCollectionSchema(), + orders: ordersSchema, + }), + }); + const executor = new LoadRelatedRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('awaiting-input'); + // select-relation + select-record-by-content (no select-fields) + expect(bindTools).toHaveBeenCalledTimes(2); + expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ + pendingData: expect.objectContaining({ + selectedRecordId: [1], + suggestedFields: [], + }), + }), + ); + }); + }); + + describe('confirmation accepted (Branch A)', () => { + it('uses selectedRecordId from pendingData, no getRelatedData call', async () => { + const agentPort = makeMockAgentPort(); + const execution = makePendingExecution(); // selectedRecordId: [99] + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([execution]), + }); + const context = makeContext({ agentPort, runStore, userConfirmed: true }); + const executor = new LoadRelatedRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('success'); + expect(agentPort.getRelatedData).not.toHaveBeenCalled(); + expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ + type: 'load-related-record', + executionParams: { displayName: 'Order', name: 'order' }, + executionResult: expect.objectContaining({ + record: expect.objectContaining({ collectionName: 'orders', recordId: [99] }), + }), + pendingData: expect.objectContaining({ + displayName: 'Order', + name: 'order', + relatedCollectionName: 'orders', + selectedRecordId: [99], + }), + }), + ); + }); + + it('uses selectedRecordId when the user overrides the AI suggestion', async () => { + const agentPort = makeMockAgentPort(); + const execution = makePendingExecution({ + pendingData: { + displayName: 'Order', + name: 'order', + relatedCollectionName: 'orders', + suggestedFields: 
['status', 'amount'], + selectedRecordId: [42], + }, + }); + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([execution]), + }); + const context = makeContext({ agentPort, runStore, userConfirmed: true }); + const executor = new LoadRelatedRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('success'); + expect(agentPort.getRelatedData).not.toHaveBeenCalled(); + expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ + executionResult: expect.objectContaining({ + record: expect.objectContaining({ collectionName: 'orders', recordId: [42] }), + }), + }), + ); + }); + }); + + describe('confirmation rejected (Branch A)', () => { + it('skips the load when user rejects', async () => { + const agentPort = makeMockAgentPort(); + const execution = makePendingExecution(); + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([execution]), + }); + const context = makeContext({ agentPort, runStore, userConfirmed: false }); + const executor = new LoadRelatedRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('success'); + expect(agentPort.getRelatedData).not.toHaveBeenCalled(); + expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ + executionResult: { skipped: true }, + pendingData: expect.objectContaining({ displayName: 'Order', name: 'order' }), + }), + ); + }); + }); + + describe('no pending data in confirmation flow (Branch A)', () => { + it('returns error outcome when no execution record is found', async () => { + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([]), + }); + const context = makeContext({ runStore, userConfirmed: true }); + const executor = new LoadRelatedRecordStepExecutor(context); + + await 
expect(executor.execute()).resolves.toMatchObject({ + stepOutcome: { + type: 'record-task', + stepId: 'load-1', + stepIndex: 0, + status: 'error', + error: 'An unexpected error occurred while processing this step.', + }, + }); + expect(runStore.saveStepExecution).not.toHaveBeenCalled(); + }); + + it('returns error outcome when execution exists but pendingData is absent', async () => { + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([ + { + type: 'load-related-record', + stepIndex: 0, + selectedRecordRef: makeRecordRef(), + }, + ]), + }); + const context = makeContext({ runStore, userConfirmed: true }); + const executor = new LoadRelatedRecordStepExecutor(context); + + await expect(executor.execute()).resolves.toMatchObject({ + stepOutcome: { + type: 'record-task', + stepId: 'load-1', + stepIndex: 0, + status: 'error', + error: 'An unexpected error occurred while processing this step.', + }, + }); + expect(runStore.saveStepExecution).not.toHaveBeenCalled(); + }); + }); + + describe('NoRelationshipFieldsError', () => { + it('returns error when collection has no relationship fields', async () => { + const schema = makeCollectionSchema({ + fields: [{ fieldName: 'email', displayName: 'Email', isRelationship: false }], + }); + const mockModel = makeMockModel({ relationName: 'Order', reasoning: 'test' }); + const workflowPort = makeMockWorkflowPort({ customers: schema }); + const runStore = makeMockRunStore(); + const context = makeContext({ model: mockModel.model, runStore, workflowPort }); + const executor = new LoadRelatedRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe( + 'This record type has no relations configured in Forest Admin.', + ); + expect(runStore.saveStepExecution).not.toHaveBeenCalled(); + }); + }); + + describe('RelatedRecordNotFoundError', () => { + it('returns error when BelongsTo getRelatedData 
returns empty array (Branch B)', async () => { + const agentPort = makeMockAgentPort([]); + const mockModel = makeMockModel({ relationName: 'Order', reasoning: 'test' }); + const runStore = makeMockRunStore(); + const context = makeContext({ + model: mockModel.model, + agentPort, + runStore, + stepDefinition: makeStep({ automaticExecution: true }), + }); + const executor = new LoadRelatedRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe( + 'The related record could not be found. It may have been deleted.', + ); + expect(runStore.saveStepExecution).not.toHaveBeenCalled(); + }); + + it('returns error when HasMany getRelatedData returns empty array (Branch B)', async () => { + const hasManySchema = makeCollectionSchema({ + fields: [ + { + fieldName: 'address', + displayName: 'Address', + isRelationship: true, + relationType: 'HasMany', + }, + ], + }); + const agentPort = makeMockAgentPort([]); + const mockModel = makeMockModel({ relationName: 'Address', reasoning: 'test' }); + const runStore = makeMockRunStore(); + const context = makeContext({ + model: mockModel.model, + agentPort, + runStore, + workflowPort: makeMockWorkflowPort({ customers: hasManySchema }), + stepDefinition: makeStep({ automaticExecution: true }), + }); + const executor = new LoadRelatedRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe( + 'The related record could not be found. 
It may have been deleted.', + ); + expect(runStore.saveStepExecution).not.toHaveBeenCalled(); + }); + + it('returns error when getRelatedData returns empty array (Branch C)', async () => { + const agentPort = makeMockAgentPort([]); + const mockModel = makeMockModel({ relationName: 'Order', reasoning: 'test' }); + const runStore = makeMockRunStore(); + const context = makeContext({ model: mockModel.model, agentPort, runStore }); + const executor = new LoadRelatedRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe( + 'The related record could not be found. It may have been deleted.', + ); + expect(runStore.saveStepExecution).not.toHaveBeenCalled(); + }); + }); + + describe('StepPersistenceError post-load', () => { + it('returns error outcome when saveStepExecution fails after load (Branch B)', async () => { + const runStore = makeMockRunStore({ + saveStepExecution: jest.fn().mockRejectedValue(new Error('Disk full')), + }); + const context = makeContext({ + runId: 'run-1', + stepIndex: 0, + runStore, + stepDefinition: makeStep({ automaticExecution: true }), + }); + const executor = new LoadRelatedRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe('The step result could not be saved. 
Please retry.'); + }); + + it('returns error outcome when saveStepExecution fails after load (Branch A confirmed)', async () => { + const execution = makePendingExecution(); + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([execution]), + saveStepExecution: jest.fn().mockRejectedValue(new Error('Disk full')), + }); + const context = makeContext({ runId: 'run-1', stepIndex: 0, runStore, userConfirmed: true }); + const executor = new LoadRelatedRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe('The step result could not be saved. Please retry.'); + }); + }); + + describe('resolveRelationName failure', () => { + it('returns error when AI returns a relation name not found in the schema', async () => { + const agentPort = makeMockAgentPort(); + const mockModel = makeMockModel({ relationName: 'NonExistentRelation', reasoning: 'test' }); + const schema = makeCollectionSchema({ + fields: [ + { + fieldName: 'order', + displayName: 'Order', + isRelationship: true, + relationType: 'BelongsTo', + }, + ], + }); + const workflowPort = makeMockWorkflowPort({ customers: schema }); + const runStore = makeMockRunStore(); + const context = makeContext({ + model: mockModel.model, + agentPort, + runStore, + workflowPort, + stepDefinition: makeStep({ automaticExecution: true }), + }); + const executor = new LoadRelatedRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe( + "The AI selected a relation that doesn't exist on this record. 
Try rephrasing the step's prompt.", + ); + expect(agentPort.getRelatedData).not.toHaveBeenCalled(); + }); + }); + + describe('AI malformed/missing tool call', () => { + it('returns error on malformed tool call', async () => { + const invoke = jest.fn().mockResolvedValue({ + tool_calls: [], + invalid_tool_calls: [ + { name: 'select-relation', args: '{bad json', error: 'JSON parse error' }, + ], + }); + const bindTools = jest.fn().mockReturnValue({ invoke }); + const runStore = makeMockRunStore(); + const context = makeContext({ + model: { bindTools } as unknown as ExecutionContext['model'], + runStore, + }); + const executor = new LoadRelatedRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.type).toBe('record-task'); + expect(result.stepOutcome.stepId).toBe('load-1'); + expect(result.stepOutcome.stepIndex).toBe(0); + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe( + "The AI returned an unexpected response. Try rephrasing the step's prompt.", + ); + expect(runStore.saveStepExecution).not.toHaveBeenCalled(); + }); + + it('returns error when AI returns no tool call', async () => { + const invoke = jest.fn().mockResolvedValue({ tool_calls: [] }); + const bindTools = jest.fn().mockReturnValue({ invoke }); + const runStore = makeMockRunStore(); + const context = makeContext({ + model: { bindTools } as unknown as ExecutionContext['model'], + runStore, + }); + const executor = new LoadRelatedRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.type).toBe('record-task'); + expect(result.stepOutcome.stepId).toBe('load-1'); + expect(result.stepOutcome.stepIndex).toBe(0); + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe( + "The AI couldn't decide what to do. 
Try rephrasing the step's prompt.", + ); + expect(runStore.saveStepExecution).not.toHaveBeenCalled(); + }); + }); + + describe('infra error propagation', () => { + it('returns error outcome for getRelatedData infrastructure errors (Branch B)', async () => { + const agentPort = makeMockAgentPort(); + (agentPort.getRelatedData as jest.Mock).mockRejectedValue(new Error('Connection refused')); + const mockModel = makeMockModel({ relationName: 'Order', reasoning: 'test' }); + const context = makeContext({ + model: mockModel.model, + agentPort, + stepDefinition: makeStep({ automaticExecution: true }), + }); + const executor = new LoadRelatedRecordStepExecutor(context); + + const result = await executor.execute(); + expect(result.stepOutcome.status).toBe('error'); + }); + + it('returns error outcome for getRelatedData infrastructure errors (Branch C)', async () => { + const agentPort = makeMockAgentPort(); + (agentPort.getRelatedData as jest.Mock).mockRejectedValue(new Error('Connection refused')); + const mockModel = makeMockModel({ relationName: 'Order', reasoning: 'test' }); + const context = makeContext({ model: mockModel.model, agentPort }); + const executor = new LoadRelatedRecordStepExecutor(context); + + const result = await executor.execute(); + expect(result.stepOutcome.status).toBe('error'); + }); + + it('returns user message and logs cause when agentPort.getRelatedData throws an infra error', async () => { + const logger = { error: jest.fn() }; + const agentPort = makeMockAgentPort(); + (agentPort.getRelatedData as jest.Mock).mockRejectedValue(new Error('DB connection lost')); + const mockModel = makeMockModel({ relationName: 'Order', reasoning: 'test' }); + const context = makeContext({ + model: mockModel.model, + agentPort, + logger, + stepDefinition: makeStep({ automaticExecution: true }), + }); + const executor = new LoadRelatedRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('error'); + 
expect(result.stepOutcome.error).toBe( + 'An error occurred while accessing your data. Please try again.', + ); + expect(logger.error).toHaveBeenCalledWith( + 'Agent port "getRelatedData" failed: DB connection lost', + expect.objectContaining({ cause: 'DB connection lost' }), + ); + }); + }); + + describe('multi-record AI selection (base record pool)', () => { + it('uses AI to select among multiple base records then loads relation', async () => { + const baseRecordRef = makeRecordRef({ stepIndex: 1 }); + const relatedRecord = makeRecordRef({ + stepIndex: 2, + recordId: [99], + collectionName: 'orders', + }); + + const ordersSchema = makeCollectionSchema({ + collectionName: 'orders', + collectionDisplayName: 'Orders', + fields: [ + { + fieldName: 'invoice', + displayName: 'Invoice', + isRelationship: true, + relationType: 'BelongsTo', + }, + ], + }); + + // Call 1: select-record; Call 2: select-relation + const invoke = jest + .fn() + .mockResolvedValueOnce({ + tool_calls: [ + { + name: 'select-record', + args: { recordIdentifier: 'Step 2 - Orders #99' }, + id: 'call_1', + }, + ], + }) + .mockResolvedValueOnce({ + tool_calls: [ + { + name: 'select-relation', + args: { relationName: 'Invoice', reasoning: 'Load the invoice' }, + id: 'call_2', + }, + ], + }); + const bindTools = jest.fn().mockReturnValue({ invoke }); + const model = { bindTools } as unknown as ExecutionContext['model']; + + const agentPort = makeMockAgentPort([ + { collectionName: 'invoices', recordId: [55], values: {} }, + ]); + + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([ + { + type: 'load-related-record', + stepIndex: 2, + executionResult: { + relation: { name: 'order', displayName: 'Order' }, + record: relatedRecord, + }, + selectedRecordRef: makeRecordRef(), + }, + ]), + }); + const workflowPort = makeMockWorkflowPort({ + customers: makeCollectionSchema(), + orders: ordersSchema, + }); + const context = makeContext({ baseRecordRef, model, runStore, 
workflowPort, agentPort }); + const executor = new LoadRelatedRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('awaiting-input'); + expect(bindTools).toHaveBeenCalledTimes(2); + + const selectRecordTool = bindTools.mock.calls[0][0][0]; + expect(selectRecordTool.name).toBe('select-record'); + + const selectRelationTool = bindTools.mock.calls[1][0][0]; + expect(selectRelationTool.name).toBe('select-relation'); + + expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ + pendingData: expect.objectContaining({ + displayName: 'Invoice', + name: 'invoice', + relatedCollectionName: 'invoices', + selectedRecordId: [55], + }), + selectedRecordRef: expect.objectContaining({ recordId: [99], collectionName: 'orders' }), + }), + ); + }); + }); + + describe('stepOutcome shape', () => { + it('emits correct type, stepId and stepIndex in the outcome', async () => { + const context = makeContext({ stepDefinition: makeStep({ automaticExecution: true }) }); + const executor = new LoadRelatedRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome).toMatchObject({ + type: 'record-task', + stepId: 'load-1', + stepIndex: 0, + status: 'success', + }); + }); + }); + + describe('previous steps context', () => { + it('includes previous steps summary in select-relation messages', async () => { + const mockModel = makeMockModel({ relationName: 'Order', reasoning: 'test' }); + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([ + { + type: 'condition', + stepIndex: 0, + executionParams: { answer: 'Yes', reasoning: 'Approved' }, + }, + ]), + }); + const context = makeContext({ + model: mockModel.model, + runStore, + previousSteps: [ + { + stepDefinition: { + type: StepType.Condition, + options: ['Yes', 'No'], + prompt: 'Should we proceed?', + }, + stepOutcome: { + type: 'condition', + stepId: 'prev-step', + 
stepIndex: 0, + status: 'success', + }, + }, + ], + }); + const executor = new LoadRelatedRecordStepExecutor({ + ...context, + stepId: 'load-2', + stepIndex: 1, + }); + + await executor.execute(); + + const messages = mockModel.invoke.mock.calls[0][0]; + // previous steps message + system prompt + collection info + human message = 4 + expect(messages).toHaveLength(4); + expect(messages[0].content).toContain('Should we proceed?'); + expect(messages[0].content).toContain('"answer":"Yes"'); + expect(messages[1].content).toContain('loading a related record'); + }); + }); + + describe('default prompt', () => { + it('uses default prompt when step.prompt is undefined', async () => { + const mockModel = makeMockModel({ relationName: 'Order', reasoning: 'test' }); + const context = makeContext({ + model: mockModel.model, + stepDefinition: makeStep({ prompt: undefined }), + }); + const executor = new LoadRelatedRecordStepExecutor(context); + + await executor.execute(); + + const messages = mockModel.invoke.mock.calls[mockModel.invoke.mock.calls.length - 1][0]; + const humanMessage = messages[messages.length - 1]; + expect(humanMessage.content).toBe('**Request**: Load the relevant related record.'); + }); + }); + + describe('RunStore error propagation', () => { + it('returns error outcome when getStepExecutions fails (Branch A)', async () => { + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockRejectedValue(new Error('DB timeout')), + }); + const context = makeContext({ runStore, userConfirmed: true }); + const executor = new LoadRelatedRecordStepExecutor(context); + + const result = await executor.execute(); + expect(result.stepOutcome.status).toBe('error'); + }); + + it('returns error outcome when saveStepExecution fails saving awaiting-input (Branch C)', async () => { + const agentPort = makeMockAgentPort(); + const runStore = makeMockRunStore({ + saveStepExecution: jest.fn().mockRejectedValue(new Error('Disk full')), + }); + const context = 
makeContext({ agentPort, runStore }); + const executor = new LoadRelatedRecordStepExecutor(context); + + const result = await executor.execute(); + expect(result.stepOutcome.status).toBe('error'); + }); + + it('returns error outcome when saveStepExecution fails on user reject (Branch A)', async () => { + const execution = makePendingExecution(); + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([execution]), + saveStepExecution: jest.fn().mockRejectedValue(new Error('Disk full')), + }); + const context = makeContext({ runStore, userConfirmed: false }); + const executor = new LoadRelatedRecordStepExecutor(context); + + const result = await executor.execute(); + expect(result.stepOutcome.status).toBe('error'); + }); + }); + + describe('displayName → fieldName resolution fallback', () => { + it('resolves relation when AI returns technical name instead of displayName', async () => { + const agentPort = makeMockAgentPort(); + // AI returns technical name 'order' instead of display name 'Order' + const mockModel = makeMockModel({ relationName: 'order', reasoning: 'fallback' }); + const schema = makeCollectionSchema({ + fields: [ + { + fieldName: 'order', + displayName: 'Order', + isRelationship: true, + relationType: 'BelongsTo', + }, + ], + }); + const workflowPort = makeMockWorkflowPort({ customers: schema }); + const context = makeContext({ + model: mockModel.model, + agentPort, + workflowPort, + stepDefinition: makeStep({ automaticExecution: true }), + }); + const executor = new LoadRelatedRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('success'); + expect(agentPort.getRelatedData).toHaveBeenCalledWith({ + collection: 'customers', + id: [42], + relation: 'order', + limit: 1, + }); + }); + }); + + describe('schema caching', () => { + it('fetches getCollectionSchema once per collection even when called twice (Branch B)', async () => { + const workflowPort = 
makeMockWorkflowPort(); + const context = makeContext({ + workflowPort, + stepDefinition: makeStep({ automaticExecution: true }), + }); + const executor = new LoadRelatedRecordStepExecutor(context); + + await executor.execute(); + + expect(workflowPort.getCollectionSchema).toHaveBeenCalledTimes(1); + }); + }); + + describe('getAvailableRecordRefs filtering', () => { + it('excludes a pending load-related-record (no record field) from the record pool', async () => { + const baseRecordRef = makeRecordRef({ stepIndex: 1 }); + // A completed load-related-record (has record) — should appear in pool + const completedRecord = makeRecordRef({ + stepIndex: 2, + recordId: [99], + collectionName: 'orders', + }); + // A pending load-related-record (no record — awaiting-input state) — should be excluded + const pendingExecution = { + type: 'load-related-record' as const, + stepIndex: 3, + selectedRecordRef: makeRecordRef(), + pendingData: { + displayName: 'Invoice', + name: 'invoice', + relatedCollectionName: 'invoices', + selectedRecordId: [55], + }, + }; + + const ordersSchema = makeCollectionSchema({ + collectionName: 'orders', + collectionDisplayName: 'Orders', + fields: [ + { + fieldName: 'order', + displayName: 'Order', + isRelationship: true, + relationType: 'BelongsTo', + }, + ], + }); + + // Call 1: select-record (picks the completed related record) + // Call 2: select-relation + const invoke = jest + .fn() + .mockResolvedValueOnce({ + tool_calls: [ + { + name: 'select-record', + args: { recordIdentifier: 'Step 2 - Orders #99' }, + id: 'call_1', + }, + ], + }) + .mockResolvedValueOnce({ + tool_calls: [ + { + name: 'select-relation', + args: { relationName: 'Order', reasoning: 'test' }, + id: 'call_2', + }, + ], + }); + const bindTools = jest.fn().mockReturnValue({ invoke }); + const model = { bindTools } as unknown as ExecutionContext['model']; + + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([ + { + type: 'load-related-record', + 
stepIndex: 2, + executionResult: { + relation: { name: 'order', displayName: 'Order' }, + record: completedRecord, + }, + selectedRecordRef: makeRecordRef(), + }, + pendingExecution, + ]), + }); + const workflowPort = makeMockWorkflowPort({ + customers: makeCollectionSchema(), + orders: ordersSchema, + }); + const context = makeContext({ baseRecordRef, model, runStore, workflowPort }); + const executor = new LoadRelatedRecordStepExecutor(context); + + await executor.execute(); + + // Pool = [base, completedRecord] = 2 items → select-record IS invoked + // Pool does NOT include pending execution (no record) → only 2 options, not 3 + expect(bindTools).toHaveBeenCalledTimes(2); + const selectRecordTool = bindTools.mock.calls[0][0][0]; + expect(selectRecordTool.name).toBe('select-record'); + expect(selectRecordTool.schema.shape.recordIdentifier.options).toHaveLength(2); + expect(selectRecordTool.schema.shape.recordIdentifier.options).not.toContain( + expect.stringContaining('stepIndex: 3'), + ); + }); + }); +}); diff --git a/packages/workflow-executor/test/executors/mcp-task-step-executor.test.ts b/packages/workflow-executor/test/executors/mcp-task-step-executor.test.ts new file mode 100644 index 0000000000..5525014ddf --- /dev/null +++ b/packages/workflow-executor/test/executors/mcp-task-step-executor.test.ts @@ -0,0 +1,726 @@ +import type { RunStore } from '../../src/ports/run-store'; +import type { WorkflowPort } from '../../src/ports/workflow-port'; +import type { ExecutionContext } from '../../src/types/execution'; +import type { McpTaskStepDefinition } from '../../src/types/step-definition'; +import type { McpTaskStepExecutionData } from '../../src/types/step-execution-data'; + +import RemoteTool from '@forestadmin/ai-proxy/src/remote-tool'; + +import { StepStateError } from '../../src/errors'; +import McpTaskStepExecutor from '../../src/executors/mcp-task-step-executor'; +import { StepType } from '../../src/types/step-definition'; + +// 
--------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +class MockRemoteTool extends RemoteTool { + constructor(options: { name: string; sourceId?: string; invoke?: jest.Mock }) { + const invokeFn = options.invoke ?? jest.fn().mockResolvedValue('tool-result'); + super({ + tool: { + name: options.name, + description: `${options.name} description`, + schema: { parse: jest.fn(), _def: {} } as unknown as RemoteTool['base']['schema'], + invoke: invokeFn, + } as unknown as RemoteTool['base'], + sourceId: options.sourceId ?? 'mcp-server-1', + sourceType: 'mcp', + }); + } +} + +function makeStep(overrides: Partial = {}): McpTaskStepDefinition { + return { + type: StepType.McpTask, + prompt: 'Send a notification to the user', + ...overrides, + }; +} + +function makeMockRunStore(overrides: Partial = {}): RunStore { + return { + getStepExecutions: jest.fn().mockResolvedValue([]), + saveStepExecution: jest.fn().mockResolvedValue(undefined), + ...overrides, + }; +} + +function makeMockWorkflowPort(): WorkflowPort { + return { + getPendingStepExecutions: jest.fn().mockResolvedValue([]), + getPendingStepExecutionsForRun: jest.fn().mockResolvedValue(null), + updateStepExecution: jest.fn().mockResolvedValue(undefined), + getCollectionSchema: jest.fn().mockResolvedValue({ + collectionName: 'customers', + collectionDisplayName: 'Customers', + primaryKeyFields: ['id'], + fields: [], + actions: [], + }), + getMcpServerConfigs: jest.fn().mockResolvedValue([]), + }; +} + +function makeMockModel(toolName: string, toolArgs: Record) { + const invoke = jest.fn().mockResolvedValue({ + tool_calls: [{ name: toolName, args: toolArgs, id: 'call_1' }], + }); + const bindTools = jest.fn().mockReturnValue({ invoke }); + const model = { bindTools } as unknown as ExecutionContext['model']; + + return { model, bindTools, invoke }; +} + +function makeContext( + overrides: Partial> = {}, +): 
ExecutionContext { + return { + runId: 'run-1', + stepId: 'mcp-1', + stepIndex: 0, + baseRecordRef: { collectionName: 'customers', recordId: [42], stepIndex: 0 }, + stepDefinition: makeStep(), + model: makeMockModel('send_notification', { message: 'Hello' }).model, + agentPort: { + getRecord: jest.fn(), + updateRecord: jest.fn(), + getRelatedData: jest.fn(), + executeAction: jest.fn(), + } as unknown as ExecutionContext['agentPort'], + workflowPort: makeMockWorkflowPort(), + runStore: makeMockRunStore(), + previousSteps: [], + logger: { error: jest.fn() }, + ...overrides, + }; +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +describe('McpTaskStepExecutor', () => { + describe('automaticExecution: direct execution (Branch B)', () => { + it('invokes the tool and returns success', async () => { + const invokeFn = jest.fn().mockResolvedValue({ result: 'notification sent' }); + const tool = new MockRemoteTool({ + name: 'send_notification', + sourceId: 'mcp-server-1', + invoke: invokeFn, + }); + const { model, invoke: modelInvoke } = makeMockModel('send_notification', { + message: 'Hello', + }); + const runStore = makeMockRunStore(); + const context = makeContext({ + model, + runStore, + stepDefinition: makeStep({ automaticExecution: true }), + }); + const executor = new McpTaskStepExecutor(context, [tool]); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('success'); + expect(invokeFn).toHaveBeenCalledWith({ message: 'Hello' }); + expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ + type: 'mcp-task', + stepIndex: 0, + executionParams: { name: 'send_notification', input: { message: 'Hello' } }, + executionResult: { success: true, toolResult: { result: 'notification sent' } }, + }), + ); + // Model is invoked twice: once for tool selection, once for AI formatting 
+ expect(modelInvoke).toHaveBeenCalledTimes(2); + }); + + it('persists formattedResponse when AI formatting succeeds', async () => { + const toolResult = { result: 'notification sent' }; + const invokeFn = jest.fn().mockResolvedValue(toolResult); + const tool = new MockRemoteTool({ + name: 'send_notification', + sourceId: 'mcp-server-1', + invoke: invokeFn, + }); + const { model, invoke: modelInvoke } = makeMockModel('send_notification', { + message: 'Hello', + }); + // Second model call (formatting) returns a summary + modelInvoke + .mockResolvedValueOnce({ + tool_calls: [{ name: 'send_notification', args: { message: 'Hello' }, id: 'call_1' }], + }) + .mockResolvedValueOnce({ + tool_calls: [ + { name: 'summarize-result', args: { summary: 'Found 3 results.' }, id: 'call_2' }, + ], + }); + const runStore = makeMockRunStore(); + const context = makeContext({ + model, + runStore, + stepDefinition: makeStep({ automaticExecution: true }), + }); + const executor = new McpTaskStepExecutor(context, [tool]); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('success'); + expect(modelInvoke).toHaveBeenCalledTimes(2); + // First save: raw result only + expect(runStore.saveStepExecution).toHaveBeenNthCalledWith( + 1, + 'run-1', + expect.objectContaining({ + executionResult: { success: true, toolResult }, + }), + ); + // Second save: raw result + formattedResponse + expect(runStore.saveStepExecution).toHaveBeenNthCalledWith( + 2, + 'run-1', + expect.objectContaining({ + executionResult: { success: true, toolResult, formattedResponse: 'Found 3 results.' 
}, + }), + ); + }); + + it('returns success and logs when AI formatting throws', async () => { + const invokeFn = jest.fn().mockResolvedValue({ result: 'ok' }); + const tool = new MockRemoteTool({ + name: 'send_notification', + sourceId: 'mcp-server-1', + invoke: invokeFn, + }); + const { model, invoke: modelInvoke } = makeMockModel('send_notification', { message: 'Hi' }); + // Second call (formatting) returns no tool calls → MissingToolCallError + modelInvoke + .mockResolvedValueOnce({ + tool_calls: [{ name: 'send_notification', args: { message: 'Hi' }, id: 'call_1' }], + }) + .mockResolvedValueOnce({ tool_calls: [] }); + const logger = { error: jest.fn() }; + const runStore = makeMockRunStore(); + const context = makeContext({ + model, + runStore, + stepDefinition: makeStep({ automaticExecution: true }), + logger, + }); + const executor = new McpTaskStepExecutor(context, [tool]); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('success'); + // Only the first save (raw result) — no second save since formatting failed + expect(runStore.saveStepExecution).toHaveBeenCalledTimes(1); + expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ + executionResult: { success: true, toolResult: { result: 'ok' } }, + }), + ); + expect(logger.error).toHaveBeenCalledWith( + 'Failed to format MCP tool result, using generic fallback', + expect.objectContaining({ toolName: 'send_notification' }), + ); + }); + + it('does not call AI formatting when toolResult is null', async () => { + const invokeFn = jest.fn().mockResolvedValue(null); + const tool = new MockRemoteTool({ + name: 'send_notification', + sourceId: 'mcp-server-1', + invoke: invokeFn, + }); + const { model, invoke: modelInvoke } = makeMockModel('send_notification', { message: 'Hi' }); + const runStore = makeMockRunStore(); + const context = makeContext({ + model, + runStore, + stepDefinition: makeStep({ automaticExecution: true }), + }); + 
const executor = new McpTaskStepExecutor(context, [tool]); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('success'); + // Model called only once (tool selection) — no formatting call for null result + expect(modelInvoke).toHaveBeenCalledTimes(1); + expect(runStore.saveStepExecution).toHaveBeenCalledTimes(1); + expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ + executionResult: { success: true, toolResult: null }, + }), + ); + }); + }); + + describe('without automaticExecution: awaiting-input (Branch C)', () => { + it('saves pendingData and returns awaiting-input', async () => { + const { model } = makeMockModel('send_notification', { message: 'Hello' }); + const runStore = makeMockRunStore(); + const tool = new MockRemoteTool({ name: 'send_notification', sourceId: 'mcp-server-1' }); + const context = makeContext({ model, runStore }); + const executor = new McpTaskStepExecutor(context, [tool]); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('awaiting-input'); + expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ + type: 'mcp-task', + stepIndex: 0, + pendingData: { name: 'send_notification', input: { message: 'Hello' } }, + }), + ); + }); + + it('returns error when saveStepExecution fails (Branch C)', async () => { + const { model } = makeMockModel('send_notification', { message: 'Hello' }); + const logger = { error: jest.fn() }; + const runStore = makeMockRunStore({ + saveStepExecution: jest.fn().mockRejectedValue(new Error('DB unavailable')), + }); + const tool = new MockRemoteTool({ name: 'send_notification', sourceId: 'mcp-server-1' }); + const context = makeContext({ model, runStore, logger }); + const executor = new McpTaskStepExecutor(context, [tool]); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('error'); + 
expect(result.stepOutcome.error).toBe('The step result could not be saved. Please retry.'); + expect(logger.error).toHaveBeenCalledWith( + 'MCP task step state could not be persisted (run "run-1", step 0)', + expect.objectContaining({ cause: 'DB unavailable', stepId: 'mcp-1' }), + ); + }); + }); + + describe('confirmation accepted (Branch A)', () => { + it('loads pendingData, invokes the tool, and persists the result', async () => { + const invokeFn = jest.fn().mockResolvedValue('email sent'); + const tool = new MockRemoteTool({ + name: 'send_notification', + sourceId: 'mcp-server-1', + invoke: invokeFn, + }); + const execution: McpTaskStepExecutionData = { + type: 'mcp-task', + stepIndex: 0, + pendingData: { name: 'send_notification', input: { message: 'Hello' } }, + }; + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([execution]), + }); + const context = makeContext({ runStore, userConfirmed: true }); + const executor = new McpTaskStepExecutor(context, [tool]); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('success'); + expect(invokeFn).toHaveBeenCalledWith({ message: 'Hello' }); + expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ + type: 'mcp-task', + executionParams: { name: 'send_notification', input: { message: 'Hello' } }, + executionResult: { success: true, toolResult: 'email sent' }, + pendingData: { name: 'send_notification', input: { message: 'Hello' } }, + }), + ); + }); + }); + + describe('confirmation rejected (Branch A)', () => { + it('saves skipped result and returns success without invoking the tool', async () => { + const invokeFn = jest.fn(); + const tool = new MockRemoteTool({ + name: 'send_notification', + sourceId: 'mcp-server-1', + invoke: invokeFn, + }); + const execution: McpTaskStepExecutionData = { + type: 'mcp-task', + stepIndex: 0, + pendingData: { name: 'send_notification', input: { message: 'Hello' } }, + }; + 
const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([execution]), + }); + const context = makeContext({ runStore, userConfirmed: false }); + const executor = new McpTaskStepExecutor(context, [tool]); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('success'); + expect(invokeFn).not.toHaveBeenCalled(); + expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ + executionResult: { skipped: true }, + pendingData: { name: 'send_notification', input: { message: 'Hello' } }, + }), + ); + }); + }); + + describe('mcpServerId filter', () => { + it('passes only tools from the specified server to the AI', async () => { + const toolA = new MockRemoteTool({ name: 'tool_a', sourceId: 'server-A' }); + const toolB = new MockRemoteTool({ name: 'tool_b', sourceId: 'server-B' }); + const invokeFn = jest.fn().mockResolvedValue('ok'); + const toolB2 = new MockRemoteTool({ + name: 'tool_b2', + sourceId: 'server-B', + invoke: invokeFn, + }); + + const { model, bindTools } = makeMockModel('tool_b', {}); + const runStore = makeMockRunStore(); + const context = makeContext({ + model, + runStore, + stepDefinition: makeStep({ mcpServerId: 'server-B', automaticExecution: true }), + }); + const executor = new McpTaskStepExecutor(context, [toolA, toolB, toolB2]); + + await executor.execute(); + + // bindTools should only receive server-B tools + const boundTools = bindTools.mock.calls[0][0] as Array<{ name: string }>; + const boundNames = boundTools.map(t => t.name); + expect(boundNames).not.toContain('tool_a'); + expect(boundNames).toContain('tool_b'); + expect(boundNames).toContain('tool_b2'); + }); + }); + + describe('NoMcpToolsError', () => { + it('returns error when remoteTools is empty', async () => { + const context = makeContext(); + const executor = new McpTaskStepExecutor(context, []); + + const result = await executor.execute(); + + 
expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe('No tools are available to execute this step.'); + }); + + it('returns error when mcpServerId filter yields no tools', async () => { + const tool = new MockRemoteTool({ name: 'tool_a', sourceId: 'server-A' }); + const context = makeContext({ + stepDefinition: makeStep({ mcpServerId: 'server-B' }), + }); + const executor = new McpTaskStepExecutor(context, [tool]); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe('No tools are available to execute this step.'); + }); + }); + + describe('McpToolNotFoundError', () => { + it('returns error when tool from pendingData no longer exists (Branch A)', async () => { + const execution: McpTaskStepExecutionData = { + type: 'mcp-task', + stepIndex: 0, + pendingData: { name: 'deleted_tool', input: {} }, + }; + const tool = new MockRemoteTool({ name: 'other_tool', sourceId: 'mcp-server-1' }); + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([execution]), + }); + const context = makeContext({ runStore, userConfirmed: true }); + const executor = new McpTaskStepExecutor(context, [tool]); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe( + "The AI selected a tool that doesn't exist. 
Try rephrasing the step's prompt.", + ); + expect(runStore.saveStepExecution).not.toHaveBeenCalled(); + }); + }); + + describe('StepPersistenceError', () => { + it('returns error and logs cause when saveStepExecution fails after tool invocation (Branch B)', async () => { + const invokeFn = jest.fn().mockResolvedValue('ok'); + const tool = new MockRemoteTool({ + name: 'send_notification', + sourceId: 'mcp-server-1', + invoke: invokeFn, + }); + const { model } = makeMockModel('send_notification', { message: 'Hello' }); + const logger = { error: jest.fn() }; + const runStore = makeMockRunStore({ + saveStepExecution: jest.fn().mockRejectedValue(new Error('Disk full')), + }); + const context = makeContext({ + model, + runStore, + stepDefinition: makeStep({ automaticExecution: true }), + logger, + }); + const executor = new McpTaskStepExecutor(context, [tool]); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe('The step result could not be saved. 
Please retry.'); + expect(logger.error).toHaveBeenCalledWith( + 'MCP tool "send_notification" executed but step state could not be persisted (run "run-1", step 0)', + expect.objectContaining({ cause: 'Disk full', stepId: 'mcp-1' }), + ); + }); + + it('returns error and logs cause when saveStepExecution fails after tool invocation (Branch A)', async () => { + const invokeFn = jest.fn().mockResolvedValue('ok'); + const tool = new MockRemoteTool({ + name: 'send_notification', + sourceId: 'mcp-server-1', + invoke: invokeFn, + }); + const execution: McpTaskStepExecutionData = { + type: 'mcp-task', + stepIndex: 0, + pendingData: { name: 'send_notification', input: { message: 'Hello' } }, + }; + const logger = { error: jest.fn() }; + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([execution]), + saveStepExecution: jest.fn().mockRejectedValue(new Error('Disk full')), + }); + const context = makeContext({ runStore, userConfirmed: true, logger }); + const executor = new McpTaskStepExecutor(context, [tool]); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe('The step result could not be saved. 
Please retry.'); + expect(logger.error).toHaveBeenCalledWith( + 'MCP tool "send_notification" executed but step state could not be persisted (run "run-1", step 0)', + expect.objectContaining({ cause: 'Disk full', stepId: 'mcp-1' }), + ); + }); + }); + + describe('stepOutcome shape', () => { + it('emits correct type, stepId and stepIndex', async () => { + const tool = new MockRemoteTool({ name: 'send_notification', sourceId: 'mcp-server-1' }); + const { model } = makeMockModel('send_notification', {}); + const context = makeContext({ + model, + stepDefinition: makeStep({ automaticExecution: true }), + }); + const executor = new McpTaskStepExecutor(context, [tool]); + + const result = await executor.execute(); + + expect(result.stepOutcome).toMatchObject({ + type: 'mcp-task', + stepId: 'mcp-1', + stepIndex: 0, + status: 'success', + }); + }); + }); + + describe('no pending data in confirmation flow (Branch A)', () => { + it('returns error when no execution record is found', async () => { + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([]), + }); + const context = makeContext({ runStore, userConfirmed: true }); + const executor = new McpTaskStepExecutor(context, []); + + await expect(executor.execute()).resolves.toMatchObject({ + stepOutcome: { + status: 'error', + error: 'An unexpected error occurred while processing this step.', + }, + }); + }); + + it('returns error when execution exists but pendingData is absent', async () => { + const execution: McpTaskStepExecutionData = { + type: 'mcp-task', + stepIndex: 0, + }; + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([execution]), + }); + const context = makeContext({ runStore, userConfirmed: true }); + const executor = new McpTaskStepExecutor(context, []); + + await expect(executor.execute()).resolves.toMatchObject({ + stepOutcome: { + status: 'error', + error: 'An unexpected error occurred while processing this step.', + }, + }); + }); + 
}); + + describe('tool.base.invoke error', () => { + it('returns error when tool invocation throws a WorkflowExecutorError', async () => { + const invokeFn = jest.fn().mockRejectedValue(new StepStateError('Tool failed')); + const tool = new MockRemoteTool({ + name: 'send_notification', + sourceId: 'mcp-server-1', + invoke: invokeFn, + }); + const { model } = makeMockModel('send_notification', {}); + const mockRunStore = makeMockRunStore(); + const context = makeContext({ + model, + runStore: mockRunStore, + stepDefinition: makeStep({ automaticExecution: true }), + }); + const executor = new McpTaskStepExecutor(context, [tool]); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('error'); + expect(mockRunStore.saveStepExecution).not.toHaveBeenCalled(); + }); + + it('returns error and logs when tool invocation throws an infrastructure error', async () => { + const invokeFn = jest.fn().mockRejectedValue(new Error('Connection refused')); + const tool = new MockRemoteTool({ + name: 'send_notification', + sourceId: 'mcp-server-1', + invoke: invokeFn, + }); + const { model } = makeMockModel('send_notification', {}); + const logger = { error: jest.fn() }; + const context = makeContext({ + model, + stepDefinition: makeStep({ automaticExecution: true }), + logger, + }); + const executor = new McpTaskStepExecutor(context, [tool]); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe( + 'The tool failed to execute. 
Please try again or contact your administrator.', + ); + expect(logger.error).toHaveBeenCalledWith( + 'MCP tool "send_notification" invocation failed: Connection refused', + expect.objectContaining({ cause: 'Connection refused' }), + ); + }); + }); + + describe('selectTool AI errors', () => { + it('returns error when AI returns a malformed tool call (MalformedToolCallError)', async () => { + const model = { + bindTools: jest.fn().mockReturnValue({ + invoke: jest.fn().mockResolvedValue({ + tool_calls: [{ name: 'send_notification', args: null, id: 'call_1' }], + }), + }), + } as unknown as ExecutionContext['model']; + const tool = new MockRemoteTool({ name: 'send_notification' }); + const context = makeContext({ model }); + const executor = new McpTaskStepExecutor(context, [tool]); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe( + "The AI returned an unexpected response. Try rephrasing the step's prompt.", + ); + }); + + it('returns error when AI returns no tool call (MissingToolCallError)', async () => { + const model = { + bindTools: jest.fn().mockReturnValue({ + invoke: jest.fn().mockResolvedValue({ tool_calls: [] }), + }), + } as unknown as ExecutionContext['model']; + const tool = new MockRemoteTool({ name: 'send_notification' }); + const context = makeContext({ model }); + const executor = new McpTaskStepExecutor(context, [tool]); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe( + "The AI couldn't decide what to do. 
Try rephrasing the step's prompt.", + ); + }); + }); + + describe('default prompt', () => { + it('uses default prompt when step.prompt is undefined', async () => { + const { model, invoke: modelInvoke } = makeMockModel('send_notification', {}); + const tool = new MockRemoteTool({ name: 'send_notification', sourceId: 'mcp-server-1' }); + const context = makeContext({ + model, + stepDefinition: makeStep({ prompt: undefined }), + }); + const executor = new McpTaskStepExecutor(context, [tool]); + + await executor.execute(); + + const messages = modelInvoke.mock.calls[0][0]; + const humanMessage = messages[messages.length - 1]; + expect(humanMessage.content).toBe('**Request**: Execute the relevant tool.'); + }); + }); + + describe('previous steps context', () => { + it('includes previous steps summary in selectTool messages', async () => { + const { model, invoke: modelInvoke } = makeMockModel('send_notification', {}); + const tool = new MockRemoteTool({ name: 'send_notification', sourceId: 'mcp-server-1' }); + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([ + { + type: 'condition', + stepIndex: 0, + executionParams: { answer: 'Yes', reasoning: 'Approved' }, + }, + ]), + }); + const context = makeContext({ + model, + runStore, + previousSteps: [ + { + stepDefinition: { + type: StepType.Condition, + options: ['Yes', 'No'], + prompt: 'Should we send a notification?', + }, + stepOutcome: { + type: 'condition', + stepId: 'prev-step', + stepIndex: 0, + status: 'success', + }, + }, + ], + }); + const executor = new McpTaskStepExecutor({ ...context, stepId: 'mcp-2', stepIndex: 1 }, [ + tool, + ]); + + await executor.execute(); + + const messages = modelInvoke.mock.calls[0][0]; + // previous steps message + system prompt + human message = 3 + expect(messages).toHaveLength(3); + expect(messages[0].content).toContain('Should we send a notification?'); + }); + }); +}); diff --git 
a/packages/workflow-executor/test/executors/read-record-step-executor.test.ts b/packages/workflow-executor/test/executors/read-record-step-executor.test.ts index eb9c3bc5de..cee40bc8c3 100644 --- a/packages/workflow-executor/test/executors/read-record-step-executor.test.ts +++ b/packages/workflow-executor/test/executors/read-record-step-executor.test.ts @@ -3,13 +3,13 @@ import type { RunStore } from '../../src/ports/run-store'; import type { WorkflowPort } from '../../src/ports/workflow-port'; import type { ExecutionContext } from '../../src/types/execution'; import type { CollectionSchema, RecordRef } from '../../src/types/record'; -import type { AiTaskStepDefinition } from '../../src/types/step-definition'; +import type { RecordTaskStepDefinition } from '../../src/types/step-definition'; import { NoRecordsError, RecordNotFoundError } from '../../src/errors'; import ReadRecordStepExecutor from '../../src/executors/read-record-step-executor'; import { StepType } from '../../src/types/step-definition'; -function makeStep(overrides: Partial = {}): AiTaskStepDefinition { +function makeStep(overrides: Partial = {}): RecordTaskStepDefinition { return { type: StepType.ReadRecord, prompt: 'Read the customer email', @@ -34,8 +34,8 @@ function makeMockAgentPort( return { getRecord: jest .fn() - .mockImplementation((collectionName: string) => - Promise.resolve(recordsByCollection[collectionName] ?? { values: {} }), + .mockImplementation(({ collection }: { collection: string }) => + Promise.resolve(recordsByCollection[collection] ?? 
{ values: {} }), ), updateRecord: jest.fn(), getRelatedData: jest.fn(), @@ -73,6 +73,7 @@ function makeMockWorkflowPort( ): WorkflowPort { return { getPendingStepExecutions: jest.fn().mockResolvedValue([]), + getPendingStepExecutionsForRun: jest.fn().mockResolvedValue(null), updateStepExecution: jest.fn().mockResolvedValue(undefined), getCollectionSchema: jest .fn() @@ -99,8 +100,8 @@ function makeMockModel( } function makeContext( - overrides: Partial> = {}, -): ExecutionContext { + overrides: Partial> = {}, +): ExecutionContext { return { runId: 'run-1', stepId: 'read-1', @@ -111,8 +112,8 @@ function makeContext( agentPort: makeMockAgentPort(), workflowPort: makeMockWorkflowPort(), runStore: makeMockRunStore(), - history: [], - remoteTools: [], + previousSteps: [], + logger: { error: jest.fn() }, ...overrides, }; } @@ -133,9 +134,9 @@ describe('ReadRecordStepExecutor', () => { expect.objectContaining({ type: 'read-record', stepIndex: 0, - executionParams: { fieldNames: ['email'] }, + executionParams: { fields: [{ name: 'email', displayName: 'Email' }] }, executionResult: { - fields: [{ value: 'john@example.com', fieldName: 'email', displayName: 'Email' }], + fields: [{ value: 'john@example.com', name: 'email', displayName: 'Email' }], }, }), ); @@ -155,11 +156,16 @@ describe('ReadRecordStepExecutor', () => { expect(runStore.saveStepExecution).toHaveBeenCalledWith( 'run-1', expect.objectContaining({ - executionParams: { fieldNames: ['email', 'name'] }, + executionParams: { + fields: [ + { name: 'email', displayName: 'Email' }, + { name: 'name', displayName: 'Full Name' }, + ], + }, executionResult: { fields: [ - { value: 'john@example.com', fieldName: 'email', displayName: 'Email' }, - { value: 'John Doe', fieldName: 'name', displayName: 'Full Name' }, + { value: 'john@example.com', name: 'email', displayName: 'Email' }, + { value: 'John Doe', name: 'name', displayName: 'Full Name' }, ], }, }), @@ -180,9 +186,9 @@ describe('ReadRecordStepExecutor', () => { 
expect(runStore.saveStepExecution).toHaveBeenCalledWith( 'run-1', expect.objectContaining({ - executionParams: { fieldNames: ['name'] }, + executionParams: { fields: [{ name: 'name', displayName: 'Full Name' }] }, executionResult: { - fields: [{ value: 'John Doe', fieldName: 'name', displayName: 'Full Name' }], + fields: [{ value: 'John Doe', name: 'name', displayName: 'Full Name' }], }, }), ); @@ -199,7 +205,11 @@ describe('ReadRecordStepExecutor', () => { await executor.execute(); - expect(agentPort.getRecord).toHaveBeenCalledWith('customers', [42], ['name', 'email']); + expect(agentPort.getRecord).toHaveBeenCalledWith({ + collection: 'customers', + id: [42], + fields: ['name', 'email'], + }); }); it('passes only resolved field names when some fields are unresolved', async () => { @@ -211,7 +221,11 @@ describe('ReadRecordStepExecutor', () => { await executor.execute(); - expect(agentPort.getRecord).toHaveBeenCalledWith('customers', [42], ['email']); + expect(agentPort.getRecord).toHaveBeenCalledWith({ + collection: 'customers', + id: [42], + fields: ['email'], + }); }); it('returns error when no fields can be resolved', async () => { @@ -225,7 +239,7 @@ describe('ReadRecordStepExecutor', () => { expect(result.stepOutcome.status).toBe('error'); expect(result.stepOutcome.error).toBe( - 'None of the requested fields could be resolved: nonexistent, unknown', + "The AI selected fields that don't exist on this record. 
Try rephrasing the step's prompt.", ); expect(agentPort.getRecord).not.toHaveBeenCalled(); expect(runStore.saveStepExecution).not.toHaveBeenCalled(); @@ -247,10 +261,10 @@ describe('ReadRecordStepExecutor', () => { expect.objectContaining({ executionResult: { fields: [ - { value: 'john@example.com', fieldName: 'email', displayName: 'Email' }, + { value: 'john@example.com', name: 'email', displayName: 'Email' }, { error: 'Field not found: nonexistent', - fieldName: 'nonexistent', + name: 'nonexistent', displayName: 'nonexistent', }, ], @@ -310,7 +324,7 @@ describe('ReadRecordStepExecutor', () => { expect(result.stepOutcome.status).toBe('error'); expect(result.stepOutcome.error).toBe( - 'No readable fields on record from collection "customers"', + 'This record type has no readable fields configured in Forest Admin.', ); expect(runStore.saveStepExecution).not.toHaveBeenCalled(); }); @@ -356,11 +370,17 @@ describe('ReadRecordStepExecutor', () => { const model = { bindTools } as unknown as ExecutionContext['model']; const runStore = makeMockRunStore({ - getStepExecutions: jest - .fn() - .mockResolvedValue([ - { type: 'load-related-record', stepIndex: 2, record: relatedRecord }, - ]), + getStepExecutions: jest.fn().mockResolvedValue([ + { + type: 'load-related-record', + stepIndex: 2, + executionResult: { + relation: { name: 'order', displayName: 'Order' }, + record: relatedRecord, + }, + selectedRecordRef: makeRecordRef(), + }, + ]), }); const workflowPort = makeMockWorkflowPort({ customers: makeCollectionSchema(), @@ -392,7 +412,7 @@ describe('ReadRecordStepExecutor', () => { 'run-1', expect.objectContaining({ executionResult: { - fields: [{ value: 'john@example.com', fieldName: 'email', displayName: 'Email' }], + fields: [{ value: 'john@example.com', name: 'email', displayName: 'Email' }], }, selectedRecordRef: expect.objectContaining({ recordId: [42], @@ -429,18 +449,28 @@ describe('ReadRecordStepExecutor', () => { }) .mockResolvedValueOnce({ tool_calls: [ - { name: 
'read-selected-record-fields', args: { fieldNames: ['total'] }, id: 'call_2' }, + { + name: 'read-selected-record-fields', + args: { fieldNames: ['total'] }, + id: 'call_2', + }, ], }); const bindTools = jest.fn().mockReturnValue({ invoke }); const model = { bindTools } as unknown as ExecutionContext['model']; const runStore = makeMockRunStore({ - getStepExecutions: jest - .fn() - .mockResolvedValue([ - { type: 'load-related-record', stepIndex: 2, record: relatedRecord }, - ]), + getStepExecutions: jest.fn().mockResolvedValue([ + { + type: 'load-related-record', + stepIndex: 2, + executionResult: { + relation: { name: 'order', displayName: 'Order' }, + record: relatedRecord, + }, + selectedRecordRef: makeRecordRef(), + }, + ]), }); const workflowPort = makeMockWorkflowPort({ customers: makeCollectionSchema(), @@ -459,7 +489,7 @@ describe('ReadRecordStepExecutor', () => { 'run-1', expect.objectContaining({ executionResult: { - fields: [{ value: 150, fieldName: 'total', displayName: 'Total' }], + fields: [{ value: 150, name: 'total', displayName: 'Total' }], }, selectedRecordRef: expect.objectContaining({ recordId: [99], @@ -496,18 +526,28 @@ describe('ReadRecordStepExecutor', () => { }) .mockResolvedValueOnce({ tool_calls: [ - { name: 'read-selected-record-fields', args: { fieldNames: ['email'] }, id: 'call_2' }, + { + name: 'read-selected-record-fields', + args: { fieldNames: ['email'] }, + id: 'call_2', + }, ], }); const bindTools = jest.fn().mockReturnValue({ invoke }); const model = { bindTools } as unknown as ExecutionContext['model']; const runStore = makeMockRunStore({ - getStepExecutions: jest - .fn() - .mockResolvedValue([ - { type: 'load-related-record', stepIndex: 5, record: relatedRecord }, - ]), + getStepExecutions: jest.fn().mockResolvedValue([ + { + type: 'load-related-record', + stepIndex: 5, + executionResult: { + relation: { name: 'order', displayName: 'Order' }, + record: relatedRecord, + }, + selectedRecordRef: makeRecordRef(), + }, + ]), }); 
const workflowPort = makeMockWorkflowPort({ customers: makeCollectionSchema(), @@ -553,11 +593,17 @@ describe('ReadRecordStepExecutor', () => { const model = { bindTools } as unknown as ExecutionContext['model']; const runStore = makeMockRunStore({ - getStepExecutions: jest - .fn() - .mockResolvedValue([ - { type: 'load-related-record', stepIndex: 1, record: relatedRecord }, - ]), + getStepExecutions: jest.fn().mockResolvedValue([ + { + type: 'load-related-record', + stepIndex: 1, + executionResult: { + relation: { name: 'order', displayName: 'Order' }, + record: relatedRecord, + }, + selectedRecordRef: makeRecordRef(), + }, + ]), }); const workflowPort = makeMockWorkflowPort({ customers: makeCollectionSchema(), @@ -570,7 +616,7 @@ describe('ReadRecordStepExecutor', () => { expect(result.stepOutcome.status).toBe('error'); expect(result.stepOutcome.error).toBe( - 'AI selected record "NonExistent #999" which does not match any available record', + "The AI made an unexpected choice. Try rephrasing the step's prompt.", ); expect(runStore.saveStepExecution).not.toHaveBeenCalled(); }); @@ -590,23 +636,46 @@ describe('ReadRecordStepExecutor', () => { const result = await executor.execute(); expect(result.stepOutcome.status).toBe('error'); - expect(result.stepOutcome.error).toBe('Record not found: collection "customers", id "42"'); + expect(result.stepOutcome.error).toBe( + 'The record no longer exists. 
It may have been deleted.', + ); expect(runStore.saveStepExecution).not.toHaveBeenCalled(); }); - it('lets infrastructure errors propagate', async () => { + it('returns error outcome for infrastructure errors', async () => { const agentPort = makeMockAgentPort(); (agentPort.getRecord as jest.Mock).mockRejectedValue(new Error('Connection refused')); const mockModel = makeMockModel({ fieldNames: ['email'] }); const context = makeContext({ model: mockModel.model, agentPort }); const executor = new ReadRecordStepExecutor(context); - await expect(executor.execute()).rejects.toThrow('Connection refused'); + const result = await executor.execute(); + expect(result.stepOutcome.status).toBe('error'); + }); + + it('returns user message and logs cause when agentPort.getRecord throws an infra error', async () => { + const logger = { error: jest.fn() }; + const agentPort = makeMockAgentPort(); + (agentPort.getRecord as jest.Mock).mockRejectedValue(new Error('DB connection lost')); + const mockModel = makeMockModel({ fieldNames: ['email'] }); + const context = makeContext({ model: mockModel.model, agentPort, logger }); + const executor = new ReadRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe( + 'An error occurred while accessing your data. 
Please try again.', + ); + expect(logger.error).toHaveBeenCalledWith( + 'Agent port "getRecord" failed: DB connection lost', + expect.objectContaining({ cause: 'DB connection lost' }), + ); }); }); describe('model error', () => { - it('lets non-WorkflowExecutorError propagate from AI invocation', async () => { + it('returns error outcome for non-WorkflowExecutorError from AI invocation', async () => { const invoke = jest.fn().mockRejectedValue(new Error('API timeout')); const bindTools = jest.fn().mockReturnValue({ invoke }); const context = makeContext({ @@ -614,7 +683,8 @@ describe('ReadRecordStepExecutor', () => { }); const executor = new ReadRecordStepExecutor(context); - await expect(executor.execute()).rejects.toThrow('API timeout'); + const result = await executor.execute(); + expect(result.stepOutcome.status).toBe('error'); }); }); @@ -638,7 +708,7 @@ describe('ReadRecordStepExecutor', () => { expect(result.stepOutcome.status).toBe('error'); expect(result.stepOutcome.error).toBe( - 'AI returned a malformed tool call for "read-selected-record-fields": JSON parse error', + "The AI returned an unexpected response. Try rephrasing the step's prompt.", ); expect(runStore.saveStepExecution).not.toHaveBeenCalled(); }); @@ -656,13 +726,15 @@ describe('ReadRecordStepExecutor', () => { const result = await executor.execute(); expect(result.stepOutcome.status).toBe('error'); - expect(result.stepOutcome.error).toBe('AI did not return a tool call'); + expect(result.stepOutcome.error).toBe( + "The AI couldn't decide what to do. 
Try rephrasing the step's prompt.", + ); expect(runStore.saveStepExecution).not.toHaveBeenCalled(); }); }); describe('RunStore error propagation', () => { - it('lets saveStepExecution errors propagate', async () => { + it('returns error outcome when saveStepExecution fails', async () => { const mockModel = makeMockModel({ fieldNames: ['email'] }); const runStore = makeMockRunStore({ saveStepExecution: jest.fn().mockRejectedValue(new Error('Storage full')), @@ -670,10 +742,11 @@ describe('ReadRecordStepExecutor', () => { const context = makeContext({ model: mockModel.model, runStore }); const executor = new ReadRecordStepExecutor(context); - await expect(executor.execute()).rejects.toThrow('Storage full'); + const result = await executor.execute(); + expect(result.stepOutcome.status).toBe('error'); }); - it('lets getStepExecutions errors propagate', async () => { + it('returns error outcome when getStepExecutions fails', async () => { const mockModel = makeMockModel({ fieldNames: ['email'] }); const runStore = makeMockRunStore({ getStepExecutions: jest.fn().mockRejectedValue(new Error('Connection lost')), @@ -681,7 +754,8 @@ describe('ReadRecordStepExecutor', () => { const context = makeContext({ model: mockModel.model, runStore }); const executor = new ReadRecordStepExecutor(context); - await expect(executor.execute()).rejects.toThrow('Connection lost'); + const result = await executor.execute(); + expect(result.stepOutcome.status).toBe('error'); }); }); @@ -700,7 +774,7 @@ describe('ReadRecordStepExecutor', () => { const context = makeContext({ model: mockModel.model, runStore, - history: [ + previousSteps: [ { stepDefinition: { type: StepType.Condition, @@ -766,11 +840,16 @@ describe('ReadRecordStepExecutor', () => { expect(runStore.saveStepExecution).toHaveBeenCalledWith('run-1', { type: 'read-record', stepIndex: 3, - executionParams: { fieldNames: ['email', 'name'] }, + executionParams: { + fields: [ + { name: 'email', displayName: 'Email' }, + { name: 'name', 
displayName: 'Full Name' }, + ], + }, executionResult: { fields: [ - { value: 'john@example.com', fieldName: 'email', displayName: 'Email' }, - { value: 'John Doe', fieldName: 'name', displayName: 'Full Name' }, + { value: 'john@example.com', name: 'email', displayName: 'Email' }, + { value: 'John Doe', name: 'name', displayName: 'Full Name' }, ], }, selectedRecordRef: { diff --git a/packages/workflow-executor/test/executors/safe-agent-port.test.ts b/packages/workflow-executor/test/executors/safe-agent-port.test.ts new file mode 100644 index 0000000000..33ab53d072 --- /dev/null +++ b/packages/workflow-executor/test/executors/safe-agent-port.test.ts @@ -0,0 +1,173 @@ +import type { AgentPort } from '../../src/ports/agent-port'; + +import { AgentPortError, StepStateError, WorkflowExecutorError } from '../../src/errors'; +import SafeAgentPort from '../../src/executors/safe-agent-port'; + +function makeMockPort(overrides: Partial = {}): AgentPort { + return { + getRecord: jest + .fn() + .mockResolvedValue({ collectionName: 'customers', recordId: [1], values: {} }), + updateRecord: jest + .fn() + .mockResolvedValue({ collectionName: 'customers', recordId: [1], values: {} }), + getRelatedData: jest.fn().mockResolvedValue([]), + executeAction: jest.fn().mockResolvedValue(undefined), + ...overrides, + } as unknown as AgentPort; +} + +describe('SafeAgentPort', () => { + describe('returns result when port call succeeds', () => { + it('getRecord returns the port result', async () => { + const expected = { collectionName: 'customers', recordId: [1], values: { email: 'a@b.com' } }; + const port = makeMockPort({ getRecord: jest.fn().mockResolvedValue(expected) }); + const safe = new SafeAgentPort(port); + + const result = await safe.getRecord({ collection: 'customers', id: [1] }); + + expect(result).toBe(expected); + }); + + it('updateRecord returns the port result', async () => { + const expected = { collectionName: 'customers', recordId: [1], values: { status: 'active' } }; + 
const port = makeMockPort({ updateRecord: jest.fn().mockResolvedValue(expected) }); + const safe = new SafeAgentPort(port); + + const result = await safe.updateRecord({ + collection: 'customers', + id: [1], + values: { status: 'active' }, + }); + + expect(result).toBe(expected); + }); + + it('getRelatedData returns the port result', async () => { + const expected = [{ collectionName: 'orders', recordId: [10], values: {} }]; + const port = makeMockPort({ getRelatedData: jest.fn().mockResolvedValue(expected) }); + const safe = new SafeAgentPort(port); + + const result = await safe.getRelatedData({ + collection: 'customers', + id: [1], + relation: 'orders', + limit: 10, + }); + + expect(result).toBe(expected); + }); + + it('executeAction returns the port result', async () => { + const expected = { success: true }; + const port = makeMockPort({ executeAction: jest.fn().mockResolvedValue(expected) }); + const safe = new SafeAgentPort(port); + + const result = await safe.executeAction({ collection: 'customers', action: 'send-email' }); + + expect(result).toBe(expected); + }); + }); + + describe('wraps infra Error in AgentPortError', () => { + it('wraps getRecord infra error with correct operation name', async () => { + const port = makeMockPort({ + getRecord: jest.fn().mockRejectedValue(new Error('DB connection lost')), + }); + const safe = new SafeAgentPort(port); + + await expect(safe.getRecord({ collection: 'customers', id: [1] })).rejects.toThrow( + AgentPortError, + ); + }); + + it('includes cause message in AgentPortError.message for getRecord', async () => { + const port = makeMockPort({ + getRecord: jest.fn().mockRejectedValue(new Error('DB connection lost')), + }); + const safe = new SafeAgentPort(port); + + await expect(safe.getRecord({ collection: 'customers', id: [1] })).rejects.toThrow( + 'Agent port "getRecord" failed: DB connection lost', + ); + }); + + it('wraps updateRecord infra error with correct operation name', async () => { + const port = 
makeMockPort({ + updateRecord: jest.fn().mockRejectedValue(new Error('Timeout')), + }); + const safe = new SafeAgentPort(port); + + await expect( + safe.updateRecord({ collection: 'customers', id: [1], values: {} }), + ).rejects.toThrow('Agent port "updateRecord" failed: Timeout'); + }); + + it('wraps getRelatedData infra error with correct operation name', async () => { + const port = makeMockPort({ + getRelatedData: jest.fn().mockRejectedValue(new Error('Network error')), + }); + const safe = new SafeAgentPort(port); + + await expect( + safe.getRelatedData({ collection: 'customers', id: [1], relation: 'orders', limit: 10 }), + ).rejects.toThrow('Agent port "getRelatedData" failed: Network error'); + }); + + it('wraps executeAction infra error with correct operation name', async () => { + const port = makeMockPort({ + executeAction: jest.fn().mockRejectedValue(new Error('Action failed')), + }); + const safe = new SafeAgentPort(port); + + await expect( + safe.executeAction({ collection: 'customers', action: 'send-email' }), + ).rejects.toThrow('Agent port "executeAction" failed: Action failed'); + }); + + it('sets cause on AgentPortError', async () => { + const infraError = new Error('DB connection lost'); + const port = makeMockPort({ getRecord: jest.fn().mockRejectedValue(infraError) }); + const safe = new SafeAgentPort(port); + + let thrown: unknown; + + try { + await safe.getRecord({ collection: 'customers', id: [1] }); + } catch (e) { + thrown = e; + } + + expect(thrown).toBeInstanceOf(AgentPortError); + expect((thrown as AgentPortError).cause).toBe(infraError); + }); + }); + + describe('does not re-wrap WorkflowExecutorError', () => { + it('rethrows WorkflowExecutorError as-is from getRecord', async () => { + const domainError = new StepStateError('invalid state'); + const port = makeMockPort({ getRecord: jest.fn().mockRejectedValue(domainError) }); + const safe = new SafeAgentPort(port); + + await expect(safe.getRecord({ collection: 'customers', id: [1] 
})).rejects.toBe(domainError); + }); + + it('rethrows WorkflowExecutorError subclass without wrapping in AgentPortError', async () => { + const domainError = new StepStateError('invalid state'); + const port = makeMockPort({ executeAction: jest.fn().mockRejectedValue(domainError) }); + const safe = new SafeAgentPort(port); + + let thrown: unknown; + + try { + await safe.executeAction({ collection: 'customers', action: 'send-email' }); + } catch (e) { + thrown = e; + } + + expect(thrown).toBeInstanceOf(WorkflowExecutorError); + expect(thrown).not.toBeInstanceOf(AgentPortError); + expect(thrown).toBe(domainError); + }); + }); +}); diff --git a/packages/workflow-executor/test/executors/step-execution-formatters.test.ts b/packages/workflow-executor/test/executors/step-execution-formatters.test.ts new file mode 100644 index 0000000000..dc4f65db29 --- /dev/null +++ b/packages/workflow-executor/test/executors/step-execution-formatters.test.ts @@ -0,0 +1,141 @@ +import type { StepExecutionData } from '../../src/types/step-execution-data'; + +import StepExecutionFormatters from '../../src/executors/summary/step-execution-formatters'; + +describe('StepExecutionFormatters', () => { + describe('format', () => { + describe('load-related-record', () => { + it('returns the full Loaded: line for a completed execution', () => { + const execution: StepExecutionData = { + type: 'load-related-record', + stepIndex: 1, + selectedRecordRef: { collectionName: 'customers', recordId: [42], stepIndex: 0 }, + executionResult: { + relation: { name: 'address', displayName: 'Address' }, + record: { collectionName: 'addresses', recordId: [1], stepIndex: 1 }, + }, + }; + + expect(StepExecutionFormatters.format(execution)).toBe( + ' Loaded: customers #42 → [Address] → addresses #1 (step 1)', + ); + }); + + it('returns null for a skipped execution', () => { + const execution: StepExecutionData = { + type: 'load-related-record', + stepIndex: 1, + selectedRecordRef: { collectionName: 'customers', 
recordId: [42], stepIndex: 0 }, + executionResult: { skipped: true }, + }; + + expect(StepExecutionFormatters.format(execution)).toBeNull(); + }); + + it('returns null when executionResult is absent (pending phase)', () => { + const execution: StepExecutionData = { + type: 'load-related-record', + stepIndex: 1, + selectedRecordRef: { collectionName: 'customers', recordId: [42], stepIndex: 0 }, + pendingData: { + displayName: 'Address', + name: 'address', + relatedCollectionName: 'addresses', + selectedRecordId: [1], + }, + }; + + expect(StepExecutionFormatters.format(execution)).toBeNull(); + }); + + it('formats composite record IDs joined by ", "', () => { + const execution: StepExecutionData = { + type: 'load-related-record', + stepIndex: 1, + selectedRecordRef: { collectionName: 'customers', recordId: [42, 'abc'], stepIndex: 0 }, + executionResult: { + relation: { name: 'orders', displayName: 'Orders' }, + record: { collectionName: 'orders', recordId: [1, 'xyz'], stepIndex: 1 }, + }, + }; + + expect(StepExecutionFormatters.format(execution)).toBe( + ' Loaded: customers #42, abc → [Orders] → orders #1, xyz (step 1)', + ); + }); + }); + + describe('mcp-task', () => { + it('returns the Result: line when formattedResponse is present', () => { + const execution: StepExecutionData = { + type: 'mcp-task', + stepIndex: 2, + executionParams: { name: 'search_records', input: { query: 'foo' } }, + executionResult: { + success: true, + toolResult: { items: [] }, + formattedResponse: 'No records found.', + }, + }; + + expect(StepExecutionFormatters.format(execution)).toBe(' Result: No records found.'); + }); + + it('returns a generic Executed: line when formattedResponse is absent', () => { + const execution: StepExecutionData = { + type: 'mcp-task', + stepIndex: 2, + executionParams: { name: 'search_records', input: { query: 'foo' } }, + executionResult: { success: true, toolResult: { items: [] } }, + }; + + expect(StepExecutionFormatters.format(execution)).toBe( + ' 
Executed: search_records (result not summarized)', + ); + }); + + it('returns null when executionResult is absent (pending phase)', () => { + const execution: StepExecutionData = { + type: 'mcp-task', + stepIndex: 2, + pendingData: { name: 'search_records', input: {} }, + }; + + expect(StepExecutionFormatters.format(execution)).toBeNull(); + }); + + it('returns null for a skipped execution', () => { + const execution: StepExecutionData = { + type: 'mcp-task', + stepIndex: 2, + executionResult: { skipped: true }, + }; + + expect(StepExecutionFormatters.format(execution)).toBeNull(); + }); + }); + + describe('types without a custom formatter', () => { + it('returns null for condition type', () => { + const execution: StepExecutionData = { + type: 'condition', + stepIndex: 0, + executionParams: { answer: 'Yes' }, + executionResult: { answer: 'Yes' }, + }; + + expect(StepExecutionFormatters.format(execution)).toBeNull(); + }); + + it('returns null for record-task type', () => { + const execution: StepExecutionData = { + type: 'record-task', + stepIndex: 0, + executionResult: { success: true }, + }; + + expect(StepExecutionFormatters.format(execution)).toBeNull(); + }); + }); + }); +}); diff --git a/packages/workflow-executor/test/executors/step-summary-builder.test.ts b/packages/workflow-executor/test/executors/step-summary-builder.test.ts new file mode 100644 index 0000000000..a6a5c743f4 --- /dev/null +++ b/packages/workflow-executor/test/executors/step-summary-builder.test.ts @@ -0,0 +1,284 @@ +import type { StepDefinition } from '../../src/types/step-definition'; +import type { StepExecutionData } from '../../src/types/step-execution-data'; +import type { StepOutcome } from '../../src/types/step-outcome'; + +import StepSummaryBuilder from '../../src/executors/summary/step-summary-builder'; +import { StepType } from '../../src/types/step-definition'; + +function makeConditionStep(prompt?: string): StepDefinition { + return { type: StepType.Condition, options: ['A', 
'B'], prompt }; +} + +function makeConditionOutcome( + stepId: string, + stepIndex: number, + extra: Record = {}, +): StepOutcome { + return { type: 'condition', stepId, stepIndex, status: 'success', ...extra } as StepOutcome; +} + +describe('StepSummaryBuilder', () => { + describe('build', () => { + it('renders header, prompt, Input, and Output for a condition step with execution data', () => { + const step = makeConditionStep('Approve?'); + const outcome = makeConditionOutcome('cond-1', 0); + const execution: StepExecutionData = { + type: 'condition', + stepIndex: 0, + executionParams: { answer: 'Yes', reasoning: 'Order is valid' }, + executionResult: { answer: 'Yes' }, + }; + + const result = StepSummaryBuilder.build(step, outcome, execution); + + expect(result).toContain('Step "cond-1" (index 0):'); + expect(result).toContain('Prompt: Approve?'); + expect(result).toContain('Input: {"answer":"Yes","reasoning":"Order is valid"}'); + expect(result).toContain('Output: {"answer":"Yes"}'); + }); + + it('renders Output: when executionResult is present but executionParams is absent', () => { + const step: StepDefinition = { type: StepType.ReadRecord, prompt: 'Do something' }; + const outcome: StepOutcome = { + type: 'record-task', + stepId: 'task-1', + stepIndex: 0, + status: 'success', + }; + const execution: StepExecutionData = { + type: 'record-task', + stepIndex: 0, + executionResult: { success: true }, + }; + + const result = StepSummaryBuilder.build(step, outcome, execution); + + expect(result).toContain('Output: {"success":true}'); + expect(result).not.toContain('Input:'); + }); + + it('falls back to History when no execution data is provided', () => { + const step = makeConditionStep('Pick one'); + const outcome = makeConditionOutcome('cond-1', 0); + + const result = StepSummaryBuilder.build(step, outcome, undefined); + + expect(result).toContain('Step "cond-1" (index 0):'); + expect(result).toContain('Prompt: Pick one'); + expect(result).toContain('History: 
{"status":"success"}'); + expect(result).not.toContain('"stepId"'); + expect(result).not.toContain('"stepIndex"'); + expect(result).not.toContain('"type"'); + }); + + it('includes selectedOption in History for condition steps', () => { + const step = makeConditionStep('Approved?'); + const outcome = makeConditionOutcome('cond-approval', 0, { selectedOption: 'Yes' }); + + const result = StepSummaryBuilder.build(step, outcome, undefined); + + expect(result).toContain('"selectedOption":"Yes"'); + }); + + it('includes error in History for failed steps', () => { + const step = makeConditionStep('Do something'); + const outcome: StepOutcome = { + type: 'condition', + stepId: 'failing-step', + stepIndex: 0, + status: 'error', + error: 'AI could not match an option', + }; + + const result = StepSummaryBuilder.build(step, outcome, undefined); + + expect(result).toContain('"status":"error"'); + expect(result).toContain('"error":"AI could not match an option"'); + }); + + it('omits History type field and includes status for record-task steps', () => { + const step: StepDefinition = { type: StepType.ReadRecord, prompt: 'Run task' }; + const outcome: StepOutcome = { + type: 'record-task', + stepId: 'read-record-1', + stepIndex: 0, + status: 'awaiting-input', + }; + + const result = StepSummaryBuilder.build(step, outcome, undefined); + + expect(result).toContain('Step "read-record-1" (index 0):'); + expect(result).toContain('History: {"status":"awaiting-input"}'); + }); + + it('omits Input and Output lines when executionParams and executionResult are both absent', () => { + const step: StepDefinition = { type: StepType.ReadRecord, prompt: 'Do something' }; + const outcome: StepOutcome = { + type: 'record-task', + stepId: 'read-record-1', + stepIndex: 0, + status: 'success', + }; + const execution: StepExecutionData = { type: 'record-task', stepIndex: 0 }; + + const result = StepSummaryBuilder.build(step, outcome, execution); + + expect(result).toContain('Step "read-record-1" 
(index 0):'); + expect(result).toContain('Prompt: Do something'); + expect(result).not.toContain('Input:'); + expect(result).not.toContain('Output:'); + }); + + it('uses Pending when update-record step has pendingData but no executionParams', () => { + const step: StepDefinition = { type: StepType.UpdateRecord, prompt: 'Set status to active' }; + const outcome: StepOutcome = { + type: 'record-task', + stepId: 'update-1', + stepIndex: 0, + status: 'awaiting-input', + }; + const execution: StepExecutionData = { + type: 'update-record', + stepIndex: 0, + pendingData: { displayName: 'Status', name: 'status', value: 'active' }, + selectedRecordRef: { collectionName: 'customers', recordId: [1], stepIndex: 0 }, + }; + + const result = StepSummaryBuilder.build(step, outcome, execution); + + expect(result).toContain('Pending:'); + expect(result).toContain('"displayName":"Status"'); + expect(result).toContain('"value":"active"'); + expect(result).not.toContain('Input:'); + }); + + it('uses Pending for trigger-action step with pendingData', () => { + const step: StepDefinition = { + type: StepType.TriggerAction, + prompt: 'Archive the customer', + }; + const outcome: StepOutcome = { + type: 'record-task', + stepId: 'trigger-1', + stepIndex: 0, + status: 'awaiting-input', + }; + const execution: StepExecutionData = { + type: 'trigger-action', + stepIndex: 0, + pendingData: { displayName: 'Archive Customer', name: 'archive' }, + selectedRecordRef: { collectionName: 'customers', recordId: [1], stepIndex: 0 }, + }; + + const result = StepSummaryBuilder.build(step, outcome, execution); + + expect(result).toContain('Pending:'); + expect(result).toContain('"displayName":"Archive Customer"'); + expect(result).toContain('"name":"archive"'); + expect(result).not.toContain('Input:'); + }); + + it('renders load-related-record completed as Loaded: (no Input: or Output:)', () => { + const step: StepDefinition = { + type: StepType.LoadRelatedRecord, + prompt: 'Load the address', + }; + 
const outcome: StepOutcome = { + type: 'record-task', + stepId: 'load-1', + stepIndex: 1, + status: 'success', + }; + const execution: StepExecutionData = { + type: 'load-related-record', + stepIndex: 1, + selectedRecordRef: { collectionName: 'customers', recordId: [42], stepIndex: 0 }, + executionResult: { + relation: { name: 'address', displayName: 'Address' }, + record: { collectionName: 'addresses', recordId: [1], stepIndex: 1 }, + }, + }; + + const result = StepSummaryBuilder.build(step, outcome, execution); + + const lines = result.split('\n'); + expect(lines).toHaveLength(3); + expect(lines[0]).toBe('Step "load-1" (index 1):'); + expect(lines[1]).toBe(' Prompt: Load the address'); + expect(lines[2]).toBe(' Loaded: customers #42 → [Address] → addresses #1 (step 1)'); + expect(result).not.toContain('Input:'); + expect(result).not.toContain('Output:'); + }); + + it('renders load-related-record skipped as generic Output: fallback', () => { + const step: StepDefinition = { + type: StepType.LoadRelatedRecord, + prompt: 'Load the address', + }; + const outcome: StepOutcome = { + type: 'record-task', + stepId: 'load-1', + stepIndex: 1, + status: 'success', + }; + const execution: StepExecutionData = { + type: 'load-related-record', + stepIndex: 1, + selectedRecordRef: { collectionName: 'customers', recordId: [42], stepIndex: 0 }, + executionResult: { skipped: true }, + }; + + const result = StepSummaryBuilder.build(step, outcome, execution); + + expect(result).toContain('Output: {"skipped":true}'); + expect(result).not.toContain('Loaded:'); + }); + + it('renders load-related-record pending state with Pending: line', () => { + const step: StepDefinition = { + type: StepType.LoadRelatedRecord, + prompt: 'Load the address', + }; + const outcome: StepOutcome = { + type: 'record-task', + stepId: 'load-1', + stepIndex: 1, + status: 'awaiting-input', + }; + const execution: StepExecutionData = { + type: 'load-related-record', + stepIndex: 1, + selectedRecordRef: { 
collectionName: 'customers', recordId: [42], stepIndex: 0 }, + pendingData: { + displayName: 'Address', + name: 'address', + relatedCollectionName: 'addresses', + selectedRecordId: [1], + }, + }; + + const result = StepSummaryBuilder.build(step, outcome, execution); + + expect(result).toContain('Pending:'); + expect(result).toContain('"displayName":"Address"'); + expect(result).not.toContain('Input:'); + expect(result).not.toContain('Output:'); + expect(result).not.toContain('Loaded:'); + }); + + it('shows "(no prompt)" when step has no prompt', () => { + const step: StepDefinition = { type: StepType.Condition, options: ['A', 'B'] }; + const outcome = makeConditionOutcome('cond-1', 0); + const execution: StepExecutionData = { + type: 'condition', + stepIndex: 0, + executionParams: { answer: 'A', reasoning: 'Only option' }, + executionResult: { answer: 'A' }, + }; + + const result = StepSummaryBuilder.build(step, outcome, execution); + + expect(result).toContain('Prompt: (no prompt)'); + }); + }); +}); diff --git a/packages/workflow-executor/test/executors/trigger-record-action-step-executor.test.ts b/packages/workflow-executor/test/executors/trigger-record-action-step-executor.test.ts new file mode 100644 index 0000000000..7bb0a77df3 --- /dev/null +++ b/packages/workflow-executor/test/executors/trigger-record-action-step-executor.test.ts @@ -0,0 +1,920 @@ +import type { AgentPort } from '../../src/ports/agent-port'; +import type { RunStore } from '../../src/ports/run-store'; +import type { WorkflowPort } from '../../src/ports/workflow-port'; +import type { ExecutionContext } from '../../src/types/execution'; +import type { CollectionSchema, RecordRef } from '../../src/types/record'; +import type { RecordTaskStepDefinition } from '../../src/types/step-definition'; +import type { TriggerRecordActionStepExecutionData } from '../../src/types/step-execution-data'; + +import { StepStateError } from '../../src/errors'; +import TriggerRecordActionStepExecutor from 
'../../src/executors/trigger-record-action-step-executor'; +import { StepType } from '../../src/types/step-definition'; + +function makeStep(overrides: Partial = {}): RecordTaskStepDefinition { + return { + type: StepType.TriggerAction, + prompt: 'Send a welcome email to the customer', + ...overrides, + }; +} + +function makeRecordRef(overrides: Partial = {}): RecordRef { + return { + collectionName: 'customers', + recordId: [42], + stepIndex: 0, + ...overrides, + }; +} + +function makeMockAgentPort(): AgentPort { + return { + getRecord: jest.fn(), + updateRecord: jest.fn(), + getRelatedData: jest.fn(), + executeAction: jest.fn().mockResolvedValue(undefined), + } as unknown as AgentPort; +} + +function makeCollectionSchema(overrides: Partial = {}): CollectionSchema { + return { + collectionName: 'customers', + collectionDisplayName: 'Customers', + primaryKeyFields: ['id'], + fields: [ + { fieldName: 'email', displayName: 'Email', isRelationship: false }, + { fieldName: 'status', displayName: 'Status', isRelationship: false }, + ], + actions: [ + { name: 'send-welcome-email', displayName: 'Send Welcome Email' }, + { name: 'archive', displayName: 'Archive Customer' }, + ], + ...overrides, + }; +} + +function makeMockRunStore(overrides: Partial = {}): RunStore { + return { + getStepExecutions: jest.fn().mockResolvedValue([]), + saveStepExecution: jest.fn().mockResolvedValue(undefined), + ...overrides, + }; +} + +function makeMockWorkflowPort( + schemasByCollection: Record = { + customers: makeCollectionSchema(), + }, +): WorkflowPort { + return { + getPendingStepExecutions: jest.fn().mockResolvedValue([]), + getPendingStepExecutionsForRun: jest.fn().mockResolvedValue(null), + updateStepExecution: jest.fn().mockResolvedValue(undefined), + getCollectionSchema: jest + .fn() + .mockImplementation((name: string) => + Promise.resolve( + schemasByCollection[name] ?? 
makeCollectionSchema({ collectionName: name }), + ), + ), + getMcpServerConfigs: jest.fn().mockResolvedValue([]), + }; +} + +function makeMockModel(toolCallArgs?: Record, toolName = 'select-action') { + const invoke = jest.fn().mockResolvedValue({ + tool_calls: toolCallArgs ? [{ name: toolName, args: toolCallArgs, id: 'call_1' }] : undefined, + }); + const bindTools = jest.fn().mockReturnValue({ invoke }); + const model = { bindTools } as unknown as ExecutionContext['model']; + + return { model, bindTools, invoke }; +} + +function makeContext( + overrides: Partial> = {}, +): ExecutionContext { + return { + runId: 'run-1', + stepId: 'trigger-1', + stepIndex: 0, + baseRecordRef: makeRecordRef(), + stepDefinition: makeStep(), + model: makeMockModel({ + actionName: 'Send Welcome Email', + reasoning: 'User requested welcome email', + }).model, + agentPort: makeMockAgentPort(), + workflowPort: makeMockWorkflowPort(), + runStore: makeMockRunStore(), + previousSteps: [], + logger: { error: jest.fn() }, + ...overrides, + }; +} + +describe('TriggerRecordActionStepExecutor', () => { + describe('automaticExecution: trigger direct (Branch B)', () => { + it('triggers the action and returns success', async () => { + const agentPort = makeMockAgentPort(); + (agentPort.executeAction as jest.Mock).mockResolvedValue({ message: 'Email sent' }); + const mockModel = makeMockModel({ + actionName: 'Send Welcome Email', + reasoning: 'User requested welcome email', + }); + const runStore = makeMockRunStore(); + const context = makeContext({ + model: mockModel.model, + agentPort, + runStore, + stepDefinition: makeStep({ automaticExecution: true }), + }); + const executor = new TriggerRecordActionStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('success'); + expect(agentPort.executeAction).toHaveBeenCalledWith({ + collection: 'customers', + action: 'send-welcome-email', + id: [42], + }); + 
expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ + type: 'trigger-action', + stepIndex: 0, + executionParams: { + displayName: 'Send Welcome Email', + name: 'send-welcome-email', + }, + executionResult: { success: true, actionResult: { message: 'Email sent' } }, + selectedRecordRef: expect.objectContaining({ + collectionName: 'customers', + recordId: [42], + }), + }), + ); + }); + }); + + describe('without automaticExecution: awaiting-input (Branch C)', () => { + it('saves pendingAction and returns awaiting-input', async () => { + const mockModel = makeMockModel({ + actionName: 'Send Welcome Email', + reasoning: 'User requested welcome email', + }); + const runStore = makeMockRunStore(); + const context = makeContext({ + model: mockModel.model, + runStore, + }); + const executor = new TriggerRecordActionStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('awaiting-input'); + expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ + type: 'trigger-action', + stepIndex: 0, + pendingData: { + displayName: 'Send Welcome Email', + name: 'send-welcome-email', + }, + selectedRecordRef: expect.objectContaining({ + collectionName: 'customers', + recordId: [42], + }), + }), + ); + }); + }); + + describe('confirmation accepted (Branch A)', () => { + it('triggers the action when user confirms and preserves pendingAction', async () => { + const agentPort = makeMockAgentPort(); + (agentPort.executeAction as jest.Mock).mockResolvedValue({ message: 'Email sent' }); + const execution: TriggerRecordActionStepExecutionData = { + type: 'trigger-action', + stepIndex: 0, + pendingData: { + displayName: 'Send Welcome Email', + name: 'send-welcome-email', + }, + selectedRecordRef: makeRecordRef(), + }; + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([execution]), + }); + const userConfirmed = true; + const 
context = makeContext({ agentPort, runStore, userConfirmed }); + const executor = new TriggerRecordActionStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('success'); + expect(agentPort.executeAction).toHaveBeenCalledWith({ + collection: 'customers', + action: 'send-welcome-email', + id: [42], + }); + expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ + type: 'trigger-action', + executionParams: { + displayName: 'Send Welcome Email', + name: 'send-welcome-email', + }, + executionResult: { success: true, actionResult: { message: 'Email sent' } }, + pendingData: { + displayName: 'Send Welcome Email', + name: 'send-welcome-email', + }, + }), + ); + }); + }); + + describe('confirmation rejected (Branch A)', () => { + it('skips the action when user rejects', async () => { + const agentPort = makeMockAgentPort(); + const execution: TriggerRecordActionStepExecutionData = { + type: 'trigger-action', + stepIndex: 0, + pendingData: { + displayName: 'Send Welcome Email', + name: 'send-welcome-email', + }, + selectedRecordRef: makeRecordRef(), + }; + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([execution]), + }); + const userConfirmed = false; + const context = makeContext({ agentPort, runStore, userConfirmed }); + const executor = new TriggerRecordActionStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('success'); + expect(agentPort.executeAction).not.toHaveBeenCalled(); + expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ + executionResult: { skipped: true }, + pendingData: { + displayName: 'Send Welcome Email', + name: 'send-welcome-email', + }, + }), + ); + }); + }); + + describe('no pending action in confirmation flow (Branch A)', () => { + it('returns error outcome when no pending action is found', async () => { + const 
runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([]), + }); + const userConfirmed = true; + const context = makeContext({ runStore, userConfirmed }); + const executor = new TriggerRecordActionStepExecutor(context); + + await expect(executor.execute()).resolves.toMatchObject({ + stepOutcome: { + type: 'record-task', + stepId: 'trigger-1', + stepIndex: 0, + status: 'error', + error: 'An unexpected error occurred while processing this step.', + }, + }); + expect(runStore.saveStepExecution).not.toHaveBeenCalled(); + }); + + it('returns error outcome when execution exists but stepIndex does not match', async () => { + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([ + { + type: 'trigger-action', + stepIndex: 5, + pendingData: { displayName: 'Send Welcome Email' }, + selectedRecordRef: makeRecordRef(), + }, + ]), + }); + const userConfirmed = true; + const context = makeContext({ runStore, userConfirmed }); + const executor = new TriggerRecordActionStepExecutor(context); + + await expect(executor.execute()).resolves.toMatchObject({ + stepOutcome: { + type: 'record-task', + stepId: 'trigger-1', + stepIndex: 0, + status: 'error', + error: 'An unexpected error occurred while processing this step.', + }, + }); + expect(runStore.saveStepExecution).not.toHaveBeenCalled(); + }); + + it('returns error outcome when execution exists but pendingData is absent', async () => { + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([ + { + type: 'trigger-action', + stepIndex: 0, + selectedRecordRef: makeRecordRef(), + }, + ]), + }); + const userConfirmed = true; + const context = makeContext({ runStore, userConfirmed }); + const executor = new TriggerRecordActionStepExecutor(context); + + await expect(executor.execute()).resolves.toMatchObject({ + stepOutcome: { + type: 'record-task', + stepId: 'trigger-1', + stepIndex: 0, + status: 'error', + error: 'An unexpected error occurred while 
processing this step.', + }, + }); + expect(runStore.saveStepExecution).not.toHaveBeenCalled(); + }); + }); + + describe('NoActionsError', () => { + it('returns error when collection has no actions', async () => { + const schema = makeCollectionSchema({ actions: [] }); + const mockModel = makeMockModel({ + actionName: 'Send Welcome Email', + reasoning: 'test', + }); + const runStore = makeMockRunStore(); + const workflowPort = makeMockWorkflowPort({ customers: schema }); + const context = makeContext({ model: mockModel.model, runStore, workflowPort }); + const executor = new TriggerRecordActionStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe('No actions are available on this record.'); + expect(runStore.saveStepExecution).not.toHaveBeenCalled(); + }); + }); + + describe('resolveActionName failure', () => { + it('returns error when AI returns an action name not found in the schema', async () => { + const agentPort = makeMockAgentPort(); + const mockModel = makeMockModel({ + actionName: 'NonExistentAction', + reasoning: 'hallucinated', + }); + const schema = makeCollectionSchema({ + actions: [{ name: 'archive', displayName: 'Archive Customer' }], + }); + const runStore = makeMockRunStore(); + const workflowPort = makeMockWorkflowPort({ customers: schema }); + const context = makeContext({ + model: mockModel.model, + agentPort, + runStore, + workflowPort, + stepDefinition: makeStep({ automaticExecution: true }), + }); + const executor = new TriggerRecordActionStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe( + "The AI selected an action that doesn't exist on this record. 
Try rephrasing the step's prompt.", + ); + expect(agentPort.executeAction).not.toHaveBeenCalled(); + expect(runStore.saveStepExecution).not.toHaveBeenCalled(); + }); + }); + + describe('agentPort.executeAction WorkflowExecutorError (Branch B)', () => { + it('returns error when executeAction throws WorkflowExecutorError', async () => { + const agentPort = makeMockAgentPort(); + (agentPort.executeAction as jest.Mock).mockRejectedValue( + new StepStateError('Action not permitted'), + ); + const mockModel = makeMockModel({ + actionName: 'Send Welcome Email', + reasoning: 'test', + }); + const runStore = makeMockRunStore(); + const context = makeContext({ + model: mockModel.model, + agentPort, + runStore, + stepDefinition: makeStep({ automaticExecution: true }), + }); + const executor = new TriggerRecordActionStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.type).toBe('record-task'); + expect(result.stepOutcome.stepId).toBe('trigger-1'); + expect(result.stepOutcome.stepIndex).toBe(0); + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe( + 'An unexpected error occurred while processing this step.', + ); + expect(runStore.saveStepExecution).not.toHaveBeenCalled(); + }); + }); + + describe('agentPort.executeAction WorkflowExecutorError (Branch A)', () => { + it('returns error when executeAction throws WorkflowExecutorError during confirmation', async () => { + const agentPort = makeMockAgentPort(); + (agentPort.executeAction as jest.Mock).mockRejectedValue( + new StepStateError('Action not permitted'), + ); + const execution: TriggerRecordActionStepExecutionData = { + type: 'trigger-action', + stepIndex: 0, + pendingData: { + displayName: 'Send Welcome Email', + name: 'send-welcome-email', + }, + selectedRecordRef: makeRecordRef(), + }; + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([execution]), + }); + const userConfirmed = true; + const context 
= makeContext({ agentPort, runStore, userConfirmed }); + const executor = new TriggerRecordActionStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.type).toBe('record-task'); + expect(result.stepOutcome.stepId).toBe('trigger-1'); + expect(result.stepOutcome.stepIndex).toBe(0); + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe( + 'An unexpected error occurred while processing this step.', + ); + expect(runStore.saveStepExecution).not.toHaveBeenCalled(); + }); + }); + + describe('agentPort.executeAction infra error', () => { + it('returns error outcome for infrastructure errors (Branch B)', async () => { + const agentPort = makeMockAgentPort(); + (agentPort.executeAction as jest.Mock).mockRejectedValue(new Error('Connection refused')); + const mockModel = makeMockModel({ + actionName: 'Send Welcome Email', + reasoning: 'test', + }); + const context = makeContext({ + model: mockModel.model, + agentPort, + stepDefinition: makeStep({ automaticExecution: true }), + }); + const executor = new TriggerRecordActionStepExecutor(context); + + const result = await executor.execute(); + expect(result.stepOutcome.status).toBe('error'); + }); + + it('returns error outcome for infrastructure errors (Branch A)', async () => { + const agentPort = makeMockAgentPort(); + (agentPort.executeAction as jest.Mock).mockRejectedValue(new Error('Connection refused')); + const execution: TriggerRecordActionStepExecutionData = { + type: 'trigger-action', + stepIndex: 0, + pendingData: { + displayName: 'Send Welcome Email', + name: 'send-welcome-email', + }, + selectedRecordRef: makeRecordRef(), + }; + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([execution]), + }); + const userConfirmed = true; + const context = makeContext({ agentPort, runStore, userConfirmed }); + const executor = new TriggerRecordActionStepExecutor(context); + + const result = await 
executor.execute(); + expect(result.stepOutcome.status).toBe('error'); + }); + + it('returns user message and logs cause when agentPort.executeAction throws an infra error', async () => { + const logger = { error: jest.fn() }; + const agentPort = makeMockAgentPort(); + (agentPort.executeAction as jest.Mock).mockRejectedValue(new Error('DB connection lost')); + const mockModel = makeMockModel({ + actionName: 'Send Welcome Email', + reasoning: 'User requested welcome email', + }); + const context = makeContext({ + model: mockModel.model, + agentPort, + logger, + stepDefinition: makeStep({ automaticExecution: true }), + }); + const executor = new TriggerRecordActionStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe( + 'An error occurred while accessing your data. Please try again.', + ); + expect(logger.error).toHaveBeenCalledWith( + 'Agent port "executeAction" failed: DB connection lost', + expect.objectContaining({ cause: 'DB connection lost' }), + ); + }); + }); + + describe('displayName → name resolution', () => { + it('calls executeAction with the technical name when AI returns a displayName', async () => { + const agentPort = makeMockAgentPort(); + // AI returns displayName 'Archive Customer', technical name is 'archive' + const mockModel = makeMockModel({ + actionName: 'Archive Customer', + reasoning: 'User wants to archive', + }); + const context = makeContext({ + model: mockModel.model, + agentPort, + stepDefinition: makeStep({ automaticExecution: true }), + }); + const executor = new TriggerRecordActionStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('success'); + expect(agentPort.executeAction).toHaveBeenCalledWith({ + collection: 'customers', + action: 'archive', + id: [42], + }); + }); + + it('resolves action when AI returns technical name instead of displayName', async () => { + const 
agentPort = makeMockAgentPort(); + // AI returns technical name 'archive' instead of display name 'Archive Customer' + const mockModel = makeMockModel({ + actionName: 'archive', + reasoning: 'fallback to technical name', + }); + const schema = makeCollectionSchema({ + actions: [{ name: 'archive', displayName: 'Archive Customer' }], + }); + const workflowPort = makeMockWorkflowPort({ customers: schema }); + const context = makeContext({ + model: mockModel.model, + agentPort, + workflowPort, + stepDefinition: makeStep({ automaticExecution: true }), + }); + const executor = new TriggerRecordActionStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('success'); + expect(agentPort.executeAction).toHaveBeenCalledWith({ + collection: 'customers', + action: 'archive', + id: [42], + }); + }); + }); + + describe('multi-record AI selection', () => { + it('uses AI to select among multiple records then selects action', async () => { + const baseRecordRef = makeRecordRef({ stepIndex: 1 }); + const relatedRecord = makeRecordRef({ + stepIndex: 2, + recordId: [99], + collectionName: 'orders', + }); + + const ordersSchema = makeCollectionSchema({ + collectionName: 'orders', + collectionDisplayName: 'Orders', + actions: [{ name: 'cancel-order', displayName: 'Cancel Order' }], + }); + + // First call: select-record, second call: select-action + const invoke = jest + .fn() + .mockResolvedValueOnce({ + tool_calls: [ + { + name: 'select-record', + args: { recordIdentifier: 'Step 2 - Orders #99' }, + id: 'call_1', + }, + ], + }) + .mockResolvedValueOnce({ + tool_calls: [ + { + name: 'select-action', + args: { actionName: 'Cancel Order', reasoning: 'Cancel the order' }, + id: 'call_2', + }, + ], + }); + const bindTools = jest.fn().mockReturnValue({ invoke }); + const model = { bindTools } as unknown as ExecutionContext['model']; + + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([ + { + type: 
'load-related-record', + stepIndex: 2, + executionResult: { + relation: { name: 'order', displayName: 'Order' }, + record: relatedRecord, + }, + selectedRecordRef: makeRecordRef(), + }, + ]), + }); + const workflowPort = makeMockWorkflowPort({ + customers: makeCollectionSchema(), + orders: ordersSchema, + }); + const agentPort = makeMockAgentPort(); + const context = makeContext({ baseRecordRef, model, runStore, workflowPort, agentPort }); + const executor = new TriggerRecordActionStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('awaiting-input'); + expect(bindTools).toHaveBeenCalledTimes(2); + + const selectTool = bindTools.mock.calls[0][0][0]; + expect(selectTool.name).toBe('select-record'); + + const actionTool = bindTools.mock.calls[1][0][0]; + expect(actionTool.name).toBe('select-action'); + + expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ + pendingData: { displayName: 'Cancel Order', name: 'cancel-order' }, + selectedRecordRef: expect.objectContaining({ + recordId: [99], + collectionName: 'orders', + }), + }), + ); + }); + }); + + describe('stepOutcome shape', () => { + it('emits correct type, stepId and stepIndex in the outcome', async () => { + const context = makeContext({ stepDefinition: makeStep({ automaticExecution: true }) }); + const executor = new TriggerRecordActionStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome).toMatchObject({ + type: 'record-task', + stepId: 'trigger-1', + stepIndex: 0, + status: 'success', + }); + }); + }); + + describe('schema caching', () => { + it('fetches getCollectionSchema once per collection even when called twice (Branch B)', async () => { + const workflowPort = makeMockWorkflowPort(); + const context = makeContext({ + workflowPort, + stepDefinition: makeStep({ automaticExecution: true }), + }); + const executor = new TriggerRecordActionStepExecutor(context); + + 
await executor.execute(); + + expect(workflowPort.getCollectionSchema).toHaveBeenCalledTimes(1); + }); + }); + + describe('AI malformed/missing tool call', () => { + it('returns error on malformed tool call', async () => { + const invoke = jest.fn().mockResolvedValue({ + tool_calls: [], + invalid_tool_calls: [ + { name: 'select-action', args: '{bad json', error: 'JSON parse error' }, + ], + }); + const bindTools = jest.fn().mockReturnValue({ invoke }); + const runStore = makeMockRunStore(); + const context = makeContext({ + model: { bindTools } as unknown as ExecutionContext['model'], + runStore, + }); + const executor = new TriggerRecordActionStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.type).toBe('record-task'); + expect(result.stepOutcome.stepId).toBe('trigger-1'); + expect(result.stepOutcome.stepIndex).toBe(0); + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe( + "The AI returned an unexpected response. Try rephrasing the step's prompt.", + ); + expect(runStore.saveStepExecution).not.toHaveBeenCalled(); + }); + + it('returns error when AI returns no tool call', async () => { + const invoke = jest.fn().mockResolvedValue({ tool_calls: [] }); + const bindTools = jest.fn().mockReturnValue({ invoke }); + const runStore = makeMockRunStore(); + const context = makeContext({ + model: { bindTools } as unknown as ExecutionContext['model'], + runStore, + }); + const executor = new TriggerRecordActionStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.type).toBe('record-task'); + expect(result.stepOutcome.stepId).toBe('trigger-1'); + expect(result.stepOutcome.stepIndex).toBe(0); + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe( + "The AI couldn't decide what to do. 
Try rephrasing the step's prompt.", + ); + expect(runStore.saveStepExecution).not.toHaveBeenCalled(); + }); + }); + + describe('RunStore error propagation', () => { + it('returns error outcome when getStepExecutions fails (Branch A)', async () => { + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockRejectedValue(new Error('DB timeout')), + }); + const userConfirmed = true; + const context = makeContext({ runStore, userConfirmed }); + const executor = new TriggerRecordActionStepExecutor(context); + + const result = await executor.execute(); + expect(result.stepOutcome.status).toBe('error'); + }); + + it('returns error outcome when saveStepExecution fails on user reject (Branch A)', async () => { + const execution: TriggerRecordActionStepExecutionData = { + type: 'trigger-action', + stepIndex: 0, + pendingData: { + displayName: 'Send Welcome Email', + name: 'send-welcome-email', + }, + selectedRecordRef: makeRecordRef(), + }; + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([execution]), + saveStepExecution: jest.fn().mockRejectedValue(new Error('Disk full')), + }); + const userConfirmed = false; + const context = makeContext({ runStore, userConfirmed }); + const executor = new TriggerRecordActionStepExecutor(context); + + const result = await executor.execute(); + expect(result.stepOutcome.status).toBe('error'); + }); + + it('returns error outcome when saveStepExecution fails saving awaiting-input (Branch C)', async () => { + const runStore = makeMockRunStore({ + saveStepExecution: jest.fn().mockRejectedValue(new Error('Disk full')), + }); + const context = makeContext({ runStore }); + const executor = new TriggerRecordActionStepExecutor(context); + + const result = await executor.execute(); + expect(result.stepOutcome.status).toBe('error'); + }); + + it('returns error outcome after successful executeAction when saveStepExecution fails (Branch B)', async () => { + const runStore = makeMockRunStore({ + 
saveStepExecution: jest.fn().mockRejectedValue(new Error('Disk full')), + }); + const context = makeContext({ + runStore, + stepDefinition: makeStep({ automaticExecution: true }), + }); + const executor = new TriggerRecordActionStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe('The step result could not be saved. Please retry.'); + }); + + it('returns error outcome after successful executeAction when saveStepExecution fails (Branch A confirmed)', async () => { + const execution: TriggerRecordActionStepExecutionData = { + type: 'trigger-action', + stepIndex: 0, + pendingData: { + displayName: 'Send Welcome Email', + name: 'send-welcome-email', + }, + selectedRecordRef: makeRecordRef(), + }; + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([execution]), + saveStepExecution: jest.fn().mockRejectedValue(new Error('Disk full')), + }); + const userConfirmed = true; + const context = makeContext({ runStore, userConfirmed }); + const executor = new TriggerRecordActionStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe('The step result could not be saved. 
Please retry.'); + }); + }); + + describe('default prompt', () => { + it('uses default prompt when step.prompt is undefined', async () => { + const mockModel = makeMockModel({ + actionName: 'Send Welcome Email', + reasoning: 'test', + }); + const context = makeContext({ + model: mockModel.model, + stepDefinition: makeStep({ prompt: undefined }), + }); + const executor = new TriggerRecordActionStepExecutor(context); + + await executor.execute(); + + const messages = mockModel.invoke.mock.calls[mockModel.invoke.mock.calls.length - 1][0]; + const humanMessage = messages[messages.length - 1]; + expect(humanMessage.content).toBe('**Request**: Trigger the relevant action.'); + }); + }); + + describe('previous steps context', () => { + it('includes previous steps summary in select-action messages', async () => { + const mockModel = makeMockModel({ + actionName: 'Send Welcome Email', + reasoning: 'test', + }); + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([ + { + type: 'condition', + stepIndex: 0, + executionParams: { answer: 'Yes', reasoning: 'Approved' }, + }, + ]), + }); + const context = makeContext({ + model: mockModel.model, + runStore, + previousSteps: [ + { + stepDefinition: { + type: StepType.Condition, + options: ['Yes', 'No'], + prompt: 'Should we proceed?', + }, + stepOutcome: { + type: 'condition', + stepId: 'prev-step', + stepIndex: 0, + status: 'success', + }, + }, + ], + }); + const executor = new TriggerRecordActionStepExecutor({ + ...context, + stepId: 'trigger-2', + stepIndex: 1, + }); + + await executor.execute(); + + const messages = mockModel.invoke.mock.calls[0][0]; + // previous steps message + system prompt + collection info + human message = 4 + expect(messages).toHaveLength(4); + expect(messages[0].content).toContain('Should we proceed?'); + expect(messages[0].content).toContain('"answer":"Yes"'); + expect(messages[1].content).toContain('triggering an action'); + }); + }); +}); diff --git 
a/packages/workflow-executor/test/executors/update-record-step-executor.test.ts b/packages/workflow-executor/test/executors/update-record-step-executor.test.ts new file mode 100644 index 0000000000..00ed053d06 --- /dev/null +++ b/packages/workflow-executor/test/executors/update-record-step-executor.test.ts @@ -0,0 +1,875 @@ +import type { AgentPort } from '../../src/ports/agent-port'; +import type { RunStore } from '../../src/ports/run-store'; +import type { WorkflowPort } from '../../src/ports/workflow-port'; +import type { ExecutionContext } from '../../src/types/execution'; +import type { CollectionSchema, RecordRef } from '../../src/types/record'; +import type { RecordTaskStepDefinition } from '../../src/types/step-definition'; +import type { UpdateRecordStepExecutionData } from '../../src/types/step-execution-data'; + +import { StepStateError } from '../../src/errors'; +import UpdateRecordStepExecutor from '../../src/executors/update-record-step-executor'; +import { StepType } from '../../src/types/step-definition'; + +function makeStep(overrides: Partial = {}): RecordTaskStepDefinition { + return { + type: StepType.UpdateRecord, + prompt: 'Set the customer status to active', + ...overrides, + }; +} + +function makeRecordRef(overrides: Partial = {}): RecordRef { + return { + collectionName: 'customers', + recordId: [42], + stepIndex: 0, + ...overrides, + }; +} + +function makeMockAgentPort( + updatedValues: Record = { status: 'active', name: 'John Doe' }, +): AgentPort { + return { + getRecord: jest.fn().mockResolvedValue({ values: updatedValues }), + updateRecord: jest.fn().mockResolvedValue({ + collectionName: 'customers', + recordId: [42], + values: updatedValues, + }), + getRelatedData: jest.fn(), + executeAction: jest.fn(), + } as unknown as AgentPort; +} + +function makeCollectionSchema(overrides: Partial = {}): CollectionSchema { + return { + collectionName: 'customers', + collectionDisplayName: 'Customers', + primaryKeyFields: ['id'], + fields: [ + { 
fieldName: 'email', displayName: 'Email', isRelationship: false }, + { fieldName: 'status', displayName: 'Status', isRelationship: false }, + { fieldName: 'name', displayName: 'Full Name', isRelationship: false }, + { fieldName: 'orders', displayName: 'Orders', isRelationship: true }, + ], + actions: [], + ...overrides, + }; +} + +function makeMockRunStore(overrides: Partial = {}): RunStore { + return { + getStepExecutions: jest.fn().mockResolvedValue([]), + saveStepExecution: jest.fn().mockResolvedValue(undefined), + ...overrides, + }; +} + +function makeMockWorkflowPort( + schemasByCollection: Record = { + customers: makeCollectionSchema(), + }, +): WorkflowPort { + return { + getPendingStepExecutions: jest.fn().mockResolvedValue([]), + getPendingStepExecutionsForRun: jest.fn().mockResolvedValue(null), + updateStepExecution: jest.fn().mockResolvedValue(undefined), + getCollectionSchema: jest + .fn() + .mockImplementation((name: string) => + Promise.resolve( + schemasByCollection[name] ?? makeCollectionSchema({ collectionName: name }), + ), + ), + getMcpServerConfigs: jest.fn().mockResolvedValue([]), + }; +} + +function makeMockModel(toolCallArgs?: Record, toolName = 'update-record-field') { + const invoke = jest.fn().mockResolvedValue({ + tool_calls: toolCallArgs ? 
[{ name: toolName, args: toolCallArgs, id: 'call_1' }] : undefined, + }); + const bindTools = jest.fn().mockReturnValue({ invoke }); + const model = { bindTools } as unknown as ExecutionContext['model']; + + return { model, bindTools, invoke }; +} + +function makeContext( + overrides: Partial> = {}, +): ExecutionContext { + return { + runId: 'run-1', + stepId: 'update-1', + stepIndex: 0, + baseRecordRef: makeRecordRef(), + stepDefinition: makeStep(), + model: makeMockModel({ + fieldName: 'Status', + value: 'active', + reasoning: 'User requested status change', + }).model, + agentPort: makeMockAgentPort(), + workflowPort: makeMockWorkflowPort(), + runStore: makeMockRunStore(), + previousSteps: [], + logger: { error: jest.fn() }, + ...overrides, + }; +} + +describe('UpdateRecordStepExecutor', () => { + describe('automaticExecution: update direct (Branch B)', () => { + it('updates the record and returns success', async () => { + const updatedValues = { status: 'active', name: 'John Doe' }; + const agentPort = makeMockAgentPort(updatedValues); + const mockModel = makeMockModel({ + fieldName: 'Status', + value: 'active', + reasoning: 'User requested status change', + }); + const runStore = makeMockRunStore(); + const context = makeContext({ + model: mockModel.model, + agentPort, + runStore, + stepDefinition: makeStep({ automaticExecution: true }), + }); + const executor = new UpdateRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('success'); + expect(agentPort.updateRecord).toHaveBeenCalledWith({ + collection: 'customers', + id: [42], + values: { status: 'active' }, + }); + expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ + type: 'update-record', + stepIndex: 0, + executionParams: { displayName: 'Status', name: 'status', value: 'active' }, + executionResult: { updatedValues }, + selectedRecordRef: expect.objectContaining({ + collectionName: 'customers', + 
recordId: [42], + }), + }), + ); + }); + }); + + describe('without automaticExecution: awaiting-input (Branch C)', () => { + it('saves execution and returns awaiting-input', async () => { + const mockModel = makeMockModel({ + fieldName: 'Status', + value: 'active', + reasoning: 'User requested status change', + }); + const runStore = makeMockRunStore(); + const context = makeContext({ + model: mockModel.model, + runStore, + }); + const executor = new UpdateRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('awaiting-input'); + expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ + type: 'update-record', + stepIndex: 0, + pendingData: { displayName: 'Status', name: 'status', value: 'active' }, + selectedRecordRef: expect.objectContaining({ + collectionName: 'customers', + recordId: [42], + }), + }), + ); + }); + }); + + describe('confirmation accepted (Branch A)', () => { + it('updates the record when user confirms', async () => { + const updatedValues = { status: 'active', name: 'John Doe' }; + const agentPort = makeMockAgentPort(updatedValues); + const execution: UpdateRecordStepExecutionData = { + type: 'update-record', + stepIndex: 0, + pendingData: { displayName: 'Status', name: 'status', value: 'active' }, + selectedRecordRef: makeRecordRef(), + }; + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([execution]), + }); + const userConfirmed = true; + const context = makeContext({ agentPort, runStore, userConfirmed }); + const executor = new UpdateRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('success'); + expect(agentPort.updateRecord).toHaveBeenCalledWith({ + collection: 'customers', + id: [42], + values: { status: 'active' }, + }); + expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ + type: 
'update-record', + executionParams: { displayName: 'Status', name: 'status', value: 'active' }, + executionResult: { updatedValues }, + pendingData: { displayName: 'Status', name: 'status', value: 'active' }, + }), + ); + }); + }); + + describe('confirmation rejected (Branch A)', () => { + it('skips the update when user rejects', async () => { + const agentPort = makeMockAgentPort(); + const execution: UpdateRecordStepExecutionData = { + type: 'update-record', + stepIndex: 0, + pendingData: { displayName: 'Status', name: 'status', value: 'active' }, + selectedRecordRef: makeRecordRef(), + }; + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([execution]), + }); + const userConfirmed = false; + const context = makeContext({ agentPort, runStore, userConfirmed }); + const executor = new UpdateRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('success'); + expect(agentPort.updateRecord).not.toHaveBeenCalled(); + expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ + executionResult: { skipped: true }, + pendingData: { displayName: 'Status', name: 'status', value: 'active' }, + }), + ); + }); + }); + + describe('no pending update in phase 2 (Branch A)', () => { + it('returns error outcome when no pending update is found', async () => { + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([]), + }); + const userConfirmed = true; + const context = makeContext({ runStore, userConfirmed }); + const executor = new UpdateRecordStepExecutor(context); + + await expect(executor.execute()).resolves.toMatchObject({ + stepOutcome: { + type: 'record-task', + stepId: 'update-1', + stepIndex: 0, + status: 'error', + error: 'An unexpected error occurred while processing this step.', + }, + }); + expect(runStore.saveStepExecution).not.toHaveBeenCalled(); + }); + + it('returns error outcome when execution 
exists but stepIndex does not match', async () => { + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([ + { + type: 'update-record', + stepIndex: 5, + pendingData: { displayName: 'Status', name: 'status', value: 'active' }, + selectedRecordRef: makeRecordRef(), + }, + ]), + }); + const userConfirmed = true; + const context = makeContext({ runStore, userConfirmed }); + const executor = new UpdateRecordStepExecutor(context); + + await expect(executor.execute()).resolves.toMatchObject({ + stepOutcome: { + type: 'record-task', + stepId: 'update-1', + stepIndex: 0, + status: 'error', + error: 'An unexpected error occurred while processing this step.', + }, + }); + expect(runStore.saveStepExecution).not.toHaveBeenCalled(); + }); + + it('returns error outcome when execution exists but pendingData is absent', async () => { + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([ + { + type: 'update-record', + stepIndex: 0, + selectedRecordRef: makeRecordRef(), + }, + ]), + }); + const userConfirmed = true; + const context = makeContext({ runStore, userConfirmed }); + const executor = new UpdateRecordStepExecutor(context); + + await expect(executor.execute()).resolves.toMatchObject({ + stepOutcome: { + type: 'record-task', + stepId: 'update-1', + stepIndex: 0, + status: 'error', + error: 'An unexpected error occurred while processing this step.', + }, + }); + expect(runStore.saveStepExecution).not.toHaveBeenCalled(); + }); + }); + + describe('multi-record AI selection', () => { + it('uses AI to select among multiple records then selects field', async () => { + const baseRecordRef = makeRecordRef({ stepIndex: 1 }); + const relatedRecord = makeRecordRef({ + stepIndex: 2, + recordId: [99], + collectionName: 'orders', + }); + + const ordersSchema = makeCollectionSchema({ + collectionName: 'orders', + collectionDisplayName: 'Orders', + fields: [ + { fieldName: 'total', displayName: 'Total', isRelationship: 
false }, + { fieldName: 'status', displayName: 'Order Status', isRelationship: false }, + ], + }); + + // First call: select-record, second call: update-record-field + const invoke = jest + .fn() + .mockResolvedValueOnce({ + tool_calls: [ + { + name: 'select-record', + args: { recordIdentifier: 'Step 2 - Orders #99' }, + id: 'call_1', + }, + ], + }) + .mockResolvedValueOnce({ + tool_calls: [ + { + name: 'update-record-field', + args: { fieldName: 'Order Status', value: 'shipped', reasoning: 'Mark as shipped' }, + id: 'call_2', + }, + ], + }); + const bindTools = jest.fn().mockReturnValue({ invoke }); + const model = { bindTools } as unknown as ExecutionContext['model']; + + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([ + { + type: 'load-related-record', + stepIndex: 2, + executionResult: { + relation: { name: 'order', displayName: 'Order' }, + record: relatedRecord, + }, + selectedRecordRef: makeRecordRef(), + }, + ]), + }); + const workflowPort = makeMockWorkflowPort({ + customers: makeCollectionSchema(), + orders: ordersSchema, + }); + const context = makeContext({ baseRecordRef, model, runStore, workflowPort }); + const executor = new UpdateRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('awaiting-input'); + expect(bindTools).toHaveBeenCalledTimes(2); + + const selectTool = bindTools.mock.calls[0][0][0]; + expect(selectTool.name).toBe('select-record'); + + const updateTool = bindTools.mock.calls[1][0][0]; + expect(updateTool.name).toBe('update-record-field'); + + expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ + pendingData: { + displayName: 'Order Status', + name: 'status', + value: 'shipped', + }, + selectedRecordRef: expect.objectContaining({ + recordId: [99], + collectionName: 'orders', + }), + }), + ); + }); + }); + + describe('NoWritableFieldsError', () => { + it('returns error when all fields are 
relationships', async () => { + const schema = makeCollectionSchema({ + fields: [{ fieldName: 'orders', displayName: 'Orders', isRelationship: true }], + }); + const mockModel = makeMockModel({ + fieldName: 'Status', + value: 'active', + reasoning: 'test', + }); + const runStore = makeMockRunStore(); + const workflowPort = makeMockWorkflowPort({ customers: schema }); + const context = makeContext({ model: mockModel.model, runStore, workflowPort }); + const executor = new UpdateRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe( + 'This record type has no editable fields configured in Forest Admin.', + ); + expect(runStore.saveStepExecution).not.toHaveBeenCalled(); + }); + }); + + describe('resolveFieldName failure', () => { + it('returns error when field is not found during automaticExecution (Branch B)', async () => { + // AI returns a display name that doesn't match any field in the schema + const mockModel = makeMockModel({ + fieldName: 'NonExistentField', + value: 'test', + reasoning: 'test', + }); + const context = makeContext({ + model: mockModel.model, + stepDefinition: makeStep({ automaticExecution: true }), + }); + const executor = new UpdateRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe( + "The AI selected a field that doesn't exist on this record. 
Try rephrasing the step's prompt.", + ); + }); + }); + + describe('relationship fields excluded from update tool', () => { + it('excludes relationship fields from the tool schema', async () => { + const mockModel = makeMockModel({ + fieldName: 'Status', + value: 'active', + reasoning: 'test', + }); + const context = makeContext({ model: mockModel.model }); + const executor = new UpdateRecordStepExecutor(context); + + await executor.execute(); + + // Second bindTools call is for update-record-field (first may be select-record) + const lastCall = mockModel.bindTools.mock.calls[mockModel.bindTools.mock.calls.length - 1]; + const tool = lastCall[0][0]; + expect(tool.name).toBe('update-record-field'); + + // Non-relationship display names should be accepted + expect(tool.schema.parse({ fieldName: 'Email', value: 'x', reasoning: 'r' })).toBeTruthy(); + expect(tool.schema.parse({ fieldName: 'Status', value: 'x', reasoning: 'r' })).toBeTruthy(); + expect( + tool.schema.parse({ fieldName: 'Full Name', value: 'x', reasoning: 'r' }), + ).toBeTruthy(); + + // Relationship display name should be rejected + expect(() => + tool.schema.parse({ fieldName: 'Orders', value: 'x', reasoning: 'r' }), + ).toThrow(); + }); + }); + + describe('AI malformed/missing tool call', () => { + it('returns error on malformed tool call', async () => { + const invoke = jest.fn().mockResolvedValue({ + tool_calls: [], + invalid_tool_calls: [ + { name: 'update-record-field', args: '{bad json', error: 'JSON parse error' }, + ], + }); + const bindTools = jest.fn().mockReturnValue({ invoke }); + const runStore = makeMockRunStore(); + const context = makeContext({ + model: { bindTools } as unknown as ExecutionContext['model'], + runStore, + }); + const executor = new UpdateRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.type).toBe('record-task'); + expect(result.stepOutcome.stepId).toBe('update-1'); + expect(result.stepOutcome.stepIndex).toBe(0); + 
expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe( + "The AI returned an unexpected response. Try rephrasing the step's prompt.", + ); + expect(runStore.saveStepExecution).not.toHaveBeenCalled(); + }); + + it('returns error when AI returns no tool call', async () => { + const invoke = jest.fn().mockResolvedValue({ tool_calls: [] }); + const bindTools = jest.fn().mockReturnValue({ invoke }); + const runStore = makeMockRunStore(); + const context = makeContext({ + model: { bindTools } as unknown as ExecutionContext['model'], + runStore, + }); + const executor = new UpdateRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.type).toBe('record-task'); + expect(result.stepOutcome.stepId).toBe('update-1'); + expect(result.stepOutcome.stepIndex).toBe(0); + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe( + "The AI couldn't decide what to do. Try rephrasing the step's prompt.", + ); + expect(runStore.saveStepExecution).not.toHaveBeenCalled(); + }); + }); + + describe('agentPort.updateRecord WorkflowExecutorError (Branch B)', () => { + it('returns error when updateRecord throws WorkflowExecutorError', async () => { + const agentPort = makeMockAgentPort(); + (agentPort.updateRecord as jest.Mock).mockRejectedValue(new StepStateError('Record locked')); + const mockModel = makeMockModel({ + fieldName: 'Status', + value: 'active', + reasoning: 'test', + }); + const runStore = makeMockRunStore(); + const context = makeContext({ + model: mockModel.model, + agentPort, + runStore, + stepDefinition: makeStep({ automaticExecution: true }), + }); + const executor = new UpdateRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.type).toBe('record-task'); + expect(result.stepOutcome.stepId).toBe('update-1'); + expect(result.stepOutcome.stepIndex).toBe(0); + expect(result.stepOutcome.status).toBe('error'); + 
expect(result.stepOutcome.error).toBe( + 'An unexpected error occurred while processing this step.', + ); + }); + }); + + describe('agentPort.updateRecord WorkflowExecutorError (Branch A)', () => { + it('returns error when updateRecord throws WorkflowExecutorError during confirmation', async () => { + const agentPort = makeMockAgentPort(); + (agentPort.updateRecord as jest.Mock).mockRejectedValue(new StepStateError('Record locked')); + const execution: UpdateRecordStepExecutionData = { + type: 'update-record', + stepIndex: 0, + pendingData: { displayName: 'Status', name: 'status', value: 'active' }, + selectedRecordRef: makeRecordRef(), + }; + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([execution]), + }); + const userConfirmed = true; + const context = makeContext({ agentPort, runStore, userConfirmed }); + const executor = new UpdateRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.type).toBe('record-task'); + expect(result.stepOutcome.stepId).toBe('update-1'); + expect(result.stepOutcome.stepIndex).toBe(0); + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe( + 'An unexpected error occurred while processing this step.', + ); + }); + }); + + describe('agentPort.updateRecord infra error', () => { + it('returns error outcome for infrastructure errors (Branch B)', async () => { + const agentPort = makeMockAgentPort(); + (agentPort.updateRecord as jest.Mock).mockRejectedValue(new Error('Connection refused')); + const mockModel = makeMockModel({ + fieldName: 'Status', + value: 'active', + reasoning: 'test', + }); + const context = makeContext({ + model: mockModel.model, + agentPort, + stepDefinition: makeStep({ automaticExecution: true }), + }); + const executor = new UpdateRecordStepExecutor(context); + + const result = await executor.execute(); + expect(result.stepOutcome.status).toBe('error'); + }); + + it('returns error outcome for 
infrastructure errors (Branch A)', async () => { + const agentPort = makeMockAgentPort(); + (agentPort.updateRecord as jest.Mock).mockRejectedValue(new Error('Connection refused')); + const execution: UpdateRecordStepExecutionData = { + type: 'update-record', + stepIndex: 0, + pendingData: { displayName: 'Status', name: 'status', value: 'active' }, + selectedRecordRef: makeRecordRef(), + }; + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([execution]), + }); + const userConfirmed = true; + const context = makeContext({ agentPort, runStore, userConfirmed }); + const executor = new UpdateRecordStepExecutor(context); + + const result = await executor.execute(); + expect(result.stepOutcome.status).toBe('error'); + }); + + it('returns user message and logs cause when agentPort.updateRecord throws an infra error', async () => { + const logger = { error: jest.fn() }; + const agentPort = makeMockAgentPort(); + (agentPort.updateRecord as jest.Mock).mockRejectedValue(new Error('DB connection lost')); + const mockModel = makeMockModel({ + fieldName: 'Status', + value: 'active', + reasoning: 'test', + }); + const context = makeContext({ + model: mockModel.model, + agentPort, + logger, + stepDefinition: makeStep({ automaticExecution: true }), + }); + const executor = new UpdateRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe( + 'An error occurred while accessing your data. 
Please try again.', + ); + expect(logger.error).toHaveBeenCalledWith( + 'Agent port "updateRecord" failed: DB connection lost', + expect.objectContaining({ cause: 'DB connection lost' }), + ); + }); + }); + + describe('stepOutcome shape', () => { + it('emits correct type, stepId and stepIndex in the outcome', async () => { + const context = makeContext({ stepDefinition: makeStep({ automaticExecution: true }) }); + const executor = new UpdateRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome).toMatchObject({ + type: 'record-task', + stepId: 'update-1', + stepIndex: 0, + status: 'success', + }); + }); + }); + + describe('findField fieldName fallback', () => { + it('resolves update when AI returns raw fieldName instead of displayName', async () => { + const agentPort = makeMockAgentPort(); + // AI returns 'status' (fieldName) instead of 'Status' (displayName) + const mockModel = makeMockModel({ fieldName: 'status', value: 'active', reasoning: 'test' }); + const context = makeContext({ + model: mockModel.model, + agentPort, + stepDefinition: makeStep({ automaticExecution: true }), + }); + const executor = new UpdateRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('success'); + expect(agentPort.updateRecord).toHaveBeenCalledWith({ + collection: 'customers', + id: [42], + values: { status: 'active' }, + }); + }); + }); + + describe('schema caching', () => { + it('fetches getCollectionSchema once per collection even when called twice (Branch B)', async () => { + const workflowPort = makeMockWorkflowPort(); + const context = makeContext({ + workflowPort, + stepDefinition: makeStep({ automaticExecution: true }), + }); + const executor = new UpdateRecordStepExecutor(context); + + await executor.execute(); + + // resolveFieldName is called in handleFirstCall, so getCollectionSchema is only fetched once + 
expect(workflowPort.getCollectionSchema).toHaveBeenCalledTimes(1); + }); + }); + + describe('RunStore error propagation', () => { + it('returns error outcome when getStepExecutions fails (Branch A)', async () => { + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockRejectedValue(new Error('DB timeout')), + }); + const userConfirmed = true; + const context = makeContext({ runStore, userConfirmed }); + const executor = new UpdateRecordStepExecutor(context); + + const result = await executor.execute(); + expect(result.stepOutcome.status).toBe('error'); + }); + + it('returns error outcome when saveStepExecution fails on user reject (Branch A)', async () => { + const execution: UpdateRecordStepExecutionData = { + type: 'update-record', + stepIndex: 0, + pendingData: { displayName: 'Status', name: 'status', value: 'active' }, + selectedRecordRef: makeRecordRef(), + }; + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([execution]), + saveStepExecution: jest.fn().mockRejectedValue(new Error('Disk full')), + }); + const userConfirmed = false; + const context = makeContext({ runStore, userConfirmed }); + const executor = new UpdateRecordStepExecutor(context); + + const result = await executor.execute(); + expect(result.stepOutcome.status).toBe('error'); + }); + + it('returns error outcome when saveStepExecution fails saving awaiting-input (Branch C)', async () => { + const runStore = makeMockRunStore({ + saveStepExecution: jest.fn().mockRejectedValue(new Error('Disk full')), + }); + const context = makeContext({ runStore }); + const executor = new UpdateRecordStepExecutor(context); + + const result = await executor.execute(); + expect(result.stepOutcome.status).toBe('error'); + }); + + it('returns error outcome after successful updateRecord when saveStepExecution fails (Branch B)', async () => { + const runStore = makeMockRunStore({ + saveStepExecution: jest.fn().mockRejectedValue(new Error('Disk full')), + }); + 
const context = makeContext({ + runStore, + stepDefinition: makeStep({ automaticExecution: true }), + }); + const executor = new UpdateRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe('The step result could not be saved. Please retry.'); + }); + }); + + describe('default prompt', () => { + it('uses default prompt when step.prompt is undefined', async () => { + const mockModel = makeMockModel({ + fieldName: 'Status', + value: 'active', + reasoning: 'test', + }); + const context = makeContext({ + model: mockModel.model, + stepDefinition: makeStep({ prompt: undefined }), + }); + const executor = new UpdateRecordStepExecutor(context); + + await executor.execute(); + + const messages = mockModel.invoke.mock.calls[0][0]; + const humanMessage = messages[messages.length - 1]; + expect(humanMessage.content).toBe('**Request**: Update the relevant field.'); + }); + }); + + describe('previous steps context', () => { + it('includes previous steps summary in update-field messages', async () => { + const mockModel = makeMockModel({ + fieldName: 'Status', + value: 'active', + reasoning: 'test', + }); + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([ + { + type: 'condition', + stepIndex: 0, + executionParams: { answer: 'Yes', reasoning: 'Approved' }, + }, + ]), + }); + const context = makeContext({ + model: mockModel.model, + runStore, + previousSteps: [ + { + stepDefinition: { + type: StepType.Condition, + options: ['Yes', 'No'], + prompt: 'Should we proceed?', + }, + stepOutcome: { + type: 'condition', + stepId: 'prev-step', + stepIndex: 0, + status: 'success', + }, + }, + ], + }); + const executor = new UpdateRecordStepExecutor({ + ...context, + stepId: 'update-2', + stepIndex: 1, + }); + + await executor.execute(); + + const messages = mockModel.invoke.mock.calls[0][0]; + // previous steps summary + system prompt + 
collection info + human message = 4 + expect(messages).toHaveLength(4); + expect(messages[0].content).toContain('Should we proceed?'); + expect(messages[0].content).toContain('"answer":"Yes"'); + expect(messages[1].content).toContain('updating a field on a record'); + }); + }); +}); diff --git a/packages/workflow-executor/test/http/executor-http-server.test.ts b/packages/workflow-executor/test/http/executor-http-server.test.ts index f4d415b4c8..e660b926a6 100644 --- a/packages/workflow-executor/test/http/executor-http-server.test.ts +++ b/packages/workflow-executor/test/http/executor-http-server.test.ts @@ -3,6 +3,7 @@ import type Runner from '../../src/runner'; import request from 'supertest'; +import { RunNotFoundError } from '../../src/errors'; import ExecutorHttpServer from '../../src/http/executor-http-server'; function createMockRunStore(overrides: Partial = {}): RunStore { @@ -58,7 +59,7 @@ describe('ExecutorHttpServer', () => { const response = await request(server.callback).get('/runs/run-1'); expect(response.status).toBe(500); - expect(response.body).toEqual({ error: 'db error' }); + expect(response.body).toEqual({ error: 'Internal server error' }); }); }); @@ -79,9 +80,26 @@ describe('ExecutorHttpServer', () => { expect(runner.triggerPoll).toHaveBeenCalledWith('run-1'); }); - it('should propagate errors from runner', async () => { + it('returns 404 when triggerPoll rejects with RunNotFoundError', async () => { const runner = createMockRunner({ - triggerPoll: jest.fn().mockRejectedValue(new Error('poll failed')), + triggerPoll: jest.fn().mockRejectedValue(new RunNotFoundError('run-1')), + }); + + const server = new ExecutorHttpServer({ + port: 0, + runStore: createMockRunStore(), + runner, + }); + + const response = await request(server.callback).post('/runs/run-1/trigger'); + + expect(response.status).toBe(404); + expect(response.body).toEqual({ error: 'Run not found or unavailable' }); + }); + + it('returns 500 when triggerPoll rejects with an 
unexpected error', async () => { + const runner = createMockRunner({ + triggerPoll: jest.fn().mockRejectedValue(new Error('unexpected')), }); const server = new ExecutorHttpServer({ @@ -93,7 +111,7 @@ describe('ExecutorHttpServer', () => { const response = await request(server.callback).post('/runs/run-1/trigger'); expect(response.status).toBe(500); - expect(response.body).toEqual({ error: 'poll failed' }); + expect(response.body).toEqual({ error: 'Internal server error' }); }); }); diff --git a/packages/workflow-executor/test/index.test.ts b/packages/workflow-executor/test/index.test.ts index 05affa035c..1267b1cbbd 100644 --- a/packages/workflow-executor/test/index.test.ts +++ b/packages/workflow-executor/test/index.test.ts @@ -1,9 +1,9 @@ import { StepType } from '../src/index'; describe('StepType', () => { - it('should expose exactly 5 step types', () => { + it('should expose exactly 6 step types', () => { const values = Object.values(StepType); - expect(values).toHaveLength(5); + expect(values).toHaveLength(6); }); it.each([ @@ -12,6 +12,7 @@ describe('StepType', () => { ['UpdateRecord', 'update-record'], ['TriggerAction', 'trigger-action'], ['LoadRelatedRecord', 'load-related-record'], + ['McpTask', 'mcp-task'], ] as const)('should have %s = "%s"', (key, value) => { expect(StepType[key]).toBe(value); }); diff --git a/packages/workflow-executor/test/runner.test.ts b/packages/workflow-executor/test/runner.test.ts index 0ea16bd276..52ba1c0c3b 100644 --- a/packages/workflow-executor/test/runner.test.ts +++ b/packages/workflow-executor/test/runner.test.ts @@ -1,96 +1,794 @@ +import type { StepContextConfig } from '../src/executors/step-executor-factory'; import type { AgentPort } from '../src/ports/agent-port'; +import type { Logger } from '../src/ports/logger-port'; import type { RunStore } from '../src/ports/run-store'; import type { WorkflowPort } from '../src/ports/workflow-port'; +import type { PendingStepExecution } from '../src/types/execution'; +import type 
{ StepDefinition } from '../src/types/step-definition'; +import type { AiClient, BaseChatModel } from '@forestadmin/ai-proxy'; +import { RunNotFoundError } from '../src/errors'; +import BaseStepExecutor from '../src/executors/base-step-executor'; +import ConditionStepExecutor from '../src/executors/condition-step-executor'; +import LoadRelatedRecordStepExecutor from '../src/executors/load-related-record-step-executor'; +import McpTaskStepExecutor from '../src/executors/mcp-task-step-executor'; +import ReadRecordStepExecutor from '../src/executors/read-record-step-executor'; +import StepExecutorFactory from '../src/executors/step-executor-factory'; +import TriggerRecordActionStepExecutor from '../src/executors/trigger-record-action-step-executor'; +import UpdateRecordStepExecutor from '../src/executors/update-record-step-executor'; import ExecutorHttpServer from '../src/http/executor-http-server'; import Runner from '../src/runner'; +import { StepType } from '../src/types/step-definition'; jest.mock('../src/http/executor-http-server'); const MockedExecutorHttpServer = ExecutorHttpServer as jest.MockedClass; -function createRunnerConfig(overrides: { httpPort?: number } = {}) { +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +const POLLING_INTERVAL_MS = 1000; + +const flushPromises = async () => { + await Promise.resolve(); + await Promise.resolve(); + await Promise.resolve(); +}; + +function createMockWorkflowPort(): jest.Mocked { + return { + getPendingStepExecutions: jest.fn().mockResolvedValue([]), + getPendingStepExecutionsForRun: jest.fn(), + updateStepExecution: jest.fn().mockResolvedValue(undefined), + getCollectionSchema: jest.fn(), + getMcpServerConfigs: jest.fn().mockResolvedValue([]), + }; +} + +function createMockAiClient() { + return { + getModel: jest.fn().mockReturnValue({} as BaseChatModel), + loadRemoteTools: 
jest.fn().mockResolvedValue([]), + closeConnections: jest.fn().mockResolvedValue(undefined), + }; +} + +function createMockLogger(): jest.Mocked { + return { error: jest.fn() }; +} + +function createRunnerConfig( + overrides: Partial<{ + workflowPort: WorkflowPort; + aiClient: AiClient; + logger: Logger; + httpPort: number; + }> = {}, +) { return { agentPort: {} as AgentPort, - workflowPort: {} as WorkflowPort, + workflowPort: createMockWorkflowPort(), runStore: {} as RunStore, - pollingIntervalMs: 2000, + pollingIntervalMs: POLLING_INTERVAL_MS, + aiClient: createMockAiClient() as unknown as AiClient, + logger: createMockLogger(), ...overrides, }; } -describe('Runner', () => { - beforeEach(() => { - jest.clearAllMocks(); - MockedExecutorHttpServer.prototype.start = jest.fn().mockResolvedValue(undefined); - MockedExecutorHttpServer.prototype.stop = jest.fn().mockResolvedValue(undefined); +function makeStepDefinition(stepType: StepType): StepDefinition { + if (stepType === StepType.Condition) { + return { type: StepType.Condition, options: ['opt1', 'opt2'] }; + } + + if (stepType === StepType.McpTask) { + return { type: StepType.McpTask }; + } + + return { type: stepType as Exclude }; +} + +function makePendingStep( + overrides: Partial & { stepType?: StepType } = {}, +): PendingStepExecution { + const { stepType = StepType.ReadRecord, ...rest } = overrides; + + return { + runId: 'run-1', + stepId: 'step-1', + stepIndex: 0, + baseRecordRef: { collectionName: 'customers', recordId: ['1'], stepIndex: 0 }, + stepDefinition: makeStepDefinition(stepType), + previousSteps: [], + ...rest, + }; +} + +// --------------------------------------------------------------------------- +// Test setup +// --------------------------------------------------------------------------- + +let executeSpy: jest.SpyInstance; +let runner: Runner; + +beforeAll(() => { + jest.useFakeTimers(); +}); + +afterAll(() => { + jest.useRealTimers(); +}); + +beforeEach(() => { + jest.clearAllMocks(); + 
jest.clearAllTimers(); + + MockedExecutorHttpServer.prototype.start = jest.fn().mockResolvedValue(undefined); + MockedExecutorHttpServer.prototype.stop = jest.fn().mockResolvedValue(undefined); + + executeSpy = jest.spyOn(BaseStepExecutor.prototype, 'execute').mockResolvedValue({ + stepOutcome: { type: 'record-task', stepId: 'step-1', stepIndex: 0, status: 'success' }, }); +}); + +afterEach(async () => { + // eslint-disable-next-line @typescript-eslint/no-unnecessary-condition + if (runner) { + await runner.stop(); + (runner as Runner | undefined) = undefined; + } + + jest.clearAllTimers(); +}); + +// --------------------------------------------------------------------------- +// HTTP server (existing tests, kept passing) +// --------------------------------------------------------------------------- - describe('start', () => { - it('should start the HTTP server when httpPort is configured', async () => { - const config = createRunnerConfig({ httpPort: 3100 }); - const runner = new Runner(config); +describe('start', () => { + it('should start the HTTP server when httpPort is configured', async () => { + const config = createRunnerConfig({ httpPort: 3100 }); + runner = new Runner(config); - await runner.start(); + await runner.start(); - expect(MockedExecutorHttpServer).toHaveBeenCalledWith({ - port: 3100, - runStore: config.runStore, - runner, - }); - expect(MockedExecutorHttpServer.prototype.start).toHaveBeenCalled(); + expect(MockedExecutorHttpServer).toHaveBeenCalledWith({ + port: 3100, + runStore: config.runStore, + runner, }); + expect(MockedExecutorHttpServer.prototype.start).toHaveBeenCalled(); + }); + + it('should not start the HTTP server when httpPort is not configured', async () => { + runner = new Runner(createRunnerConfig()); + + await runner.start(); + + expect(MockedExecutorHttpServer).not.toHaveBeenCalled(); + }); + + it('should not create a second HTTP server if already started', async () => { + runner = new Runner(createRunnerConfig({ httpPort: 
3100 })); + + await runner.start(); + await runner.start(); + + expect(MockedExecutorHttpServer).toHaveBeenCalledTimes(1); + }); +}); + +describe('stop', () => { + it('should stop the HTTP server when running', async () => { + runner = new Runner(createRunnerConfig({ httpPort: 3100 })); + + await runner.start(); + await runner.stop(); + + expect(MockedExecutorHttpServer.prototype.stop).toHaveBeenCalled(); + }); + + it('should handle stop when no HTTP server is running', async () => { + runner = new Runner(createRunnerConfig()); + + await expect(runner.stop()).resolves.toBeUndefined(); + }); + + it('should allow restarting after stop', async () => { + runner = new Runner(createRunnerConfig({ httpPort: 3100 })); + + await runner.start(); + await runner.stop(); + await runner.start(); + + expect(MockedExecutorHttpServer).toHaveBeenCalledTimes(2); + }); +}); + +// --------------------------------------------------------------------------- +// Polling loop +// --------------------------------------------------------------------------- + +describe('polling loop', () => { + it('schedules a poll after pollingIntervalMs', async () => { + const workflowPort = createMockWorkflowPort(); + runner = new Runner(createRunnerConfig({ workflowPort })); + await runner.start(); + + expect(workflowPort.getPendingStepExecutions).not.toHaveBeenCalled(); + + jest.advanceTimersByTime(POLLING_INTERVAL_MS); + await flushPromises(); + + expect(workflowPort.getPendingStepExecutions).toHaveBeenCalledTimes(1); + }); + + it('reschedules automatically after each cycle', async () => { + const workflowPort = createMockWorkflowPort(); + runner = new Runner(createRunnerConfig({ workflowPort })); + await runner.start(); + + jest.advanceTimersByTime(POLLING_INTERVAL_MS); + await flushPromises(); + expect(workflowPort.getPendingStepExecutions).toHaveBeenCalledTimes(1); + + jest.advanceTimersByTime(POLLING_INTERVAL_MS); + await flushPromises(); + 
expect(workflowPort.getPendingStepExecutions).toHaveBeenCalledTimes(2); + }); + + it('stop() prevents scheduling a new cycle', async () => { + const workflowPort = createMockWorkflowPort(); + runner = new Runner(createRunnerConfig({ workflowPort })); + await runner.start(); + + jest.advanceTimersByTime(POLLING_INTERVAL_MS); + await flushPromises(); + expect(workflowPort.getPendingStepExecutions).toHaveBeenCalledTimes(1); + + await runner.stop(); + + jest.advanceTimersByTime(POLLING_INTERVAL_MS * 3); + await flushPromises(); + + expect(workflowPort.getPendingStepExecutions).toHaveBeenCalledTimes(1); + }); + + it('stop() clears the pending timer', async () => { + const workflowPort = createMockWorkflowPort(); + runner = new Runner(createRunnerConfig({ workflowPort })); + await runner.start(); + + await runner.stop(); + + jest.advanceTimersByTime(POLLING_INTERVAL_MS); + await flushPromises(); + + expect(workflowPort.getPendingStepExecutions).not.toHaveBeenCalled(); + }); + + it('calling start() twice does not schedule two timers', async () => { + const workflowPort = createMockWorkflowPort(); + runner = new Runner(createRunnerConfig({ workflowPort })); + + await runner.start(); + await runner.start(); + + jest.advanceTimersByTime(POLLING_INTERVAL_MS); + await flushPromises(); + + expect(workflowPort.getPendingStepExecutions).toHaveBeenCalledTimes(1); + }); +}); + +// --------------------------------------------------------------------------- +// Deduplication +// --------------------------------------------------------------------------- + +describe('deduplication', () => { + it('skips a step whose key is already in inFlightSteps', async () => { + const workflowPort = createMockWorkflowPort(); + const step = makePendingStep({ runId: 'run-1', stepId: 'inflight-step' }); + workflowPort.getPendingStepExecutionsForRun.mockResolvedValue(step); + + // Block the first execution so the key stays in-flight + const unblockRef = { fn: (): void => {} }; + 
executeSpy.mockReturnValueOnce( + new Promise(resolve => { + unblockRef.fn = () => + resolve({ + stepOutcome: { + type: 'record-task', + stepId: 'inflight-step', + stepIndex: 0, + status: 'success', + }, + }); + }), + ); + + runner = new Runner(createRunnerConfig({ workflowPort })); + + const poll1 = runner.triggerPoll('run-1'); + await Promise.resolve(); // let getPendingStepExecutionsForRun resolve and step key get added + + // Second poll: step is in-flight → should be skipped + await runner.triggerPoll('run-1'); + + expect(executeSpy).toHaveBeenCalledTimes(1); + + unblockRef.fn(); + await poll1; + }); + + it('removes the step key after successful execution', async () => { + const workflowPort = createMockWorkflowPort(); + const step = makePendingStep({ runId: 'run-1', stepId: 'step-dedup' }); + workflowPort.getPendingStepExecutionsForRun.mockResolvedValue(step); - it('should not start the HTTP server when httpPort is not configured', async () => { - const runner = new Runner(createRunnerConfig()); + runner = new Runner(createRunnerConfig({ workflowPort })); - await runner.start(); + await runner.triggerPoll('run-1'); + await runner.triggerPoll('run-1'); - expect(MockedExecutorHttpServer).not.toHaveBeenCalled(); + expect(executeSpy).toHaveBeenCalledTimes(2); + }); + + it('removes the step key even when executor construction fails', async () => { + const workflowPort = createMockWorkflowPort(); + const aiClient = createMockAiClient(); + const step = makePendingStep({ runId: 'run-1', stepId: 'step-throws' }); + workflowPort.getPendingStepExecutionsForRun.mockResolvedValue(step); + aiClient.getModel.mockImplementationOnce(() => { + throw new Error('construction error'); }); - it('should not create a second HTTP server if already started', async () => { - const runner = new Runner(createRunnerConfig({ httpPort: 3100 })); + runner = new Runner( + createRunnerConfig({ workflowPort, aiClient: aiClient as unknown as AiClient }), + ); + + await 
runner.triggerPoll('run-1'); + await runner.triggerPoll('run-1'); + + // Both polls completed: the step key was cleared after the first (failed) execution + expect(workflowPort.updateStepExecution).toHaveBeenCalledTimes(2); + // First poll produced an error outcome from the construction failure + expect(workflowPort.updateStepExecution).toHaveBeenNthCalledWith( + 1, + 'run-1', + expect.objectContaining({ status: 'error', error: 'An unexpected error occurred.' }), + ); + }); +}); + +// --------------------------------------------------------------------------- +// triggerPoll +// --------------------------------------------------------------------------- + +describe('triggerPoll', () => { + it('calls getPendingStepExecutionsForRun with the given runId and executes the step', async () => { + const workflowPort = createMockWorkflowPort(); + const step = makePendingStep({ runId: 'run-A', stepId: 'step-a' }); + workflowPort.getPendingStepExecutionsForRun.mockResolvedValue(step); + + runner = new Runner(createRunnerConfig({ workflowPort })); + await runner.triggerPoll('run-A'); + + expect(workflowPort.getPendingStepExecutionsForRun).toHaveBeenCalledWith('run-A'); + expect(workflowPort.getPendingStepExecutions).not.toHaveBeenCalled(); + expect(executeSpy).toHaveBeenCalledTimes(1); + expect(workflowPort.updateStepExecution).toHaveBeenCalledWith('run-A', expect.anything()); + }); + + it('skips in-flight steps', async () => { + const workflowPort = createMockWorkflowPort(); + const step = makePendingStep({ runId: 'run-1', stepId: 'step-inflight' }); + workflowPort.getPendingStepExecutionsForRun.mockResolvedValue(step); + + const unblockRef = { fn: (): void => {} }; + executeSpy.mockReturnValueOnce( + new Promise(resolve => { + unblockRef.fn = () => + resolve({ + stepOutcome: { + type: 'record-task', + stepId: 'step-inflight', + stepIndex: 0, + status: 'success', + }, + }); + }), + ); + + runner = new Runner(createRunnerConfig({ workflowPort })); + + const poll1 = 
runner.triggerPoll('run-1'); + await Promise.resolve(); + + await runner.triggerPoll('run-1'); + + expect(executeSpy).toHaveBeenCalledTimes(1); + + unblockRef.fn(); + await poll1; + }); + + it('resolves after the step has settled', async () => { + const workflowPort = createMockWorkflowPort(); + const step = makePendingStep({ runId: 'run-1', stepId: 'step-a' }); + workflowPort.getPendingStepExecutionsForRun.mockResolvedValue(step); + + runner = new Runner(createRunnerConfig({ workflowPort })); + + await expect(runner.triggerPoll('run-1')).resolves.toBeUndefined(); + expect(executeSpy).toHaveBeenCalledTimes(1); + }); + + it('rejects with RunNotFoundError when getPendingStepExecutionsForRun returns null', async () => { + const workflowPort = createMockWorkflowPort(); + workflowPort.getPendingStepExecutionsForRun.mockResolvedValue(null); - await runner.start(); - await runner.start(); + runner = new Runner(createRunnerConfig({ workflowPort })); - expect(MockedExecutorHttpServer).toHaveBeenCalledTimes(1); + await expect(runner.triggerPoll('run-1')).rejects.toThrow(RunNotFoundError); + }); + + it('propagates errors from getPendingStepExecutionsForRun as-is', async () => { + const workflowPort = createMockWorkflowPort(); + workflowPort.getPendingStepExecutionsForRun.mockRejectedValue(new Error('Network error')); + + runner = new Runner(createRunnerConfig({ workflowPort })); + + await expect(runner.triggerPoll('run-1')).rejects.toThrow('Network error'); + }); +}); + +// --------------------------------------------------------------------------- +// MCP lazy loading +// --------------------------------------------------------------------------- + +describe('MCP lazy loading (via once thunk)', () => { + it('does not call fetchRemoteTools when there are no McpTask steps', async () => { + const workflowPort = createMockWorkflowPort(); + const aiClient = createMockAiClient(); + const step = makePendingStep({ runId: 'run-1', stepType: StepType.ReadRecord }); + 
workflowPort.getPendingStepExecutionsForRun.mockResolvedValue(step); + + runner = new Runner( + createRunnerConfig({ workflowPort, aiClient: aiClient as unknown as AiClient }), + ); + await runner.triggerPoll('run-1'); + + expect(workflowPort.getMcpServerConfigs).not.toHaveBeenCalled(); + expect(aiClient.loadRemoteTools).not.toHaveBeenCalled(); + }); + + it('calls fetchRemoteTools once for an McpTask step', async () => { + const workflowPort = createMockWorkflowPort(); + const aiClient = createMockAiClient(); + const step = makePendingStep({ + runId: 'run-1', + stepId: 'step-mcp-1', + stepType: StepType.McpTask, }); + workflowPort.getPendingStepExecutionsForRun.mockResolvedValue(step); + // Provide a non-empty config so fetchRemoteTools actually calls loadRemoteTools + workflowPort.getMcpServerConfigs.mockResolvedValue([{ configs: {} }] as never); + + runner = new Runner( + createRunnerConfig({ workflowPort, aiClient: aiClient as unknown as AiClient }), + ); + await runner.triggerPoll('run-1'); + + expect(workflowPort.getMcpServerConfigs).toHaveBeenCalledTimes(1); + expect(aiClient.loadRemoteTools).toHaveBeenCalledTimes(1); + }); +}); + +// --------------------------------------------------------------------------- +// getExecutor — factory +// --------------------------------------------------------------------------- + +describe('StepExecutorFactory.create — factory', () => { + const makeContextConfig = (): StepContextConfig => ({ + aiClient: { + getModel: jest.fn().mockReturnValue({} as BaseChatModel), + } as unknown as AiClient, + agentPort: {} as AgentPort, + workflowPort: {} as WorkflowPort, + runStore: {} as RunStore, + logger: { error: jest.fn() }, }); - describe('stop', () => { - it('should stop the HTTP server when running', async () => { - const runner = new Runner(createRunnerConfig({ httpPort: 3100 })); + it('dispatches Condition steps to ConditionStepExecutor', async () => { + const step = makePendingStep({ stepType: StepType.Condition }); + const 
executor = await StepExecutorFactory.create(step, makeContextConfig(), jest.fn()); + expect(executor).toBeInstanceOf(ConditionStepExecutor); + }); - await runner.start(); - await runner.stop(); + it('dispatches ReadRecord steps to ReadRecordStepExecutor', async () => { + const step = makePendingStep({ stepType: StepType.ReadRecord }); + const executor = await StepExecutorFactory.create(step, makeContextConfig(), jest.fn()); + expect(executor).toBeInstanceOf(ReadRecordStepExecutor); + }); - expect(MockedExecutorHttpServer.prototype.stop).toHaveBeenCalled(); + it('dispatches UpdateRecord steps to UpdateRecordStepExecutor', async () => { + const step = makePendingStep({ stepType: StepType.UpdateRecord }); + const executor = await StepExecutorFactory.create(step, makeContextConfig(), jest.fn()); + expect(executor).toBeInstanceOf(UpdateRecordStepExecutor); + }); + + it('dispatches TriggerAction steps to TriggerRecordActionStepExecutor', async () => { + const step = makePendingStep({ stepType: StepType.TriggerAction }); + const executor = await StepExecutorFactory.create(step, makeContextConfig(), jest.fn()); + expect(executor).toBeInstanceOf(TriggerRecordActionStepExecutor); + }); + + it('dispatches LoadRelatedRecord steps to LoadRelatedRecordStepExecutor', async () => { + const step = makePendingStep({ stepType: StepType.LoadRelatedRecord }); + const executor = await StepExecutorFactory.create(step, makeContextConfig(), jest.fn()); + expect(executor).toBeInstanceOf(LoadRelatedRecordStepExecutor); + }); + + it('dispatches McpTask steps to McpTaskStepExecutor and calls loadTools', async () => { + const step = makePendingStep({ stepType: StepType.McpTask }); + const loadTools = jest.fn().mockResolvedValue([]); + const executor = await StepExecutorFactory.create(step, makeContextConfig(), loadTools); + expect(executor).toBeInstanceOf(McpTaskStepExecutor); + expect(loadTools).toHaveBeenCalledTimes(1); + }); + + it('returns an executor with an error outcome for an unknown 
step type', async () => { + const step = { + ...makePendingStep(), + stepDefinition: { type: 'unknown-type' as StepType }, + } as unknown as PendingStepExecution; + const executor = await StepExecutorFactory.create(step, makeContextConfig(), jest.fn()); + const { stepOutcome } = await executor.execute(); + expect(stepOutcome.status).toBe('error'); + expect(stepOutcome.error).toBe('An unexpected error occurred.'); + }); + + it('returns an executor with an error outcome when loadTools rejects for a McpTask step', async () => { + const step = makePendingStep({ stepType: StepType.McpTask }); + const loadTools = jest.fn().mockRejectedValueOnce(new Error('MCP server down')); + const executor = await StepExecutorFactory.create(step, makeContextConfig(), loadTools); + const { stepOutcome } = await executor.execute(); + expect(stepOutcome.status).toBe('error'); + expect(stepOutcome.type).toBe('mcp-task'); + expect(stepOutcome.error).toBe('An unexpected error occurred.'); + }); + + it('logs cause message when construction error has an Error cause', async () => { + const rootCause = new Error('root cause'); + const error = new Error('wrapper'); + (error as Error & { cause: Error }).cause = rootCause; + const logger = { error: jest.fn() }; + const contextConfig: StepContextConfig = { + ...makeContextConfig(), + aiClient: { + getModel: jest.fn().mockImplementationOnce(() => { + throw error; + }), + } as unknown as AiClient, + logger, + }; + + await StepExecutorFactory.create(makePendingStep(), contextConfig, jest.fn()); + + expect(logger.error).toHaveBeenCalledWith( + 'Step execution failed unexpectedly', + expect.objectContaining({ cause: 'root cause' }), + ); + }); + + it('logs cause as undefined when construction error cause is not an Error instance', async () => { + const error = new Error('wrapper'); + (error as Error & { cause: string }).cause = 'plain string'; + const logger = { error: jest.fn() }; + const contextConfig: StepContextConfig = { + ...makeContextConfig(), + 
aiClient: { + getModel: jest.fn().mockImplementationOnce(() => { + throw error; + }), + } as unknown as AiClient, + logger, + }; + + await StepExecutorFactory.create(makePendingStep(), contextConfig, jest.fn()); + + expect(logger.error).toHaveBeenCalledWith( + 'Step execution failed unexpectedly', + expect.objectContaining({ cause: undefined }), + ); + }); +}); + +// --------------------------------------------------------------------------- +// Error handling +// --------------------------------------------------------------------------- + +describe('error handling', () => { + it('reports a fallback error outcome when buildContext throws (getModel throws)', async () => { + const workflowPort = createMockWorkflowPort(); + const mockLogger = createMockLogger(); + const aiClient = createMockAiClient(); + const step = makePendingStep({ runId: 'run-1', stepId: 'step-err' }); + workflowPort.getPendingStepExecutionsForRun.mockResolvedValue(step); + aiClient.getModel.mockImplementationOnce(() => { + throw new Error('AI not configured'); }); - it('should handle stop when no HTTP server is running', async () => { - const runner = new Runner(createRunnerConfig()); + runner = new Runner( + createRunnerConfig({ + workflowPort, + aiClient: aiClient as unknown as AiClient, + logger: mockLogger, + }), + ); + await runner.triggerPoll('run-1'); - await expect(runner.stop()).resolves.toBeUndefined(); + expect(mockLogger.error).toHaveBeenCalledWith( + 'Step execution failed unexpectedly', + expect.objectContaining({ + runId: 'run-1', + stepId: 'step-err', + stepIndex: 0, + error: 'AI not configured', + }), + ); + expect(workflowPort.updateStepExecution).toHaveBeenCalledWith('run-1', { + type: 'record-task', + stepId: 'step-err', + stepIndex: 0, + status: 'error', + error: 'An unexpected error occurred.', }); + }); - it('should allow restarting after stop', async () => { - const runner = new Runner(createRunnerConfig({ httpPort: 3100 })); + it('reports type mcp-task in fallback error 
outcome for McpTask steps', async () => { + const workflowPort = createMockWorkflowPort(); + const aiClient = createMockAiClient(); + const step = makePendingStep({ + runId: 'run-1', + stepId: 'step-mcp-err', + stepType: StepType.McpTask, + }); + workflowPort.getPendingStepExecutionsForRun.mockResolvedValue(step); + aiClient.getModel.mockImplementationOnce(() => { + throw new Error('AI not configured'); + }); - await runner.start(); - await runner.stop(); - await runner.start(); + runner = new Runner( + createRunnerConfig({ workflowPort, aiClient: aiClient as unknown as AiClient }), + ); + await runner.triggerPoll('run-1'); - expect(MockedExecutorHttpServer).toHaveBeenCalledTimes(2); + expect(workflowPort.updateStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ type: 'mcp-task', status: 'error' }), + ); + }); + + it('logs unexpected errors with runId, stepId, and stack when getModel throws', async () => { + const workflowPort = createMockWorkflowPort(); + const mockLogger = createMockLogger(); + const aiClient = createMockAiClient(); + const error = new Error('something blew up'); + const step = makePendingStep({ runId: 'run-2', stepId: 'step-log' }); + workflowPort.getPendingStepExecutionsForRun.mockResolvedValue(step); + aiClient.getModel.mockImplementationOnce(() => { + throw error; + }); + + runner = new Runner( + createRunnerConfig({ + workflowPort, + aiClient: aiClient as unknown as AiClient, + logger: mockLogger, + }), + ); + await runner.triggerPoll('run-2'); + + expect(mockLogger.error).toHaveBeenCalledWith( + 'Step execution failed unexpectedly', + expect.objectContaining({ + runId: 'run-2', + stepId: 'step-log', + stepIndex: 0, + error: 'something blew up', + stack: expect.any(String), + }), + ); + }); + + it('does not re-throw if updateStepExecution fails after a construction error', async () => { + const workflowPort = createMockWorkflowPort(); + const aiClient = createMockAiClient(); + const step = makePendingStep({ runId: 
'run-1', stepId: 'step-fallback' }); + workflowPort.getPendingStepExecutionsForRun.mockResolvedValue(step); + aiClient.getModel.mockImplementationOnce(() => { + throw new Error('construction error'); }); + workflowPort.updateStepExecution.mockRejectedValueOnce(new Error('update failed')); + + runner = new Runner( + createRunnerConfig({ workflowPort, aiClient: aiClient as unknown as AiClient }), + ); + + await expect(runner.triggerPoll('run-1')).resolves.toBeUndefined(); }); - describe('triggerPoll', () => { - it('should resolve without error', async () => { - const runner = new Runner(createRunnerConfig()); + it('logs FATAL and does not call updateStepExecution if executor.execute() rejects', async () => { + const workflowPort = createMockWorkflowPort(); + const mockLogger = createMockLogger(); + const step = makePendingStep({ runId: 'run-1', stepId: 'step-fatal' }); + workflowPort.getPendingStepExecutionsForRun.mockResolvedValue(step); - await expect(runner.triggerPoll('run-1')).resolves.toBeUndefined(); + // Simulate a broken executor that violates the never-throw contract + jest.spyOn(StepExecutorFactory, 'create').mockResolvedValueOnce({ + execute: jest.fn().mockRejectedValueOnce(new Error('contract violated')), }); + + runner = new Runner(createRunnerConfig({ workflowPort, logger: mockLogger })); + await runner.triggerPoll('run-1'); + + expect(mockLogger.error).toHaveBeenCalledWith( + 'FATAL: executor contract violated — step outcome not reported', + expect.objectContaining({ + runId: 'run-1', + stepId: 'step-fatal', + error: 'contract violated', + }), + ); + expect(workflowPort.updateStepExecution).not.toHaveBeenCalled(); + }); + + it('reports an outcome when getModel throws a non-Error throwable', async () => { + const workflowPort = createMockWorkflowPort(); + const aiClient = createMockAiClient(); + const step = makePendingStep({ runId: 'run-1', stepId: 'step-string-throw' }); + workflowPort.getPendingStepExecutionsForRun.mockResolvedValue(step); + 
aiClient.getModel.mockImplementationOnce(() => { + // eslint-disable-next-line @typescript-eslint/no-throw-literal + throw 'plain string error'; + }); + + runner = new Runner( + createRunnerConfig({ workflowPort, aiClient: aiClient as unknown as AiClient }), + ); + await runner.triggerPoll('run-1'); + + expect(workflowPort.updateStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ status: 'error', error: 'An unexpected error occurred.' }), + ); + }); + + it('catches getPendingStepExecutions failure, logs it, and reschedules', async () => { + const workflowPort = createMockWorkflowPort(); + const mockLogger = createMockLogger(); + workflowPort.getPendingStepExecutions + .mockRejectedValueOnce(new Error('network error')) + .mockResolvedValue([]); + + runner = new Runner(createRunnerConfig({ workflowPort, logger: mockLogger })); + await runner.start(); + + jest.advanceTimersByTime(POLLING_INTERVAL_MS); + await flushPromises(); + + expect(mockLogger.error).toHaveBeenCalledWith( + 'Poll cycle failed', + expect.objectContaining({ error: 'network error' }), + ); + + // After the error, the cycle should have been rescheduled + jest.advanceTimersByTime(POLLING_INTERVAL_MS); + await flushPromises(); + + expect(workflowPort.getPendingStepExecutions).toHaveBeenCalledTimes(2); }); }); diff --git a/packages/workflow-executor/test/types/step-outcome.test.ts b/packages/workflow-executor/test/types/step-outcome.test.ts new file mode 100644 index 0000000000..e044e79f84 --- /dev/null +++ b/packages/workflow-executor/test/types/step-outcome.test.ts @@ -0,0 +1,32 @@ +import { StepType } from '../../src/types/step-definition'; +import { stepTypeToOutcomeType } from '../../src/types/step-outcome'; + +describe('stepTypeToOutcomeType', () => { + it('maps Condition to condition', () => { + expect(stepTypeToOutcomeType(StepType.Condition)).toBe('condition'); + }); + + it('maps McpTask to mcp-task', () => { + 
expect(stepTypeToOutcomeType(StepType.McpTask)).toBe('mcp-task'); + }); + + it('maps ReadRecord to record-task', () => { + expect(stepTypeToOutcomeType(StepType.ReadRecord)).toBe('record-task'); + }); + + it('maps UpdateRecord to record-task', () => { + expect(stepTypeToOutcomeType(StepType.UpdateRecord)).toBe('record-task'); + }); + + it('maps TriggerAction to record-task', () => { + expect(stepTypeToOutcomeType(StepType.TriggerAction)).toBe('record-task'); + }); + + it('maps LoadRelatedRecord to record-task', () => { + expect(stepTypeToOutcomeType(StepType.LoadRelatedRecord)).toBe('record-task'); + }); + + it('falls through to record-task for an unknown future step type', () => { + expect(stepTypeToOutcomeType('future-step-type' as StepType)).toBe('record-task'); + }); +}); diff --git a/yarn.lock b/yarn.lock index c7c7add7ea..371ee67d9e 100644 --- a/yarn.lock +++ b/yarn.lock @@ -2314,23 +2314,6 @@ uuid "^10.0.0" zod "^3.25.76 || ^4" -"@langchain/core@1.1.33": - version "1.1.33" - resolved "https://registry.yarnpkg.com/@langchain/core/-/core-1.1.33.tgz#414536e9d0a6f90576502e532336104360ed4392" - integrity sha512-At1ooBmPlHMkhTkG6NqeOVjNscuJwneBB8F88rFRvBvIfhTACVLzEwMiZFWNTM8DzUXUOcxxqS7xKRyr6JBbOQ== - dependencies: - "@cfworker/json-schema" "^4.0.2" - "@standard-schema/spec" "^1.1.0" - ansi-styles "^5.0.0" - camelcase "6" - decamelize "1.2.0" - js-tiktoken "^1.0.12" - langsmith ">=0.5.0 <1.0.0" - mustache "^4.2.0" - p-queue "^6.6.2" - uuid "^11.1.0" - zod "^3.25.76 || ^4" - "@langchain/langgraph-checkpoint@^1.0.0": version "1.0.0" resolved "https://registry.yarnpkg.com/@langchain/langgraph-checkpoint/-/langgraph-checkpoint-1.0.0.tgz#ece2ede439d0d0b0b532c4be7817fd5029afe4f8" @@ -4186,11 +4169,6 @@ dependencies: tslib "^2.6.2" -"@standard-schema/spec@^1.1.0": - version "1.1.0" - resolved "https://registry.yarnpkg.com/@standard-schema/spec/-/spec-1.1.0.tgz#a79b55dbaf8604812f52d140b2c9ab41bc150bb8" - integrity 
sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w== - "@tokenizer/token@^0.3.0": version "0.3.0" resolved "https://registry.yarnpkg.com/@tokenizer/token/-/token-0.3.0.tgz#fe98a93fe789247e998c75e74e9c7c63217aa276" @@ -11319,18 +11297,6 @@ koa@^3.0.1: semver "^7.6.3" uuid "^10.0.0" -"langsmith@>=0.5.0 <1.0.0": - version "0.5.10" - resolved "https://registry.yarnpkg.com/langsmith/-/langsmith-0.5.10.tgz#f0df23538e6a7c2928787030cedfb4be9d5b3db6" - integrity sha512-unBdaaD/CqAOLIYjd9kT33FgHUMvHSsyBIPbQa+p/rE/Sv/l4pAC5ISEE79zphxi+vV4qxHqEgqahVXj2Xvz7A== - dependencies: - "@types/uuid" "^10.0.0" - chalk "^5.6.2" - console-table-printer "^2.12.1" - p-queue "^6.6.2" - semver "^7.6.3" - uuid "^10.0.0" - lerna@^8.2.3: version "8.2.3" resolved "https://registry.yarnpkg.com/lerna/-/lerna-8.2.3.tgz#0a9c07eda4cfac84a480b3e66915189ccfb5bd2c" @@ -17322,11 +17288,6 @@ uuid@^10.0.0: resolved "https://registry.yarnpkg.com/uuid/-/uuid-10.0.0.tgz#5a95aa454e6e002725c79055fd42aaba30ca6294" integrity sha512-8XkAphELsDnEGrDxUOHB3RGvXz6TeuYSGEZBOjtTtPm2lwhGBjLgOzLHB63IUWfBpNucQjND6d3AOudO+H3RWQ== -uuid@^11.1.0: - version "11.1.0" - resolved "https://registry.yarnpkg.com/uuid/-/uuid-11.1.0.tgz#9549028be1753bb934fc96e2bca09bb4105ae912" - integrity sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A== - uuid@^13.0.0: version "13.0.0" resolved "https://registry.yarnpkg.com/uuid/-/uuid-13.0.0.tgz#263dc341b19b4d755eb8fe36b78d95a6b65707e8" From de39c307a1c6898254c4029374fe53e55132f9ad Mon Sep 17 00:00:00 2001 From: Matthieu Date: Tue, 24 Mar 2026 15:38:57 +0100 Subject: [PATCH 12/18] feat(workflow-executor): add JWT auth and secret validation to HTTP server (#1504) Co-authored-by: Claude Opus 4.6 (1M context) Co-authored-by: alban bertolini --- packages/workflow-executor/package.json | 5 +- .../adapters/forest-server-workflow-port.ts | 8 + packages/workflow-executor/src/errors.ts | 7 + 
.../src/http/executor-http-server.ts | 54 +- packages/workflow-executor/src/index.ts | 2 + .../src/ports/workflow-port.ts | 1 + packages/workflow-executor/src/runner.ts | 9 + .../workflow-executor/src/validate-secrets.ts | 13 + .../forest-server-workflow-port.test.ts | 9 + .../load-related-record-step-executor.test.ts | 1 + .../executors/mcp-task-step-executor.test.ts | 1 + .../read-record-step-executor.test.ts | 1 + ...rigger-record-action-step-executor.test.ts | 1 + .../update-record-step-executor.test.ts | 1 + .../test/http/executor-http-server.test.ts | 311 +- .../workflow-executor/test/runner.test.ts | 27 +- .../test/validate-secrets.test.ts | 61 + yarn.lock | 2764 +++++++---------- 18 files changed, 1674 insertions(+), 1602 deletions(-) create mode 100644 packages/workflow-executor/src/validate-secrets.ts create mode 100644 packages/workflow-executor/test/validate-secrets.test.ts diff --git a/packages/workflow-executor/package.json b/packages/workflow-executor/package.json index 4a6a93b0a4..32ae26db19 100644 --- a/packages/workflow-executor/package.json +++ b/packages/workflow-executor/package.json @@ -23,14 +23,17 @@ "test": "jest" }, "dependencies": { - "@forestadmin/ai-proxy": "1.6.1", "@forestadmin/agent-client": "1.4.13", + "@forestadmin/ai-proxy": "1.6.1", "@forestadmin/forestadmin-client": "1.37.17", "@koa/router": "^13.1.0", + "jsonwebtoken": "^9.0.3", "koa": "^3.0.1", + "koa-jwt": "^4.0.4", "zod": "4.3.6" }, "devDependencies": { + "@types/jsonwebtoken": "^9.0.10", "@types/koa": "^2.13.5", "@types/koa__router": "^12.0.4", "supertest": "^7.1.3" diff --git a/packages/workflow-executor/src/adapters/forest-server-workflow-port.ts b/packages/workflow-executor/src/adapters/forest-server-workflow-port.ts index 47f45a6c1f..8233e9b705 100644 --- a/packages/workflow-executor/src/adapters/forest-server-workflow-port.ts +++ b/packages/workflow-executor/src/adapters/forest-server-workflow-port.ts @@ -60,4 +60,12 @@ export default class ForestServerWorkflowPort 
implements WorkflowPort { async getMcpServerConfigs(): Promise { return ServerUtils.query(this.options, 'get', ROUTES.mcpServerConfigs); } + + // eslint-disable-next-line @typescript-eslint/no-unused-vars + async hasRunAccess(_runId: string, _userToken: string): Promise { + // TODO: implement once GET /liana/v1/workflow-runs/:runId/access is available. + // When live: call ServerUtils.query with extra header 'forest-user-token': userToken + // to let the orchestrator verify ownership. + return true; + } } diff --git a/packages/workflow-executor/src/errors.ts b/packages/workflow-executor/src/errors.ts index 4560bafdeb..ae7df0b995 100644 --- a/packages/workflow-executor/src/errors.ts +++ b/packages/workflow-executor/src/errors.ts @@ -199,6 +199,13 @@ export class McpToolInvocationError extends WorkflowExecutorError { } } +export class ConfigurationError extends Error { + constructor(message: string) { + super(message); + this.name = 'ConfigurationError'; + } +} + export class RunNotFoundError extends Error { cause?: unknown; diff --git a/packages/workflow-executor/src/http/executor-http-server.ts b/packages/workflow-executor/src/http/executor-http-server.ts index 10062deddd..4200ae0347 100644 --- a/packages/workflow-executor/src/http/executor-http-server.ts +++ b/packages/workflow-executor/src/http/executor-http-server.ts @@ -1,11 +1,13 @@ import type { Logger } from '../ports/logger-port'; import type { RunStore } from '../ports/run-store'; +import type { WorkflowPort } from '../ports/workflow-port'; import type Runner from '../runner'; import type { Server } from 'http'; import Router from '@koa/router'; import http from 'http'; import Koa from 'koa'; +import koaJwt from 'koa-jwt'; import { RunNotFoundError } from '../errors'; @@ -13,6 +15,8 @@ export interface ExecutorHttpServerOptions { port: number; runStore: RunStore; runner: Runner; + authSecret: string; + workflowPort: WorkflowPort; logger?: Logger; } @@ -25,11 +29,20 @@ export default class 
ExecutorHttpServer { this.options = options; this.app = new Koa(); - // Error middleware — catches all async handler errors and returns structured JSON + // Error middleware — catches all errors (including JWT 401) and returns structured JSON this.app.use(async (ctx, next) => { try { await next(); } catch (err: unknown) { + const { status } = err as { status?: number }; + + if (status === 401) { + ctx.status = 401; + ctx.body = { error: 'Unauthorized' }; + + return; + } + this.options.logger?.error('Unhandled HTTP error', { method: ctx.method, path: ctx.path, @@ -41,7 +54,46 @@ export default class ExecutorHttpServer { } }); + // JWT middleware — validates Bearer token using authSecret + // tokenKey: 'rawToken' exposes the raw token string on ctx.state.rawToken for downstream use + this.app.use( + koaJwt({ secret: options.authSecret, cookie: 'forest_session_token', tokenKey: 'rawToken' }), + ); + const router = new Router(); + + // Authorization middleware — verifies that the authenticated user owns the requested run. + // Applied to all /runs/:runId routes so future routes are automatically protected. + router.use('/runs/:runId', async (ctx, next) => { + // Raw token is always present here: koa-jwt already rejected the request if missing. + const userToken = ctx.state.rawToken as string; + + try { + const allowed = await this.options.workflowPort.hasRunAccess(ctx.params.runId, userToken); + + if (!allowed) { + ctx.status = 403; + ctx.body = { error: 'Forbidden' }; + + return; + } + } catch (err) { + this.options.logger?.error('Failed to check run access', { + runId: ctx.params.runId, + method: ctx.method, + path: ctx.path, + error: err instanceof Error ? err.message : String(err), + stack: err instanceof Error ? 
err.stack : undefined, + }); + ctx.status = 503; + ctx.body = { error: 'Service unavailable' }; + + return; + } + + await next(); + }); + router.get('/runs/:runId', this.handleGetRun.bind(this)); router.post('/runs/:runId/trigger', this.handleTrigger.bind(this)); diff --git a/packages/workflow-executor/src/index.ts b/packages/workflow-executor/src/index.ts index 3075c5f418..7077ae07da 100644 --- a/packages/workflow-executor/src/index.ts +++ b/packages/workflow-executor/src/index.ts @@ -86,6 +86,7 @@ export { McpToolNotFoundError, McpToolInvocationError, AgentPortError, + ConfigurationError, } from './errors'; export { default as BaseStepExecutor } from './executors/base-step-executor'; export { default as ConditionStepExecutor } from './executors/condition-step-executor'; @@ -100,3 +101,4 @@ export { default as ExecutorHttpServer } from './http/executor-http-server'; export type { ExecutorHttpServerOptions } from './http/executor-http-server'; export { default as Runner } from './runner'; export type { RunnerConfig } from './runner'; +export { default as validateSecrets } from './validate-secrets'; diff --git a/packages/workflow-executor/src/ports/workflow-port.ts b/packages/workflow-executor/src/ports/workflow-port.ts index 223123b756..9373a0c7f6 100644 --- a/packages/workflow-executor/src/ports/workflow-port.ts +++ b/packages/workflow-executor/src/ports/workflow-port.ts @@ -13,4 +13,5 @@ export interface WorkflowPort { updateStepExecution(runId: string, stepOutcome: StepOutcome): Promise; getCollectionSchema(collectionName: string): Promise; getMcpServerConfigs(): Promise; + hasRunAccess(runId: string, userToken: string): Promise; } diff --git a/packages/workflow-executor/src/runner.ts b/packages/workflow-executor/src/runner.ts index b97a8260aa..4dd16528bd 100644 --- a/packages/workflow-executor/src/runner.ts +++ b/packages/workflow-executor/src/runner.ts @@ -10,6 +10,7 @@ import ConsoleLogger from './adapters/console-logger'; import { RunNotFoundError, 
causeMessage } from './errors'; import StepExecutorFactory from './executors/step-executor-factory'; import ExecutorHttpServer from './http/executor-http-server'; +import validateSecrets from './validate-secrets'; export interface RunnerConfig { agentPort: AgentPort; @@ -17,6 +18,8 @@ export interface RunnerConfig { runStore: RunStore; pollingIntervalMs: number; aiClient: AiClient; + envSecret: string; + authSecret: string; logger?: Logger; httpPort?: number; } @@ -50,6 +53,9 @@ export default class Runner { async start(): Promise { if (this.isRunning) return; + + validateSecrets({ envSecret: this.config.envSecret, authSecret: this.config.authSecret }); + this.isRunning = true; try { @@ -58,6 +64,9 @@ export default class Runner { port: this.config.httpPort, runStore: this.config.runStore, runner: this, + authSecret: this.config.authSecret, + workflowPort: this.config.workflowPort, + logger: this.logger, }); await server.start(); this.httpServer = server; diff --git a/packages/workflow-executor/src/validate-secrets.ts b/packages/workflow-executor/src/validate-secrets.ts new file mode 100644 index 0000000000..dfa10b54c7 --- /dev/null +++ b/packages/workflow-executor/src/validate-secrets.ts @@ -0,0 +1,13 @@ +import { ConfigurationError } from './errors'; + +const ENV_SECRET_PATTERN = /^[0-9a-f]{64}$/; + +export default function validateSecrets(params: { envSecret: string; authSecret: string }): void { + if (!params.authSecret || typeof params.authSecret !== 'string') { + throw new ConfigurationError('authSecret must be a non-empty string'); + } + + if (!ENV_SECRET_PATTERN.test(params.envSecret)) { + throw new ConfigurationError('envSecret must be a 64-character hex string'); + } +} diff --git a/packages/workflow-executor/test/adapters/forest-server-workflow-port.test.ts b/packages/workflow-executor/test/adapters/forest-server-workflow-port.test.ts index 8b38812dff..2bc9172293 100644 --- a/packages/workflow-executor/test/adapters/forest-server-workflow-port.test.ts 
+++ b/packages/workflow-executor/test/adapters/forest-server-workflow-port.test.ts @@ -123,6 +123,15 @@ describe('ForestServerWorkflowPort', () => { }); }); + describe('hasRunAccess', () => { + it('always returns true (stub until orchestrator endpoint is available)', async () => { + const result = await port.hasRunAccess('run-42', 'some-token'); + + expect(result).toBe(true); + expect(mockQuery).not.toHaveBeenCalled(); + }); + }); + describe('error propagation', () => { it('should propagate errors from ServerUtils.query', async () => { mockQuery.mockRejectedValue(new Error('Network error')); diff --git a/packages/workflow-executor/test/executors/load-related-record-step-executor.test.ts b/packages/workflow-executor/test/executors/load-related-record-step-executor.test.ts index 46910f72db..d4c3aa0cda 100644 --- a/packages/workflow-executor/test/executors/load-related-record-step-executor.test.ts +++ b/packages/workflow-executor/test/executors/load-related-record-step-executor.test.ts @@ -90,6 +90,7 @@ function makeMockWorkflowPort( ), ), getMcpServerConfigs: jest.fn().mockResolvedValue([]), + hasRunAccess: jest.fn().mockResolvedValue(true), }; } diff --git a/packages/workflow-executor/test/executors/mcp-task-step-executor.test.ts b/packages/workflow-executor/test/executors/mcp-task-step-executor.test.ts index 5525014ddf..5a13735135 100644 --- a/packages/workflow-executor/test/executors/mcp-task-step-executor.test.ts +++ b/packages/workflow-executor/test/executors/mcp-task-step-executor.test.ts @@ -59,6 +59,7 @@ function makeMockWorkflowPort(): WorkflowPort { actions: [], }), getMcpServerConfigs: jest.fn().mockResolvedValue([]), + hasRunAccess: jest.fn().mockResolvedValue(true), }; } diff --git a/packages/workflow-executor/test/executors/read-record-step-executor.test.ts b/packages/workflow-executor/test/executors/read-record-step-executor.test.ts index cee40bc8c3..f565b7525f 100644 --- a/packages/workflow-executor/test/executors/read-record-step-executor.test.ts +++ 
b/packages/workflow-executor/test/executors/read-record-step-executor.test.ts @@ -83,6 +83,7 @@ function makeMockWorkflowPort( ), ), getMcpServerConfigs: jest.fn().mockResolvedValue([]), + hasRunAccess: jest.fn().mockResolvedValue(true), }; } diff --git a/packages/workflow-executor/test/executors/trigger-record-action-step-executor.test.ts b/packages/workflow-executor/test/executors/trigger-record-action-step-executor.test.ts index 7bb0a77df3..c17fb4bfa3 100644 --- a/packages/workflow-executor/test/executors/trigger-record-action-step-executor.test.ts +++ b/packages/workflow-executor/test/executors/trigger-record-action-step-executor.test.ts @@ -78,6 +78,7 @@ function makeMockWorkflowPort( ), ), getMcpServerConfigs: jest.fn().mockResolvedValue([]), + hasRunAccess: jest.fn().mockResolvedValue(true), }; } diff --git a/packages/workflow-executor/test/executors/update-record-step-executor.test.ts b/packages/workflow-executor/test/executors/update-record-step-executor.test.ts index 00ed053d06..3e0447c07e 100644 --- a/packages/workflow-executor/test/executors/update-record-step-executor.test.ts +++ b/packages/workflow-executor/test/executors/update-record-step-executor.test.ts @@ -83,6 +83,7 @@ function makeMockWorkflowPort( ), ), getMcpServerConfigs: jest.fn().mockResolvedValue([]), + hasRunAccess: jest.fn().mockResolvedValue(true), }; } diff --git a/packages/workflow-executor/test/http/executor-http-server.test.ts b/packages/workflow-executor/test/http/executor-http-server.test.ts index e660b926a6..a910db9cb8 100644 --- a/packages/workflow-executor/test/http/executor-http-server.test.ts +++ b/packages/workflow-executor/test/http/executor-http-server.test.ts @@ -1,11 +1,19 @@ import type { RunStore } from '../../src/ports/run-store'; +import type { WorkflowPort } from '../../src/ports/workflow-port'; import type Runner from '../../src/runner'; +import jsonwebtoken from 'jsonwebtoken'; import request from 'supertest'; import { RunNotFoundError } from '../../src/errors'; 
import ExecutorHttpServer from '../../src/http/executor-http-server'; +const AUTH_SECRET = 'test-auth-secret'; + +function signToken(payload: object, secret = AUTH_SECRET, options?: jsonwebtoken.SignOptions) { + return jsonwebtoken.sign(payload, secret, { expiresIn: '1h', ...options }); +} + function createMockRunStore(overrides: Partial = {}): RunStore { return { getStepExecutions: jest.fn().mockResolvedValue([]), @@ -23,7 +31,242 @@ function createMockRunner(overrides: Partial = {}): Runner { } as unknown as Runner; } +function createMockWorkflowPort(overrides: Partial = {}): WorkflowPort { + return { + getPendingStepExecutions: jest.fn().mockResolvedValue([]), + getPendingStepExecutionsForRun: jest.fn(), + updateStepExecution: jest.fn().mockResolvedValue(undefined), + getCollectionSchema: jest.fn(), + getMcpServerConfigs: jest.fn().mockResolvedValue([]), + hasRunAccess: jest.fn().mockResolvedValue(true), + ...overrides, + } as unknown as WorkflowPort; +} + +function createServer( + overrides: { + runStore?: RunStore; + runner?: Runner; + workflowPort?: WorkflowPort; + logger?: { error: jest.Mock }; + } = {}, +) { + return new ExecutorHttpServer({ + port: 0, + runStore: overrides.runStore ?? createMockRunStore(), + runner: overrides.runner ?? createMockRunner(), + authSecret: AUTH_SECRET, + workflowPort: overrides.workflowPort ?? 
createMockWorkflowPort(), + logger: overrides.logger, + }); +} + describe('ExecutorHttpServer', () => { + describe('JWT authentication', () => { + it('should return 401 when no token is provided', async () => { + const server = createServer(); + + const response = await request(server.callback).get('/runs/run-1'); + + expect(response.status).toBe(401); + expect(response.body).toEqual({ error: 'Unauthorized' }); + }); + + it('should return 401 when token is signed with wrong secret', async () => { + const server = createServer(); + const token = signToken({ id: 'user-1' }, 'wrong-secret'); + + const response = await request(server.callback) + .get('/runs/run-1') + .set('Authorization', `Bearer ${token}`); + + expect(response.status).toBe(401); + expect(response.body).toEqual({ error: 'Unauthorized' }); + }); + + it('should return 401 when token is expired', async () => { + const server = createServer(); + const token = signToken({ id: 'user-1' }, AUTH_SECRET, { expiresIn: '0s' }); + + // Small delay to ensure token is expired + await new Promise(resolve => { + setTimeout(resolve, 10); + }); + + const response = await request(server.callback) + .get('/runs/run-1') + .set('Authorization', `Bearer ${token}`); + + expect(response.status).toBe(401); + expect(response.body).toEqual({ error: 'Unauthorized' }); + }); + + it('should return 401 when token is malformed', async () => { + const server = createServer(); + + const response = await request(server.callback) + .get('/runs/run-1') + .set('Authorization', 'Bearer not-a-jwt'); + + expect(response.status).toBe(401); + expect(response.body).toEqual({ error: 'Unauthorized' }); + }); + + it('should accept valid token in Authorization header', async () => { + const server = createServer(); + const token = signToken({ id: 'user-1' }); + + const response = await request(server.callback) + .get('/runs/run-1') + .set('Authorization', `Bearer ${token}`); + + expect(response.status).toBe(200); + }); + + it('should accept valid 
token in forest_session_token cookie', async () => { + const server = createServer(); + const token = signToken({ id: 'user-1' }); + + const response = await request(server.callback) + .get('/runs/run-1') + .set('Cookie', `forest_session_token=${token}`); + + expect(response.status).toBe(200); + }); + + it('should populate ctx.state.user with the decoded JWT payload', async () => { + let capturedUser: unknown; + + // Build a thin Koa app with the same JWT config to prove user extraction + const Koa = (await import('koa')).default; + const koaJwt = (await import('koa-jwt')).default; + const app = new Koa(); + app.use(koaJwt({ secret: AUTH_SECRET, cookie: 'forest_session_token' })); + app.use(async ctx => { + capturedUser = ctx.state.user; + ctx.body = { ok: true }; + }); + + const token = signToken({ + id: 'user-42', + email: 'admin@forest.com', + firstName: 'Ada', + }); + + await request(app.callback()).get('/').set('Authorization', `Bearer ${token}`); + + expect(capturedUser).toEqual( + expect.objectContaining({ id: 'user-42', email: 'admin@forest.com', firstName: 'Ada' }), + ); + }); + }); + + describe('run access authorization', () => { + it('returns 403 when hasRunAccess returns false on GET /runs/:runId', async () => { + const workflowPort = createMockWorkflowPort({ + hasRunAccess: jest.fn().mockResolvedValue(false), + }); + const server = createServer({ workflowPort }); + const token = signToken({ id: 'user-1' }); + + const response = await request(server.callback) + .get('/runs/run-1') + .set('Authorization', `Bearer ${token}`); + + expect(response.status).toBe(403); + expect(response.body).toEqual({ error: 'Forbidden' }); + }); + + it('returns 403 when hasRunAccess returns false on POST /runs/:runId/trigger', async () => { + const workflowPort = createMockWorkflowPort({ + hasRunAccess: jest.fn().mockResolvedValue(false), + }); + const server = createServer({ workflowPort }); + const token = signToken({ id: 'user-1' }); + + const response = await 
request(server.callback) + .post('/runs/run-1/trigger') + .set('Authorization', `Bearer ${token}`); + + expect(response.status).toBe(403); + expect(response.body).toEqual({ error: 'Forbidden' }); + }); + + it('calls hasRunAccess with the correct runId and userToken', async () => { + const workflowPort = createMockWorkflowPort(); + const server = createServer({ workflowPort }); + const token = signToken({ id: 'user-1' }); + + const response = await request(server.callback) + .get('/runs/run-42') + .set('Authorization', `Bearer ${token}`); + + expect(response.status).toBe(200); + expect(workflowPort.hasRunAccess).toHaveBeenCalledWith('run-42', token); + }); + + it('calls hasRunAccess with token from cookie', async () => { + const workflowPort = createMockWorkflowPort(); + const server = createServer({ workflowPort }); + const token = signToken({ id: 'user-1' }); + + const response = await request(server.callback) + .get('/runs/run-cookie') + .set('Cookie', `forest_session_token=${token}`); + + expect(response.status).toBe(200); + expect(workflowPort.hasRunAccess).toHaveBeenCalledWith('run-cookie', token); + }); + + it('returns 503 when hasRunAccess throws', async () => { + const logger = { error: jest.fn() }; + const workflowPort = createMockWorkflowPort({ + hasRunAccess: jest.fn().mockRejectedValue(new Error('orchestrator down')), + }); + const server = createServer({ workflowPort, logger }); + const token = signToken({ id: 'user-1' }); + + const response = await request(server.callback) + .get('/runs/run-1') + .set('Authorization', `Bearer ${token}`); + + expect(response.status).toBe(503); + expect(response.body).toEqual({ error: 'Service unavailable' }); + expect(logger.error).toHaveBeenCalledWith( + 'Failed to check run access', + expect.objectContaining({ runId: 'run-1', error: 'orchestrator down' }), + ); + }); + + it('does not call getStepExecutions when hasRunAccess returns false', async () => { + const runStore = createMockRunStore(); + const workflowPort = 
createMockWorkflowPort({ + hasRunAccess: jest.fn().mockResolvedValue(false), + }); + const server = createServer({ runStore, workflowPort }); + const token = signToken({ id: 'user-1' }); + + await request(server.callback).get('/runs/run-1').set('Authorization', `Bearer ${token}`); + + expect(runStore.getStepExecutions).not.toHaveBeenCalled(); + }); + + it('does not call triggerPoll when hasRunAccess returns false', async () => { + const runner = createMockRunner(); + const workflowPort = createMockWorkflowPort({ + hasRunAccess: jest.fn().mockResolvedValue(false), + }); + const server = createServer({ runner, workflowPort }); + const token = signToken({ id: 'user-1' }); + + await request(server.callback) + .post('/runs/run-1/trigger') + .set('Authorization', `Bearer ${token}`); + + expect(runner.triggerPoll).not.toHaveBeenCalled(); + }); + }); + describe('GET /runs/:runId', () => { it('should return steps from the run store', async () => { const steps = [{ type: 'condition' as const, stepIndex: 0 }]; @@ -32,13 +275,12 @@ describe('ExecutorHttpServer', () => { getStepExecutions: jest.fn().mockResolvedValue(steps), }); - const server = new ExecutorHttpServer({ - port: 0, - runStore, - runner: createMockRunner(), - }); + const server = createServer({ runStore }); + const token = signToken({ id: 'user-1' }); - const response = await request(server.callback).get('/runs/run-1'); + const response = await request(server.callback) + .get('/runs/run-1') + .set('Authorization', `Bearer ${token}`); expect(response.status).toBe(200); expect(response.body).toEqual({ steps }); @@ -50,13 +292,12 @@ describe('ExecutorHttpServer', () => { getStepExecutions: jest.fn().mockRejectedValue(new Error('db error')), }); - const server = new ExecutorHttpServer({ - port: 0, - runStore, - runner: createMockRunner(), - }); + const server = createServer({ runStore }); + const token = signToken({ id: 'user-1' }); - const response = await request(server.callback).get('/runs/run-1'); + const 
response = await request(server.callback) + .get('/runs/run-1') + .set('Authorization', `Bearer ${token}`); expect(response.status).toBe(500); expect(response.body).toEqual({ error: 'Internal server error' }); @@ -66,14 +307,12 @@ describe('ExecutorHttpServer', () => { describe('POST /runs/:runId/trigger', () => { it('should call runner.triggerPoll with the runId', async () => { const runner = createMockRunner(); + const server = createServer({ runner }); + const token = signToken({ id: 'user-1' }); - const server = new ExecutorHttpServer({ - port: 0, - runStore: createMockRunStore(), - runner, - }); - - const response = await request(server.callback).post('/runs/run-1/trigger'); + const response = await request(server.callback) + .post('/runs/run-1/trigger') + .set('Authorization', `Bearer ${token}`); expect(response.status).toBe(200); expect(response.body).toEqual({ triggered: true }); @@ -85,13 +324,12 @@ describe('ExecutorHttpServer', () => { triggerPoll: jest.fn().mockRejectedValue(new RunNotFoundError('run-1')), }); - const server = new ExecutorHttpServer({ - port: 0, - runStore: createMockRunStore(), - runner, - }); + const server = createServer({ runner }); + const token = signToken({ id: 'user-1' }); - const response = await request(server.callback).post('/runs/run-1/trigger'); + const response = await request(server.callback) + .post('/runs/run-1/trigger') + .set('Authorization', `Bearer ${token}`); expect(response.status).toBe(404); expect(response.body).toEqual({ error: 'Run not found or unavailable' }); @@ -102,13 +340,12 @@ describe('ExecutorHttpServer', () => { triggerPoll: jest.fn().mockRejectedValue(new Error('unexpected')), }); - const server = new ExecutorHttpServer({ - port: 0, - runStore: createMockRunStore(), - runner, - }); + const server = createServer({ runner }); + const token = signToken({ id: 'user-1' }); - const response = await request(server.callback).post('/runs/run-1/trigger'); + const response = await request(server.callback) + 
.post('/runs/run-1/trigger') + .set('Authorization', `Bearer ${token}`); expect(response.status).toBe(500); expect(response.body).toEqual({ error: 'Internal server error' }); @@ -117,22 +354,14 @@ describe('ExecutorHttpServer', () => { describe('start / stop', () => { it('should start and stop the server', async () => { - const server = new ExecutorHttpServer({ - port: 0, - runStore: createMockRunStore(), - runner: createMockRunner(), - }); + const server = createServer(); await server.start(); await expect(server.stop()).resolves.toBeUndefined(); }); it('should handle stop when not started', async () => { - const server = new ExecutorHttpServer({ - port: 0, - runStore: createMockRunStore(), - runner: createMockRunner(), - }); + const server = createServer(); await expect(server.stop()).resolves.toBeUndefined(); }); diff --git a/packages/workflow-executor/test/runner.test.ts b/packages/workflow-executor/test/runner.test.ts index 52ba1c0c3b..57b1aa8117 100644 --- a/packages/workflow-executor/test/runner.test.ts +++ b/packages/workflow-executor/test/runner.test.ts @@ -7,7 +7,7 @@ import type { PendingStepExecution } from '../src/types/execution'; import type { StepDefinition } from '../src/types/step-definition'; import type { AiClient, BaseChatModel } from '@forestadmin/ai-proxy'; -import { RunNotFoundError } from '../src/errors'; +import { ConfigurationError, RunNotFoundError } from '../src/errors'; import BaseStepExecutor from '../src/executors/base-step-executor'; import ConditionStepExecutor from '../src/executors/condition-step-executor'; import LoadRelatedRecordStepExecutor from '../src/executors/load-related-record-step-executor'; @@ -43,6 +43,7 @@ function createMockWorkflowPort(): jest.Mocked { updateStepExecution: jest.fn().mockResolvedValue(undefined), getCollectionSchema: jest.fn(), getMcpServerConfigs: jest.fn().mockResolvedValue([]), + hasRunAccess: jest.fn().mockResolvedValue(true), }; } @@ -58,12 +59,17 @@ function createMockLogger(): jest.Mocked { 
return { error: jest.fn() }; } +const VALID_ENV_SECRET = 'a'.repeat(64); +const VALID_AUTH_SECRET = 'test-auth-secret'; + function createRunnerConfig( overrides: Partial<{ workflowPort: WorkflowPort; aiClient: AiClient; logger: Logger; httpPort: number; + envSecret: string; + authSecret: string; }> = {}, ) { return { @@ -73,6 +79,8 @@ function createRunnerConfig( pollingIntervalMs: POLLING_INTERVAL_MS, aiClient: createMockAiClient() as unknown as AiClient, logger: createMockLogger(), + envSecret: VALID_ENV_SECRET, + authSecret: VALID_AUTH_SECRET, ...overrides, }; } @@ -157,6 +165,9 @@ describe('start', () => { port: 3100, runStore: config.runStore, runner, + authSecret: VALID_AUTH_SECRET, + workflowPort: config.workflowPort, + logger: config.logger, }); expect(MockedExecutorHttpServer.prototype.start).toHaveBeenCalled(); }); @@ -177,6 +188,20 @@ describe('start', () => { expect(MockedExecutorHttpServer).toHaveBeenCalledTimes(1); }); + + it('should throw ConfigurationError when envSecret is invalid', async () => { + runner = new Runner(createRunnerConfig({ envSecret: 'bad' })); + + await expect(runner.start()).rejects.toThrow(ConfigurationError); + await expect(runner.start()).rejects.toThrow('envSecret must be a 64-character hex string'); + }); + + it('should throw ConfigurationError when authSecret is empty', async () => { + runner = new Runner(createRunnerConfig({ authSecret: '' })); + + await expect(runner.start()).rejects.toThrow(ConfigurationError); + await expect(runner.start()).rejects.toThrow('authSecret must be a non-empty string'); + }); }); describe('stop', () => { diff --git a/packages/workflow-executor/test/validate-secrets.test.ts b/packages/workflow-executor/test/validate-secrets.test.ts new file mode 100644 index 0000000000..0c249c0454 --- /dev/null +++ b/packages/workflow-executor/test/validate-secrets.test.ts @@ -0,0 +1,61 @@ +import { ConfigurationError } from '../src/errors'; +import validateSecrets from '../src/validate-secrets'; + +const 
VALID_ENV_SECRET = 'a'.repeat(64); +const VALID_AUTH_SECRET = 'my-auth-secret'; + +describe('validateSecrets', () => { + it('does not throw when both secrets are valid', () => { + expect(() => + validateSecrets({ envSecret: VALID_ENV_SECRET, authSecret: VALID_AUTH_SECRET }), + ).not.toThrow(); + }); + + describe('authSecret', () => { + it('throws ConfigurationError when authSecret is empty', () => { + expect(() => validateSecrets({ envSecret: VALID_ENV_SECRET, authSecret: '' })).toThrow( + ConfigurationError, + ); + }); + + it('throws ConfigurationError with descriptive message', () => { + expect(() => validateSecrets({ envSecret: VALID_ENV_SECRET, authSecret: '' })).toThrow( + 'authSecret must be a non-empty string', + ); + }); + }); + + describe('envSecret', () => { + it('throws ConfigurationError when envSecret is empty', () => { + expect(() => validateSecrets({ envSecret: '', authSecret: VALID_AUTH_SECRET })).toThrow( + ConfigurationError, + ); + }); + + it('throws ConfigurationError when envSecret is not 64 chars', () => { + expect(() => validateSecrets({ envSecret: 'abc', authSecret: VALID_AUTH_SECRET })).toThrow( + ConfigurationError, + ); + }); + + it('throws ConfigurationError when envSecret contains non-hex chars', () => { + const nonHex = 'g'.repeat(64); + expect(() => validateSecrets({ envSecret: nonHex, authSecret: VALID_AUTH_SECRET })).toThrow( + ConfigurationError, + ); + }); + + it('throws ConfigurationError when envSecret contains uppercase hex', () => { + const upperHex = 'A'.repeat(64); + expect(() => validateSecrets({ envSecret: upperHex, authSecret: VALID_AUTH_SECRET })).toThrow( + ConfigurationError, + ); + }); + + it('throws with descriptive message', () => { + expect(() => validateSecrets({ envSecret: 'bad', authSecret: VALID_AUTH_SECRET })).toThrow( + 'envSecret must be a 64-character hex string', + ); + }); + }); +}); diff --git a/yarn.lock b/yarn.lock index 371ee67d9e..114e6c834d 100644 --- a/yarn.lock +++ b/yarn.lock @@ -7,34 +7,6 @@ 
resolved "https://registry.yarnpkg.com/@aashutoshrathi/word-wrap/-/word-wrap-1.2.6.tgz#bd9154aec9983f77b3a034ecaa015c2e4201f6cf" integrity sha512-1Yjs2SvM8TflER/OD3cOjhWWOZb58A2t7wpE2S9XfBYTiIl+XFhQG2bjy4Pu1I+EAlCNUzRDYDdFwFYUKvXcIA== -"@actions/core@^2.0.0": - version "2.0.2" - resolved "https://registry.yarnpkg.com/@actions/core/-/core-2.0.2.tgz#81c59e1f3437660d2148a064c1ba8e99931f2cf7" - integrity sha512-Ast1V7yHbGAhplAsuVlnb/5J8Mtr/Zl6byPPL+Qjq3lmfIgWF1ak1iYfF/079cRERiuTALTXkSuEUdZeDCfGtA== - dependencies: - "@actions/exec" "^2.0.0" - "@actions/http-client" "^3.0.1" - -"@actions/exec@^2.0.0": - version "2.0.0" - resolved "https://registry.yarnpkg.com/@actions/exec/-/exec-2.0.0.tgz#35e829723389f80e362ec2cc415697ec74362ad8" - integrity sha512-k8ngrX2voJ/RIN6r9xB82NVqKpnMRtxDoiO+g3olkIUpQNqjArXrCQceduQZCQj3P3xm32pChRLqRrtXTlqhIw== - dependencies: - "@actions/io" "^2.0.0" - -"@actions/http-client@^3.0.1": - version "3.0.1" - resolved "https://registry.yarnpkg.com/@actions/http-client/-/http-client-3.0.1.tgz#0ac91c3abf179a401e23d40abf0d7caa92324268" - integrity sha512-SbGS8c/vySbNO3kjFgSW77n83C4MQx/Yoe+b1hAdpuvfHxnkHzDq2pWljUpAA56Si1Gae/7zjeZsV0CYjmLo/w== - dependencies: - tunnel "^0.0.6" - undici "^5.28.5" - -"@actions/io@^2.0.0": - version "2.0.0" - resolved "https://registry.yarnpkg.com/@actions/io/-/io-2.0.0.tgz#3ad1271ba3cd515324f2215e8d4c1c0c3864d65b" - integrity sha512-Jv33IN09XLO+0HS79aaODsvIRyduiF7NY/F6LYeK5oeUmrsz7aFdRphQjFoESF4jS7lMauDOttKALcpapVDIAg== - "@ampproject/remapping@^2.2.0": version "2.2.1" resolved "https://registry.yarnpkg.com/@ampproject/remapping/-/remapping-2.2.1.tgz#99e8e11851128b8702cd57c33684f1d0f260b630" @@ -688,7 +660,7 @@ "@smithy/types" "^4.12.1" tslib "^2.6.2" -"@aws-sdk/xml-builder@>=3.972.9", "@aws-sdk/xml-builder@^3.972.6": +"@aws-sdk/xml-builder@^3.972.6": version "3.972.9" resolved "https://registry.yarnpkg.com/@aws-sdk/xml-builder/-/xml-builder-3.972.9.tgz#38a43a0a860c4c73100d727e5b28c43339597b50" integrity 
sha512-ItnlMgSqkPrUfJs7EsvU/01zw5UeIb2tNPhD09LBLHbg+g+HDiKibSLwpkuz/ZIlz4F2IMn+5XgE4AK/pfPuog== @@ -919,10 +891,10 @@ "@babel/highlight" "^7.22.13" chalk "^2.4.2" -"@babel/code-frame@^7.26.2", "@babel/code-frame@^7.28.6": - version "7.28.6" - resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.28.6.tgz#72499312ec58b1e2245ba4a4f550c132be4982f7" - integrity sha512-JYgintcMjRiCvS8mMECzaEn+m3PfoQiyqukOMCCVQtoJGYJw8j/8LBJEiqkHLkfwCcs74E3pbAUFNg7d9VNJ+Q== +"@babel/code-frame@^7.21.4": + version "7.29.0" + resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.29.0.tgz#7cd7a59f15b3cc0dcd803038f7792712a7d0b15c" + integrity sha512-9NhCeYjq9+3uxgdtp20LSiJXJvN0FeCtNGpJxuMFZ1Kv3cWUNb6DOhJwUvcVCzKGR66cw4njwM6hrJLqgOwbcw== dependencies: "@babel/helper-validator-identifier" "^7.28.5" js-tokens "^4.0.0" @@ -937,6 +909,15 @@ js-tokens "^4.0.0" picocolors "^1.1.1" +"@babel/code-frame@^7.28.6": + version "7.28.6" + resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.28.6.tgz#72499312ec58b1e2245ba4a4f550c132be4982f7" + integrity sha512-JYgintcMjRiCvS8mMECzaEn+m3PfoQiyqukOMCCVQtoJGYJw8j/8LBJEiqkHLkfwCcs74E3pbAUFNg7d9VNJ+Q== + dependencies: + "@babel/helper-validator-identifier" "^7.28.5" + js-tokens "^4.0.0" + picocolors "^1.1.1" + "@babel/compat-data@^7.22.9": version "7.23.3" resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.23.3.tgz#3febd552541e62b5e883a25eb3effd7c7379db11" @@ -1643,11 +1624,6 @@ ajv-formats "^2.1.1" fast-uri "^2.0.0" -"@fastify/busboy@^2.0.0": - version "2.1.1" - resolved "https://registry.yarnpkg.com/@fastify/busboy/-/busboy-2.1.1.tgz#b9da6a878a371829a0502c9b6c1c143ef6663f4d" - integrity sha512-vBZP4NlzfOlerQTnba4aqZoMhE/a9HY7HRqoOPaETQcSQuWEIyZMHGfVu6w9wGtGK5fED5qRs2DteVCjOH60sA== - "@fastify/cors@9.0.1": version "9.0.1" resolved "https://registry.yarnpkg.com/@fastify/cors/-/cors-9.0.1.tgz#9ddb61b4a61e02749c5c54ca29f1c646794145be" @@ -1745,7 +1721,7 @@ object-hash "^3.0.0" uuid 
"^9.0.0" -"@gar/promisify@^1.0.1": +"@gar/promisify@^1.0.1", "@gar/promisify@^1.1.3": version "1.1.3" resolved "https://registry.yarnpkg.com/@gar/promisify/-/promisify-1.1.3.tgz#555193ab2e3bb3b6adc3d551c9c030d9e860daf6" integrity sha512-k2Ty1JcVojjJFwrg/ThKi2ujJ7XNLYaFGNB/bWT9wGR+oSMJHMa5w+CUq6p/pVrKeNNgA7pCqEcjSnHVoqJQFw== @@ -1939,12 +1915,17 @@ resolved "https://registry.yarnpkg.com/@inquirer/type/-/type-3.0.5.tgz#fe00207e57d5f040e5b18e809c8e7abc3a2ade3a" integrity sha512-ZJpeIYYueOz/i/ONzrfof8g89kNdO2hjGuvULROo3O8rlB2CRtSseE5KeirnyE4t/thAn/EwvS/vuQeJCn+NZg== -"@isaacs/fs-minipass@^4.0.0": - version "4.0.1" - resolved "https://registry.yarnpkg.com/@isaacs/fs-minipass/-/fs-minipass-4.0.1.tgz#2d59ae3ab4b38fb4270bfa23d30f8e2e86c7fe32" - integrity sha512-wgm9Ehl2jpeqP3zw/7mo3kRHFp5MEDhqAdwy1fTGkHAwnkGOVsgpvQhL8B5n1qlb01jV3n/bI0ZfZp5lWA1k4w== +"@isaacs/cliui@^8.0.2": + version "8.0.2" + resolved "https://registry.yarnpkg.com/@isaacs/cliui/-/cliui-8.0.2.tgz#b37667b7bc181c168782259bab42474fbf52b550" + integrity sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA== dependencies: - minipass "^7.0.4" + string-width "^5.1.2" + string-width-cjs "npm:string-width@^4.2.0" + strip-ansi "^7.0.1" + strip-ansi-cjs "npm:strip-ansi@^6.0.1" + wrap-ansi "^8.1.0" + wrap-ansi-cjs "npm:wrap-ansi@^7.0.0" "@isaacs/string-locale-compare@^1.1.0": version "1.1.0" @@ -2599,17 +2580,6 @@ lru-cache "^10.0.1" socks-proxy-agent "^8.0.3" -"@npmcli/agent@^4.0.0": - version "4.0.0" - resolved "https://registry.yarnpkg.com/@npmcli/agent/-/agent-4.0.0.tgz#2bb2b1c0a170940511554a7986ae2a8be9fedcce" - integrity sha512-kAQTcEN9E8ERLVg5AsGwLNoFb+oEG6engbqAU2P43gD4JEIkNGMHdVQ096FsOAAYpZPB0RSt0zgInKIAS1l5QA== - dependencies: - agent-base "^7.1.0" - http-proxy-agent "^7.0.0" - https-proxy-agent "^7.0.1" - lru-cache "^11.2.1" - socks-proxy-agent "^8.0.3" - "@npmcli/arborist@7.5.4": version "7.5.4" resolved 
"https://registry.yarnpkg.com/@npmcli/arborist/-/arborist-7.5.4.tgz#3dd9e531d6464ef6715e964c188e0880c471ac9b" @@ -2651,58 +2621,65 @@ treeverse "^3.0.0" walk-up-path "^3.0.1" -"@npmcli/arborist@^9.1.9": - version "9.1.9" - resolved "https://registry.yarnpkg.com/@npmcli/arborist/-/arborist-9.1.9.tgz#1458850184fa97967263c67c6f34a052ac632b46" - integrity sha512-O/rLeBo64mkUn1zU+1tFDWXvbAA9UXe9eUldwTwRLxOLFx9obqjNoozW65LmYqgWb0DG40i9lNZSv78VX2GKhw== +"@npmcli/arborist@^6.5.0": + version "6.5.1" + resolved "https://registry.yarnpkg.com/@npmcli/arborist/-/arborist-6.5.1.tgz#b378a2e162e9b868d06f8f2c7e87e828de7e63ba" + integrity sha512-cdV8pGurLK0CifZRilMJbm2CZ3H4Snk8PAqOngj5qmgFLjEllMLvScSZ3XKfd+CK8fo/hrPHO9zazy9OYdvmUg== dependencies: "@isaacs/string-locale-compare" "^1.1.0" - "@npmcli/fs" "^5.0.0" - "@npmcli/installed-package-contents" "^4.0.0" - "@npmcli/map-workspaces" "^5.0.0" - "@npmcli/metavuln-calculator" "^9.0.2" - "@npmcli/name-from-folder" "^4.0.0" - "@npmcli/node-gyp" "^5.0.0" - "@npmcli/package-json" "^7.0.0" - "@npmcli/query" "^5.0.0" - "@npmcli/redact" "^4.0.0" - "@npmcli/run-script" "^10.0.0" - bin-links "^6.0.0" - cacache "^20.0.1" + "@npmcli/fs" "^3.1.0" + "@npmcli/installed-package-contents" "^2.0.2" + "@npmcli/map-workspaces" "^3.0.2" + "@npmcli/metavuln-calculator" "^5.0.0" + "@npmcli/name-from-folder" "^2.0.0" + "@npmcli/node-gyp" "^3.0.0" + "@npmcli/package-json" "^4.0.0" + "@npmcli/query" "^3.1.0" + "@npmcli/run-script" "^6.0.0" + bin-links "^4.0.1" + cacache "^17.0.4" common-ancestor-path "^1.0.1" - hosted-git-info "^9.0.0" + hosted-git-info "^6.1.1" + json-parse-even-better-errors "^3.0.0" json-stringify-nice "^1.1.4" - lru-cache "^11.2.1" - minimatch "^10.0.3" - nopt "^9.0.0" - npm-install-checks "^8.0.0" - npm-package-arg "^13.0.0" - npm-pick-manifest "^11.0.1" - npm-registry-fetch "^19.0.0" - pacote "^21.0.2" - parse-conflict-json "^5.0.1" - proc-log "^6.0.0" - proggy "^4.0.0" + minimatch "^9.0.0" + nopt "^7.0.0" + npm-install-checks "^6.2.0" 
+ npm-package-arg "^10.1.0" + npm-pick-manifest "^8.0.1" + npm-registry-fetch "^14.0.3" + npmlog "^7.0.1" + pacote "^15.0.8" + parse-conflict-json "^3.0.0" + proc-log "^3.0.0" promise-all-reject-late "^1.0.0" - promise-call-limit "^3.0.1" + promise-call-limit "^1.0.2" + read-package-json-fast "^3.0.2" semver "^7.3.7" - ssri "^13.0.0" + ssri "^10.0.1" treeverse "^3.0.0" - walk-up-path "^4.0.0" + walk-up-path "^3.0.1" -"@npmcli/config@^10.4.5": - version "10.4.5" - resolved "https://registry.yarnpkg.com/@npmcli/config/-/config-10.4.5.tgz#6b5bfe6326d8ffe0c53998ea59b3b338a972a058" - integrity sha512-i3d+ysO0ix+2YGXLxKu44cEe9z47dtUPKbiPLFklDZvp/rJAsLmeWG2Bf6YKuqR8jEhMl/pHw1pGOquJBxvKIA== +"@npmcli/config@^6.4.0": + version "6.4.1" + resolved "https://registry.yarnpkg.com/@npmcli/config/-/config-6.4.1.tgz#006409c739635db008e78bf58c92421cc147911d" + integrity sha512-uSz+elSGzjCMANWa5IlbGczLYPkNI/LeR+cHrgaTqTrTSh9RHhOFA4daD2eRUz6lMtOW+Fnsb+qv7V2Zz8ML0g== dependencies: - "@npmcli/map-workspaces" "^5.0.0" - "@npmcli/package-json" "^7.0.0" + "@npmcli/map-workspaces" "^3.0.2" ci-info "^4.0.0" - ini "^6.0.0" - nopt "^9.0.0" - proc-log "^6.0.0" + ini "^4.1.0" + nopt "^7.0.0" + proc-log "^3.0.0" + read-package-json-fast "^3.0.2" semver "^7.3.5" - walk-up-path "^4.0.0" + walk-up-path "^3.0.1" + +"@npmcli/disparity-colors@^3.0.0": + version "3.0.1" + resolved "https://registry.yarnpkg.com/@npmcli/disparity-colors/-/disparity-colors-3.0.1.tgz#042d5ef548200c81e3ee3a84c994744573fe79fd" + integrity sha512-cOypTz/9IAhaPgOktbDNPeccTU88y8I1ZURbPeC0ooziK1h6dRJs2iGz1eKP1muaeVbow8GqQ0DaxLG8Bpmblw== + dependencies: + ansi-styles "^4.3.0" "@npmcli/fs@^1.0.0": version "1.1.1" @@ -2712,6 +2689,14 @@ "@gar/promisify" "^1.0.1" semver "^7.3.5" +"@npmcli/fs@^2.1.0": + version "2.1.2" + resolved "https://registry.yarnpkg.com/@npmcli/fs/-/fs-2.1.2.tgz#a9e2541a4a2fec2e69c29b35e6060973da79b865" + integrity sha512-yOJKRvohFOaLqipNtwYB9WugyZKhC/DZC4VYPmpaCzDBrA8YpK3qHZ8/HGscMnE4GqbkLNuVcCnxkeQEdGt6LQ== + 
dependencies: + "@gar/promisify" "^1.1.3" + semver "^7.3.5" + "@npmcli/fs@^3.1.0": version "3.1.0" resolved "https://registry.yarnpkg.com/@npmcli/fs/-/fs-3.1.0.tgz#233d43a25a91d68c3a863ba0da6a3f00924a173e" @@ -2726,12 +2711,19 @@ dependencies: semver "^7.3.5" -"@npmcli/fs@^5.0.0": - version "5.0.0" - resolved "https://registry.yarnpkg.com/@npmcli/fs/-/fs-5.0.0.tgz#674619771907342b3d1ac197aaf1deeb657e3539" - integrity sha512-7OsC1gNORBEawOa5+j2pXN9vsicaIOH5cPXxoR6fJOmH6/EXpJB2CajXOu1fPRFun2m1lktEFX11+P89hqO/og== +"@npmcli/git@^4.0.0", "@npmcli/git@^4.0.1", "@npmcli/git@^4.1.0": + version "4.1.0" + resolved "https://registry.yarnpkg.com/@npmcli/git/-/git-4.1.0.tgz#ab0ad3fd82bc4d8c1351b6c62f0fa56e8fe6afa6" + integrity sha512-9hwoB3gStVfa0N31ymBmrX+GuDGdVA/QWShZVqE0HK2Af+7QGGrCTbZia/SW0ImUTjTne7SP91qxDmtXvDHRPQ== dependencies: + "@npmcli/promise-spawn" "^6.0.0" + lru-cache "^7.4.4" + npm-pick-manifest "^8.0.0" + proc-log "^3.0.0" + promise-inflight "^1.0.1" + promise-retry "^2.0.1" semver "^7.3.5" + which "^3.0.0" "@npmcli/git@^5.0.0": version "5.0.8" @@ -2748,20 +2740,6 @@ semver "^7.3.5" which "^4.0.0" -"@npmcli/git@^7.0.0": - version "7.0.1" - resolved "https://registry.yarnpkg.com/@npmcli/git/-/git-7.0.1.tgz#d1f6462af0e9901536e447beea922bc20dcc5762" - integrity sha512-+XTFxK2jJF/EJJ5SoAzXk3qwIDfvFc5/g+bD274LZ7uY7LE8sTfG6Z8rOanPl2ZEvZWqNvmEdtXC25cE54VcoA== - dependencies: - "@npmcli/promise-spawn" "^9.0.0" - ini "^6.0.0" - lru-cache "^11.2.1" - npm-pick-manifest "^11.0.1" - proc-log "^6.0.0" - promise-retry "^2.0.1" - semver "^7.3.5" - which "^6.0.0" - "@npmcli/installed-package-contents@^2.0.1": version "2.0.2" resolved "https://registry.yarnpkg.com/@npmcli/installed-package-contents/-/installed-package-contents-2.0.2.tgz#bfd817eccd9e8df200919e73f57f9e3d9e4f9e33" @@ -2770,7 +2748,7 @@ npm-bundled "^3.0.0" npm-normalize-package-bin "^3.0.0" -"@npmcli/installed-package-contents@^2.1.0": +"@npmcli/installed-package-contents@^2.0.2", 
"@npmcli/installed-package-contents@^2.1.0": version "2.1.0" resolved "https://registry.yarnpkg.com/@npmcli/installed-package-contents/-/installed-package-contents-2.1.0.tgz#63048e5f6e40947a3a88dcbcb4fd9b76fdd37c17" integrity sha512-c8UuGLeZpm69BryRykLuKRyKFZYJsZSCT4aVY5ds4omyZqJ172ApzgfKJ5eV/r3HgLdUYgFVe54KSFVjKoe27w== @@ -2778,15 +2756,7 @@ npm-bundled "^3.0.0" npm-normalize-package-bin "^3.0.0" -"@npmcli/installed-package-contents@^4.0.0": - version "4.0.0" - resolved "https://registry.yarnpkg.com/@npmcli/installed-package-contents/-/installed-package-contents-4.0.0.tgz#18e5070704cfe0278f9ae48038558b6efd438426" - integrity sha512-yNyAdkBxB72gtZ4GrwXCM0ZUedo9nIbOMKfGjt6Cu6DXf0p8y1PViZAKDC8q8kv/fufx0WTjRBdSlyrvnP7hmA== - dependencies: - npm-bundled "^5.0.0" - npm-normalize-package-bin "^5.0.0" - -"@npmcli/map-workspaces@^3.0.2": +"@npmcli/map-workspaces@^3.0.2", "@npmcli/map-workspaces@^3.0.4": version "3.0.6" resolved "https://registry.yarnpkg.com/@npmcli/map-workspaces/-/map-workspaces-3.0.6.tgz#27dc06c20c35ef01e45a08909cab9cb3da08cea6" integrity sha512-tkYs0OYnzQm6iIRdfy+LcLBjcKuQCeE5YLb8KnrIlutJfheNaPvPpgoFEyEFgbjzl5PLZ3IA/BWAwRU0eHuQDA== @@ -2796,15 +2766,15 @@ minimatch "^9.0.0" read-package-json-fast "^3.0.0" -"@npmcli/map-workspaces@^5.0.0", "@npmcli/map-workspaces@^5.0.3": - version "5.0.3" - resolved "https://registry.yarnpkg.com/@npmcli/map-workspaces/-/map-workspaces-5.0.3.tgz#5b887ec0b535a2ba64d1d338867326a2b9c041d1" - integrity sha512-o2grssXo1e774E5OtEwwrgoszYRh0lqkJH+Pb9r78UcqdGJRDRfhpM8DvZPjzNLLNYeD/rNbjOKM3Ss5UABROw== +"@npmcli/metavuln-calculator@^5.0.0": + version "5.0.1" + resolved "https://registry.yarnpkg.com/@npmcli/metavuln-calculator/-/metavuln-calculator-5.0.1.tgz#426b3e524c2008bcc82dbc2ef390aefedd643d76" + integrity sha512-qb8Q9wIIlEPj3WeA1Lba91R4ZboPL0uspzV0F9uwP+9AYMVB2zOoa7Pbk12g6D2NHAinSbHh6QYmGuRyHZ874Q== dependencies: - "@npmcli/name-from-folder" "^4.0.0" - "@npmcli/package-json" "^7.0.0" - glob "^13.0.0" - minimatch "^10.0.3" + 
cacache "^17.0.0" + json-parse-even-better-errors "^3.0.0" + pacote "^15.0.0" + semver "^7.3.5" "@npmcli/metavuln-calculator@^7.1.1": version "7.1.1" @@ -2817,17 +2787,6 @@ proc-log "^4.1.0" semver "^7.3.5" -"@npmcli/metavuln-calculator@^9.0.2", "@npmcli/metavuln-calculator@^9.0.3": - version "9.0.3" - resolved "https://registry.yarnpkg.com/@npmcli/metavuln-calculator/-/metavuln-calculator-9.0.3.tgz#57b330f3fb8ca34db2782ad5349ea4384bed9c96" - integrity sha512-94GLSYhLXF2t2LAC7pDwLaM4uCARzxShyAQKsirmlNcpidH89VA4/+K1LbJmRMgz5gy65E/QBBWQdUvGLe2Frg== - dependencies: - cacache "^20.0.0" - json-parse-even-better-errors "^5.0.0" - pacote "^21.0.0" - proc-log "^6.0.0" - semver "^7.3.5" - "@npmcli/move-file@^1.0.1": version "1.1.2" resolved "https://registry.yarnpkg.com/@npmcli/move-file/-/move-file-1.1.2.tgz#1a82c3e372f7cae9253eb66d72543d6b8685c674" @@ -2836,26 +2795,24 @@ mkdirp "^1.0.4" rimraf "^3.0.2" +"@npmcli/move-file@^2.0.0": + version "2.0.1" + resolved "https://registry.yarnpkg.com/@npmcli/move-file/-/move-file-2.0.1.tgz#26f6bdc379d87f75e55739bab89db525b06100e4" + integrity sha512-mJd2Z5TjYWq/ttPLLGqArdtnC74J6bOzg4rMDnN+p1xTacZ2yPRCk2y0oSWQtygLR9YVQXgOcONrwtnk3JupxQ== + dependencies: + mkdirp "^1.0.4" + rimraf "^3.0.2" + "@npmcli/name-from-folder@^2.0.0": version "2.0.0" resolved "https://registry.yarnpkg.com/@npmcli/name-from-folder/-/name-from-folder-2.0.0.tgz#c44d3a7c6d5c184bb6036f4d5995eee298945815" integrity sha512-pwK+BfEBZJbKdNYpHHRTNBwBoqrN/iIMO0AiGvYsp3Hoaq0WbgGSWQR6SCldZovoDpY3yje5lkFUe6gsDgJ2vg== -"@npmcli/name-from-folder@^4.0.0": - version "4.0.0" - resolved "https://registry.yarnpkg.com/@npmcli/name-from-folder/-/name-from-folder-4.0.0.tgz#b4d516ae4fab5ed4e8e8032abff3488703fc24a3" - integrity sha512-qfrhVlOSqmKM8i6rkNdZzABj8MKEITGFAY+4teqBziksCQAOLutiAxM1wY2BKEd8KjUSpWmWCYxvXr0y4VTlPg== - "@npmcli/node-gyp@^3.0.0": version "3.0.0" resolved "https://registry.yarnpkg.com/@npmcli/node-gyp/-/node-gyp-3.0.0.tgz#101b2d0490ef1aa20ed460e4c0813f0db560545a" 
integrity sha512-gp8pRXC2oOxu0DUE1/M3bYtb1b3/DbJ5aM113+XJBgfXdussRAsX0YOrOhdd8WvnAR6auDBvJomGAkLKA5ydxA== -"@npmcli/node-gyp@^5.0.0": - version "5.0.0" - resolved "https://registry.yarnpkg.com/@npmcli/node-gyp/-/node-gyp-5.0.0.tgz#35475a58b5d791764a7252231197a14deefe8e47" - integrity sha512-uuG5HZFXLfyFKqg8QypsmgLQW7smiRjVc45bqD/ofZZcR/uxEjgQU8qDPv0s9TEeMUiAAU/GC5bR6++UdTirIQ== - "@npmcli/package-json@5.2.0": version "5.2.0" resolved "https://registry.yarnpkg.com/@npmcli/package-json/-/package-json-5.2.0.tgz#a1429d3111c10044c7efbfb0fce9f2c501f4cfad" @@ -2869,6 +2826,19 @@ proc-log "^4.0.0" semver "^7.5.3" +"@npmcli/package-json@^4.0.0", "@npmcli/package-json@^4.0.1": + version "4.0.1" + resolved "https://registry.yarnpkg.com/@npmcli/package-json/-/package-json-4.0.1.tgz#1a07bf0e086b640500791f6bf245ff43cc27fa37" + integrity sha512-lRCEGdHZomFsURroh522YvA/2cVb9oPIJrjHanCJZkiasz1BzcnLr3tBJhlV7S86MBJBuAQ33is2D60YitZL2Q== + dependencies: + "@npmcli/git" "^4.1.0" + glob "^10.2.2" + hosted-git-info "^6.1.1" + json-parse-even-better-errors "^3.0.0" + normalize-package-data "^5.0.0" + proc-log "^3.0.0" + semver "^7.5.3" + "@npmcli/package-json@^5.0.0", "@npmcli/package-json@^5.1.0": version "5.2.1" resolved "https://registry.yarnpkg.com/@npmcli/package-json/-/package-json-5.2.1.tgz#df69477b1023b81ff8503f2b9db4db4faea567ed" @@ -2882,18 +2852,12 @@ proc-log "^4.0.0" semver "^7.5.3" -"@npmcli/package-json@^7.0.0", "@npmcli/package-json@^7.0.4": - version "7.0.4" - resolved "https://registry.yarnpkg.com/@npmcli/package-json/-/package-json-7.0.4.tgz#f4178e5d90b888f3bdf666915706f613c2d870d7" - integrity sha512-0wInJG3j/K40OJt/33ax47WfWMzZTm6OQxB9cDhTt5huCP2a9g2GnlsxmfN+PulItNPIpPrZ+kfwwUil7eHcZQ== - dependencies: - "@npmcli/git" "^7.0.0" - glob "^13.0.0" - hosted-git-info "^9.0.0" - json-parse-even-better-errors "^5.0.0" - proc-log "^6.0.0" - semver "^7.5.3" - validate-npm-package-license "^3.0.4" +"@npmcli/promise-spawn@^6.0.0", "@npmcli/promise-spawn@^6.0.1", 
"@npmcli/promise-spawn@^6.0.2": + version "6.0.2" + resolved "https://registry.yarnpkg.com/@npmcli/promise-spawn/-/promise-spawn-6.0.2.tgz#c8bc4fa2bd0f01cb979d8798ba038f314cfa70f2" + integrity sha512-gGq0NJkIGSwdbUt4yhdF8ZrmkGKVz9vAdVzpOfnom+V8PLSmSOVhZwbNvZZS1EYcJN5hzzKBxmmVVAInM6HQLg== + dependencies: + which "^3.0.0" "@npmcli/promise-spawn@^7.0.0": version "7.0.2" @@ -2902,13 +2866,6 @@ dependencies: which "^4.0.0" -"@npmcli/promise-spawn@^9.0.0", "@npmcli/promise-spawn@^9.0.1": - version "9.0.1" - resolved "https://registry.yarnpkg.com/@npmcli/promise-spawn/-/promise-spawn-9.0.1.tgz#20e80cbdd2f24ad263a15de3ebbb1673cb82005b" - integrity sha512-OLUaoqBuyxeTqUvjA3FZFiXUfYC1alp3Sa99gW3EUDz3tZ3CbXDdcZ7qWKBzicrJleIgucoWamWH1saAmH/l2Q== - dependencies: - which "^6.0.0" - "@npmcli/query@^3.1.0": version "3.1.0" resolved "https://registry.yarnpkg.com/@npmcli/query/-/query-3.1.0.tgz#bc202c59e122a06cf8acab91c795edda2cdad42c" @@ -2916,23 +2873,11 @@ dependencies: postcss-selector-parser "^6.0.10" -"@npmcli/query@^5.0.0": - version "5.0.0" - resolved "https://registry.yarnpkg.com/@npmcli/query/-/query-5.0.0.tgz#c8cb9ec42c2ef149077282e948dc068ecc79ee11" - integrity sha512-8TZWfTQOsODpLqo9SVhVjHovmKXNpevHU0gO9e+y4V4fRIOneiXy0u0sMP9LmS71XivrEWfZWg50ReH4WRT4aQ== - dependencies: - postcss-selector-parser "^7.0.0" - "@npmcli/redact@^2.0.0": version "2.0.1" resolved "https://registry.yarnpkg.com/@npmcli/redact/-/redact-2.0.1.tgz#95432fd566e63b35c04494621767a4312c316762" integrity sha512-YgsR5jCQZhVmTJvjduTOIHph0L73pK8xwMVaDY0PatySqVM9AZj93jpoXYSJqfHFxFkN9dmqTw6OiqExsS3LPw== -"@npmcli/redact@^4.0.0": - version "4.0.0" - resolved "https://registry.yarnpkg.com/@npmcli/redact/-/redact-4.0.0.tgz#c91121e02b7559a997614a2c1057cd7fc67608c4" - integrity sha512-gOBg5YHMfZy+TfHArfVogwgfBeQnKbbGo3pSUyK/gSI0AVu+pEiDVcKlQb0D8Mg1LNRZILZ6XG8I5dJ4KuAd9Q== - "@npmcli/run-script@8.1.0", "@npmcli/run-script@^8.0.0", "@npmcli/run-script@^8.1.0": version "8.1.0" resolved 
"https://registry.yarnpkg.com/@npmcli/run-script/-/run-script-8.1.0.tgz#a563e5e29b1ca4e648a6b1bbbfe7220b4bfe39fc" @@ -2945,17 +2890,16 @@ proc-log "^4.0.0" which "^4.0.0" -"@npmcli/run-script@^10.0.0", "@npmcli/run-script@^10.0.3": - version "10.0.3" - resolved "https://registry.yarnpkg.com/@npmcli/run-script/-/run-script-10.0.3.tgz#85c16cd893e44cad5edded441b002d8a1d3a8a8e" - integrity sha512-ER2N6itRkzWbbtVmZ9WKaWxVlKlOeBFF1/7xx+KA5J1xKa4JjUwBdb6tDpk0v1qA+d+VDwHI9qmLcXSWcmi+Rw== +"@npmcli/run-script@^6.0.0", "@npmcli/run-script@^6.0.2": + version "6.0.2" + resolved "https://registry.yarnpkg.com/@npmcli/run-script/-/run-script-6.0.2.tgz#a25452d45ee7f7fb8c16dfaf9624423c0c0eb885" + integrity sha512-NCcr1uQo1k5U+SYlnIrbAh3cxy+OQT1VtqiAbxdymSlptbzBb62AjH2xXgjNCoP073hoa1CfCAcwoZ8k96C4nA== dependencies: - "@npmcli/node-gyp" "^5.0.0" - "@npmcli/package-json" "^7.0.0" - "@npmcli/promise-spawn" "^9.0.0" - node-gyp "^12.1.0" - proc-log "^6.0.0" - which "^6.0.0" + "@npmcli/node-gyp" "^3.0.0" + "@npmcli/promise-spawn" "^6.0.0" + node-gyp "^9.0.0" + read-package-json-fast "^3.0.0" + which "^3.0.0" "@nuxtjs/opencollective@0.3.2": version "0.3.2" @@ -3121,10 +3065,18 @@ resolved "https://registry.yarnpkg.com/@octokit/auth-token/-/auth-token-4.0.0.tgz#40d203ea827b9f17f42a29c6afb93b7745ef80c7" integrity sha512-tY/msAuJo6ARbK6SPIxZrPBms3xPbfwBrulZe0Wtr/DIY9lje2HeV1uoebShn6mx7SjCHif6EjMvoREj+gZ+SA== -"@octokit/auth-token@^6.0.0": - version "6.0.0" - resolved "https://registry.yarnpkg.com/@octokit/auth-token/-/auth-token-6.0.0.tgz#b02e9c08a2d8937df09a2a981f226ad219174c53" - integrity sha512-P4YJBPdPSpWTQ1NU4XYdvHvXJJDxM6YwpS0FZHRgP7YFkdVxsWcpWGy/NVqlAA7PcPCnMacXlRm1y2PFZRWL/w== +"@octokit/core@^5.0.0": + version "5.2.2" + resolved "https://registry.yarnpkg.com/@octokit/core/-/core-5.2.2.tgz#252805732de9b4e8e4f658d34b80c4c9b2534761" + integrity sha512-/g2d4sW9nUDJOMz3mabVQvOGhVa4e/BN/Um7yca9Bb2XTzPPnfTWHWQg+IsEYO7M3Vx+EXvaM/I2pJWIMun1bg== + dependencies: + "@octokit/auth-token" 
"^4.0.0" + "@octokit/graphql" "^7.1.0" + "@octokit/request" "^8.4.1" + "@octokit/request-error" "^5.1.1" + "@octokit/types" "^13.0.0" + before-after-hook "^2.2.0" + universal-user-agent "^6.0.0" "@octokit/core@^5.0.2": version "5.2.1" @@ -3139,27 +3091,6 @@ before-after-hook "^2.2.0" universal-user-agent "^6.0.0" -"@octokit/core@^7.0.0": - version "7.0.6" - resolved "https://registry.yarnpkg.com/@octokit/core/-/core-7.0.6.tgz#0d58704391c6b681dec1117240ea4d2a98ac3916" - integrity sha512-DhGl4xMVFGVIyMwswXeyzdL4uXD5OGILGX5N8Y+f6W7LhC1Ze2poSNrkF/fedpVDHEEZ+PHFW0vL14I+mm8K3Q== - dependencies: - "@octokit/auth-token" "^6.0.0" - "@octokit/graphql" "^9.0.3" - "@octokit/request" "^10.0.6" - "@octokit/request-error" "^7.0.2" - "@octokit/types" "^16.0.0" - before-after-hook "^4.0.0" - universal-user-agent "^7.0.0" - -"@octokit/endpoint@^11.0.2": - version "11.0.2" - resolved "https://registry.yarnpkg.com/@octokit/endpoint/-/endpoint-11.0.2.tgz#a8d955e053a244938b81d86cd73efd2dcb5ef5af" - integrity sha512-4zCpzP1fWc7QlqunZ5bSEjxc6yLAlRTnDwKtgXfcI/FxxGoqedDG8V2+xJ60bV2kODqcGB+nATdtap/XYq2NZQ== - dependencies: - "@octokit/types" "^16.0.0" - universal-user-agent "^7.0.2" - "@octokit/endpoint@^9.0.6": version "9.0.6" resolved "https://registry.yarnpkg.com/@octokit/endpoint/-/endpoint-9.0.6.tgz#114d912108fe692d8b139cfe7fc0846dfd11b6c0" @@ -3177,25 +3108,16 @@ "@octokit/types" "^13.0.0" universal-user-agent "^6.0.0" -"@octokit/graphql@^9.0.3": - version "9.0.3" - resolved "https://registry.yarnpkg.com/@octokit/graphql/-/graphql-9.0.3.tgz#5b8341c225909e924b466705c13477face869456" - integrity sha512-grAEuupr/C1rALFnXTv6ZQhFuL1D8G5y8CN04RgrO4FIPMrtm+mcZzFG7dcBm+nq+1ppNixu+Jd78aeJOYxlGA== - dependencies: - "@octokit/request" "^10.0.6" - "@octokit/types" "^16.0.0" - universal-user-agent "^7.0.0" +"@octokit/openapi-types@^20.0.0": + version "20.0.0" + resolved "https://registry.yarnpkg.com/@octokit/openapi-types/-/openapi-types-20.0.0.tgz#9ec2daa0090eeb865ee147636e0c00f73790c6e5" + 
integrity sha512-EtqRBEjp1dL/15V7WiX5LJMIxxkdiGJnabzYx5Apx4FkQIFgAfKumXeYAqqJCj1s+BMX4cPFIFC4OLCR6stlnA== "@octokit/openapi-types@^24.2.0": version "24.2.0" resolved "https://registry.yarnpkg.com/@octokit/openapi-types/-/openapi-types-24.2.0.tgz#3d55c32eac0d38da1a7083a9c3b0cca77924f7d3" integrity sha512-9sIH3nSUttelJSXUrmGzl7QUBFul0/mB8HRYl3fOlgHbIWG+WnYDXU3v/2zMtAvuzZ/ed00Ei6on975FhBfzrg== -"@octokit/openapi-types@^27.0.0": - version "27.0.0" - resolved "https://registry.yarnpkg.com/@octokit/openapi-types/-/openapi-types-27.0.0.tgz#374ea53781965fd02a9d36cacb97e152cefff12d" - integrity sha512-whrdktVs1h6gtR+09+QsNk2+FO+49j6ga1c55YZudfEG+oKJVvJLQi3zkOm5JjiUXAagWK2tI2kTGKJ2Ys7MGA== - "@octokit/plugin-enterprise-rest@6.0.1": version "6.0.1" resolved "https://registry.yarnpkg.com/@octokit/plugin-enterprise-rest/-/plugin-enterprise-rest-6.0.1.tgz#e07896739618dab8da7d4077c658003775f95437" @@ -3208,12 +3130,12 @@ dependencies: "@octokit/types" "^13.7.0" -"@octokit/plugin-paginate-rest@^14.0.0": - version "14.0.0" - resolved "https://registry.yarnpkg.com/@octokit/plugin-paginate-rest/-/plugin-paginate-rest-14.0.0.tgz#44dc9fff2dacb148d4c5c788b573ddc044503026" - integrity sha512-fNVRE7ufJiAA3XUrha2omTA39M6IXIc6GIZLvlbsm8QOQCYvpq/LkMNGyFlB1d8hTDzsAXa3OKtybdMAYsV/fw== +"@octokit/plugin-paginate-rest@^9.0.0": + version "9.2.2" + resolved "https://registry.yarnpkg.com/@octokit/plugin-paginate-rest/-/plugin-paginate-rest-9.2.2.tgz#c516bc498736bcdaa9095b9a1d10d9d0501ae831" + integrity sha512-u3KYkGF7GcZnSD/3UP0S7K5XUFT2FkOQdcfXZGZQPGv3lm4F2Xbf71lvjldr8c1H3nNbF+33cLEkWYbokGWqiQ== dependencies: - "@octokit/types" "^16.0.0" + "@octokit/types" "^12.6.0" "@octokit/plugin-request-log@^4.0.0": version "4.0.1" @@ -3227,24 +3149,24 @@ dependencies: "@octokit/types" "^13.8.0" -"@octokit/plugin-retry@^8.0.0": - version "8.0.3" - resolved "https://registry.yarnpkg.com/@octokit/plugin-retry/-/plugin-retry-8.0.3.tgz#8b7af9700272df724d12fd6333ead98961d135c6" - integrity 
sha512-vKGx1i3MC0za53IzYBSBXcrhmd+daQDzuZfYDd52X5S0M2otf3kVZTVP8bLA3EkU0lTvd1WEC2OlNNa4G+dohA== +"@octokit/plugin-retry@^6.0.0": + version "6.1.0" + resolved "https://registry.yarnpkg.com/@octokit/plugin-retry/-/plugin-retry-6.1.0.tgz#cf5b92223246327ca9c7e17262b93ffde028ab0a" + integrity sha512-WrO3bvq4E1Xh1r2mT9w6SDFg01gFmP81nIG77+p/MqW1JeXXgL++6umim3t6x0Zj5pZm3rXAN+0HEjmmdhIRig== dependencies: - "@octokit/request-error" "^7.0.2" - "@octokit/types" "^16.0.0" + "@octokit/request-error" "^5.0.0" + "@octokit/types" "^13.0.0" bottleneck "^2.15.3" -"@octokit/plugin-throttling@^11.0.0": - version "11.0.3" - resolved "https://registry.yarnpkg.com/@octokit/plugin-throttling/-/plugin-throttling-11.0.3.tgz#584b1a9ca73a5daafeeb7dd5cc13a1bd29a6a60d" - integrity sha512-34eE0RkFCKycLl2D2kq7W+LovheM/ex3AwZCYN8udpi6bxsyjZidb2McXs69hZhLmJlDqTSP8cH+jSRpiaijBg== +"@octokit/plugin-throttling@^8.0.0": + version "8.2.0" + resolved "https://registry.yarnpkg.com/@octokit/plugin-throttling/-/plugin-throttling-8.2.0.tgz#9ec3ea2e37b92fac63f06911d0c8141b46dc4941" + integrity sha512-nOpWtLayKFpgqmgD0y3GqXafMFuKcA4tRPZIfu7BArd2lEZeb1988nhWhwx4aZWmjDmUfdgVf7W+Tt4AmvRmMQ== dependencies: - "@octokit/types" "^16.0.0" + "@octokit/types" "^12.2.0" bottleneck "^2.15.3" -"@octokit/request-error@^5.1.1": +"@octokit/request-error@^5.0.0", "@octokit/request-error@^5.1.1": version "5.1.1" resolved "https://registry.yarnpkg.com/@octokit/request-error/-/request-error-5.1.1.tgz#b9218f9c1166e68bb4d0c89b638edc62c9334805" integrity sha512-v9iyEQJH6ZntoENr9/yXxjuezh4My67CBSu9r6Ve/05Iu5gNgnisNWOsoJHTP6k0Rr0+HQIpnH+kyammu90q/g== @@ -3253,24 +3175,6 @@ deprecation "^2.0.0" once "^1.4.0" -"@octokit/request-error@^7.0.2": - version "7.1.0" - resolved "https://registry.yarnpkg.com/@octokit/request-error/-/request-error-7.1.0.tgz#440fa3cae310466889778f5a222b47a580743638" - integrity sha512-KMQIfq5sOPpkQYajXHwnhjCC0slzCNScLHs9JafXc4RAJI+9f+jNDlBNaIMTvazOPLgb4BnlhGJOTbnN0wIjPw== - dependencies: - "@octokit/types" 
"^16.0.0" - -"@octokit/request@^10.0.6": - version "10.0.7" - resolved "https://registry.yarnpkg.com/@octokit/request/-/request-10.0.7.tgz#93f619914c523750a85e7888de983e1009eb03f6" - integrity sha512-v93h0i1yu4idj8qFPZwjehoJx4j3Ntn+JhXsdJrG9pYaX6j/XRz2RmasMUHtNgQD39nrv/VwTWSqK0RNXR8upA== - dependencies: - "@octokit/endpoint" "^11.0.2" - "@octokit/request-error" "^7.0.2" - "@octokit/types" "^16.0.0" - fast-content-type-parse "^3.0.0" - universal-user-agent "^7.0.2" - "@octokit/request@^8.4.1": version "8.4.1" resolved "https://registry.yarnpkg.com/@octokit/request/-/request-8.4.1.tgz#715a015ccf993087977ea4365c44791fc4572486" @@ -3291,6 +3195,13 @@ "@octokit/plugin-request-log" "^4.0.0" "@octokit/plugin-rest-endpoint-methods" "13.3.2-cjs.1" +"@octokit/types@^12.2.0", "@octokit/types@^12.6.0": + version "12.6.0" + resolved "https://registry.yarnpkg.com/@octokit/types/-/types-12.6.0.tgz#8100fb9eeedfe083aae66473bd97b15b62aedcb2" + integrity sha512-1rhSOfRa6H9w4YwK0yrf5faDaDTb+yLyBUKOCV4xtCDB5VmIPqd/v9yr9o6SAzOAlRxMiRiCic6JVM1/kunVkw== + dependencies: + "@octokit/openapi-types" "^20.0.0" + "@octokit/types@^13.0.0", "@octokit/types@^13.1.0", "@octokit/types@^13.7.0", "@octokit/types@^13.8.0": version "13.10.0" resolved "https://registry.yarnpkg.com/@octokit/types/-/types-13.10.0.tgz#3e7c6b19c0236c270656e4ea666148c2b51fd1a3" @@ -3298,13 +3209,6 @@ dependencies: "@octokit/openapi-types" "^24.2.0" -"@octokit/types@^16.0.0": - version "16.0.0" - resolved "https://registry.yarnpkg.com/@octokit/types/-/types-16.0.0.tgz#fbd7fa590c2ef22af881b1d79758bfaa234dbb7c" - integrity sha512-sKq+9r1Mm4efXW1FCk7hFSeJo4QKreL/tTbR0rz/qx/r1Oa2VV83LTA/H/MuCOX7uCIJmQVRKBcbmWoySjAnSg== - dependencies: - "@octokit/openapi-types" "^27.0.0" - "@paralleldrive/cuid2@2.2.2", "@paralleldrive/cuid2@^2.2.2": version "2.2.2" resolved "https://registry.yarnpkg.com/@paralleldrive/cuid2/-/cuid2-2.2.2.tgz#7f91364d53b89e2c9cb9e02e8dd0f129e834455f" @@ -3312,6 +3216,11 @@ dependencies: "@noble/hashes" "^1.1.5" 
+"@pkgjs/parseargs@^0.11.0": + version "0.11.0" + resolved "https://registry.yarnpkg.com/@pkgjs/parseargs/-/parseargs-0.11.0.tgz#a77ea742fab25775145434eb1d2328cf5013ac33" + integrity sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg== + "@pnpm/config.env-replace@^1.1.0": version "1.1.0" resolved "https://registry.yarnpkg.com/@pnpm/config.env-replace/-/config.env-replace-1.1.0.tgz#ab29da53df41e8948a00f2433f085f54de8b3a4c" @@ -3361,11 +3270,6 @@ resolved "https://registry.yarnpkg.com/@rtsao/scc/-/scc-1.1.0.tgz#927dd2fae9bc3361403ac2c7a00c32ddce9ad7e8" integrity sha512-zt6OdqaDoOnJ1ZYsCYGt9YmWzDXl4vQdKTyJev62gFhRGKdx7mcT54V9KIjg+d2wi9EXsPvAPKe7i7WjfVWB8g== -"@sec-ant/readable-stream@^0.4.1": - version "0.4.1" - resolved "https://registry.yarnpkg.com/@sec-ant/readable-stream/-/readable-stream-0.4.1.tgz#60de891bb126abfdc5410fdc6166aca065f10a0c" - integrity sha512-831qok9r2t8AlxLko40y2ebgSDhenenCatLVeW/uBtnHPyhHOvG0C7TvfgecV+wHzIm5KUICgzmVpWS+IMEAeg== - "@semantic-release/changelog@^6.0.3": version "6.0.3" resolved "https://registry.yarnpkg.com/@semantic-release/changelog/-/changelog-6.0.3.tgz#6195630ecbeccad174461de727d5f975abc23eeb" @@ -3376,17 +3280,16 @@ fs-extra "^11.0.0" lodash "^4.17.4" -"@semantic-release/commit-analyzer@^13.0.1": - version "13.0.1" - resolved "https://registry.yarnpkg.com/@semantic-release/commit-analyzer/-/commit-analyzer-13.0.1.tgz#d84b599c3fef623ccc01f0cc2025eb56a57d8feb" - integrity sha512-wdnBPHKkr9HhNhXOhZD5a2LNl91+hs8CC2vsAVYxtZH3y0dV3wKn+uZSN61rdJQZ8EGxzWB3inWocBHV9+u/CQ== +"@semantic-release/commit-analyzer@^10.0.0": + version "10.0.4" + resolved "https://registry.yarnpkg.com/@semantic-release/commit-analyzer/-/commit-analyzer-10.0.4.tgz#e2770f341b75d8f19fe6b5b833e8c2e0de2b84de" + integrity sha512-pFGn99fn8w4/MHE0otb2A/l5kxgOuxaaauIh4u30ncoTJuqWj4hXTgEJ03REqjS+w1R2vPftSsO26WC61yOcpw== dependencies: - conventional-changelog-angular "^8.0.0" - conventional-changelog-writer "^8.0.0" - 
conventional-commits-filter "^5.0.0" - conventional-commits-parser "^6.0.0" + conventional-changelog-angular "^6.0.0" + conventional-commits-filter "^3.0.0" + conventional-commits-parser "^5.0.0" debug "^4.0.0" - import-from-esm "^2.0.0" + import-from "^4.0.0" lodash-es "^4.17.21" micromatch "^4.0.2" @@ -3419,65 +3322,62 @@ micromatch "^4.0.0" p-reduce "^2.0.0" -"@semantic-release/github@^12.0.0": - version "12.0.2" - resolved "https://registry.yarnpkg.com/@semantic-release/github/-/github-12.0.2.tgz#bc1f76e9cd386c5b01a20c3f0606e8eec6b1b93a" - integrity sha512-qyqLS+aSGH1SfXIooBKjs7mvrv0deg8v+jemegfJg1kq6ji+GJV8CO08VJDEsvjp3O8XJmTTIAjjZbMzagzsdw== +"@semantic-release/github@^9.0.0": + version "9.2.6" + resolved "https://registry.yarnpkg.com/@semantic-release/github/-/github-9.2.6.tgz#0b0b00ab3ab0486cd3aecb4ae2f9f9cf2edd8eae" + integrity sha512-shi+Lrf6exeNZF+sBhK+P011LSbhmIAoUEgEY6SsxF8irJ+J2stwI5jkyDQ+4gzYyDImzV6LCKdYB9FXnQRWKA== dependencies: - "@octokit/core" "^7.0.0" - "@octokit/plugin-paginate-rest" "^14.0.0" - "@octokit/plugin-retry" "^8.0.0" - "@octokit/plugin-throttling" "^11.0.0" + "@octokit/core" "^5.0.0" + "@octokit/plugin-paginate-rest" "^9.0.0" + "@octokit/plugin-retry" "^6.0.0" + "@octokit/plugin-throttling" "^8.0.0" "@semantic-release/error" "^4.0.0" aggregate-error "^5.0.0" debug "^4.3.4" dir-glob "^3.0.1" + globby "^14.0.0" http-proxy-agent "^7.0.0" https-proxy-agent "^7.0.0" - issue-parser "^7.0.0" + issue-parser "^6.0.0" lodash-es "^4.17.21" mime "^4.0.0" p-filter "^4.0.0" - tinyglobby "^0.2.14" - undici "^7.0.0" url-join "^5.0.0" -"@semantic-release/npm@^13.1.1": - version "13.1.3" - resolved "https://registry.yarnpkg.com/@semantic-release/npm/-/npm-13.1.3.tgz#f75bc82e005fcb859932461bfc5583746a31f6c1" - integrity sha512-q7zreY8n9V0FIP1Cbu63D+lXtRAVAIWb30MH5U3TdrfXt6r2MIrWCY0whAImN53qNvSGp0Zt07U95K+Qp9GpEg== +"@semantic-release/npm@^10.0.2": + version "10.0.6" + resolved 
"https://registry.yarnpkg.com/@semantic-release/npm/-/npm-10.0.6.tgz#1c47a77e79464586fa1c67f148567ef2b9fda315" + integrity sha512-DyqHrGE8aUyapA277BB+4kV0C4iMHh3sHzUWdf0jTgp5NNJxVUz76W1f57FB64Ue03him3CBXxFqQD2xGabxow== dependencies: - "@actions/core" "^2.0.0" "@semantic-release/error" "^4.0.0" aggregate-error "^5.0.0" - env-ci "^11.2.0" - execa "^9.0.0" + execa "^8.0.0" fs-extra "^11.0.0" lodash-es "^4.17.21" nerf-dart "^1.0.0" normalize-url "^8.0.0" - npm "^11.6.2" + npm "^9.5.0" rc "^1.2.8" - read-pkg "^10.0.0" + read-pkg "^8.0.0" registry-auth-token "^5.0.0" semver "^7.1.2" tempy "^3.0.0" -"@semantic-release/release-notes-generator@^14.1.0": - version "14.1.0" - resolved "https://registry.yarnpkg.com/@semantic-release/release-notes-generator/-/release-notes-generator-14.1.0.tgz#ac47bd214b48130e71578d9acefb1b1272854070" - integrity sha512-CcyDRk7xq+ON/20YNR+1I/jP7BYKICr1uKd1HHpROSnnTdGqOTburi4jcRiTYz0cpfhxSloQO3cGhnoot7IEkA== +"@semantic-release/release-notes-generator@^11.0.0": + version "11.0.7" + resolved "https://registry.yarnpkg.com/@semantic-release/release-notes-generator/-/release-notes-generator-11.0.7.tgz#2193b8aa6b8b40297b6cbc5156bc9a7e5cdb9bbd" + integrity sha512-T09QB9ImmNx7Q6hY6YnnEbw/rEJ6a+22LBxfZq+pSAXg/OL/k0siwEm5cK4k1f9dE2Z2mPIjJKKohzUm0jbxcQ== dependencies: - conventional-changelog-angular "^8.0.0" - conventional-changelog-writer "^8.0.0" - conventional-commits-filter "^5.0.0" - conventional-commits-parser "^6.0.0" + conventional-changelog-angular "^6.0.0" + conventional-changelog-writer "^6.0.0" + conventional-commits-filter "^4.0.0" + conventional-commits-parser "^5.0.0" debug "^4.0.0" get-stream "^7.0.0" - import-from-esm "^2.0.0" + import-from "^4.0.0" into-stream "^7.0.0" lodash-es "^4.17.21" - read-package-up "^11.0.0" + read-pkg-up "^10.0.0" "@semrel-extra/topo@^1.14.0": version "1.14.1" @@ -3548,6 +3448,13 @@ resolved "https://registry.yarnpkg.com/@sideway/pinpoint/-/pinpoint-2.0.0.tgz#cff8ffadc372ad29fd3f78277aeb29e632cc70df" integrity 
sha512-RNiOoTPkptFtSVzQevY/yWtZwf/RxyVnPy/OcA9HBM3MlGDnBEYL5B41H0MTn0Uec8Hi+2qUtTfG2WWZBmMejQ== +"@sigstore/bundle@^1.1.0": + version "1.1.0" + resolved "https://registry.yarnpkg.com/@sigstore/bundle/-/bundle-1.1.0.tgz#17f8d813b09348b16eeed66a8cf1c3d6bd3d04f1" + integrity sha512-PFutXEy0SmQxYI4texPw3dd2KewuNqv7OuK1ZFtY2fM754yhvG2KdgwIhRnoEE2uHdtdGNQ8s0lb94dW9sELog== + dependencies: + "@sigstore/protobuf-specs" "^0.2.0" + "@sigstore/bundle@^2.3.2": version "2.3.2" resolved "https://registry.yarnpkg.com/@sigstore/bundle/-/bundle-2.3.2.tgz#ad4dbb95d665405fd4a7a02c8a073dbd01e4e95e" @@ -3555,32 +3462,29 @@ dependencies: "@sigstore/protobuf-specs" "^0.3.2" -"@sigstore/bundle@^4.0.0": - version "4.0.0" - resolved "https://registry.yarnpkg.com/@sigstore/bundle/-/bundle-4.0.0.tgz#854eda43eb6a59352037e49000177c8904572f83" - integrity sha512-NwCl5Y0V6Di0NexvkTqdoVfmjTaQwoLM236r89KEojGmq/jMls8S+zb7yOwAPdXvbwfKDlP+lmXgAL4vKSQT+A== - dependencies: - "@sigstore/protobuf-specs" "^0.5.0" - "@sigstore/core@^1.0.0", "@sigstore/core@^1.1.0": version "1.1.0" resolved "https://registry.yarnpkg.com/@sigstore/core/-/core-1.1.0.tgz#5583d8f7ffe599fa0a89f2bf289301a5af262380" integrity sha512-JzBqdVIyqm2FRQCulY6nbQzMpJJpSiJ8XXWMhtOX9eKgaXXpfNOF53lzQEjIydlStnd/eFtuC1dW4VYdD93oRg== -"@sigstore/core@^3.1.0": - version "3.1.0" - resolved "https://registry.yarnpkg.com/@sigstore/core/-/core-3.1.0.tgz#b418de73f56333ad9e369b915173d8c98e9b96d5" - integrity sha512-o5cw1QYhNQ9IroioJxpzexmPjfCe7gzafd2RY3qnMpxr4ZEja+Jad/U8sgFpaue6bOaF+z7RVkyKVV44FN+N8A== +"@sigstore/protobuf-specs@^0.2.0": + version "0.2.1" + resolved "https://registry.yarnpkg.com/@sigstore/protobuf-specs/-/protobuf-specs-0.2.1.tgz#be9ef4f3c38052c43bd399d3f792c97ff9e2277b" + integrity sha512-XTWVxnWJu+c1oCshMLwnKvz8ZQJJDVOlciMfgpJBQbThVjKTCG8dwyhgLngBD2KN0ap9F/gOV8rFDEx8uh7R2A== "@sigstore/protobuf-specs@^0.3.2": version "0.3.3" resolved 
"https://registry.yarnpkg.com/@sigstore/protobuf-specs/-/protobuf-specs-0.3.3.tgz#7dd46d68b76c322873a2ef7581ed955af6f4dcde" integrity sha512-RpacQhBlwpBWd7KEJsRKcBQalbV28fvkxwTOJIqhIuDysMMaJW47V4OqW30iJB9uRpqOSxxEAQFdr8tTattReQ== -"@sigstore/protobuf-specs@^0.5.0": - version "0.5.0" - resolved "https://registry.yarnpkg.com/@sigstore/protobuf-specs/-/protobuf-specs-0.5.0.tgz#e5f029edcb3a4329853a09b603011e61043eb005" - integrity sha512-MM8XIwUjN2bwvCg1QvrMtbBmpcSHrkhFSCu1D11NyPvDQ25HEc4oG5/OcQfd/Tlf/OxmKWERDj0zGE23jQaMwA== +"@sigstore/sign@^1.0.0": + version "1.0.0" + resolved "https://registry.yarnpkg.com/@sigstore/sign/-/sign-1.0.0.tgz#6b08ebc2f6c92aa5acb07a49784cb6738796f7b4" + integrity sha512-INxFVNQteLtcfGmcoldzV6Je0sbbfh9I16DM4yJPw3j5+TFP8X6uIiA18mvpEa9yyeycAKgPmOA3X9hVdVTPUA== + dependencies: + "@sigstore/bundle" "^1.1.0" + "@sigstore/protobuf-specs" "^0.2.0" + make-fetch-happen "^11.0.1" "@sigstore/sign@^2.3.2": version "2.3.2" @@ -3594,17 +3498,13 @@ proc-log "^4.2.0" promise-retry "^2.0.1" -"@sigstore/sign@^4.1.0": - version "4.1.0" - resolved "https://registry.yarnpkg.com/@sigstore/sign/-/sign-4.1.0.tgz#63df15a137337b29f463a1d1c51e1f7d4c1db2f1" - integrity sha512-Vx1RmLxLGnSUqx/o5/VsCjkuN5L7y+vxEEwawvc7u+6WtX2W4GNa7b9HEjmcRWohw/d6BpATXmvOwc78m+Swdg== - dependencies: - "@sigstore/bundle" "^4.0.0" - "@sigstore/core" "^3.1.0" - "@sigstore/protobuf-specs" "^0.5.0" - make-fetch-happen "^15.0.3" - proc-log "^6.1.0" - promise-retry "^2.0.1" +"@sigstore/tuf@^1.0.3": + version "1.0.3" + resolved "https://registry.yarnpkg.com/@sigstore/tuf/-/tuf-1.0.3.tgz#2a65986772ede996485728f027b0514c0b70b160" + integrity sha512-2bRovzs0nJZFlCN3rXirE4gwxCn97JNjMmwpecqlbgV9WcxX7WRuIrgzx/X7Ib7MYRbyUTpBYE0s2x6AmZXnlg== + dependencies: + "@sigstore/protobuf-specs" "^0.2.0" + tuf-js "^1.1.7" "@sigstore/tuf@^2.3.4": version "2.3.4" @@ -3614,14 +3514,6 @@ "@sigstore/protobuf-specs" "^0.3.2" tuf-js "^2.2.1" -"@sigstore/tuf@^4.0.0", "@sigstore/tuf@^4.0.1": - version "4.0.1" - resolved 
"https://registry.yarnpkg.com/@sigstore/tuf/-/tuf-4.0.1.tgz#9b080390936d79ea3b6a893b64baf3123e92d6d3" - integrity sha512-OPZBg8y5Vc9yZjmWCHrlWPMBqW5yd8+wFNl+thMdtcWz3vjVSoJQutF8YkrzI0SLGnkuFof4HSsWUhXrf219Lw== - dependencies: - "@sigstore/protobuf-specs" "^0.5.0" - tuf-js "^4.1.0" - "@sigstore/verify@^1.2.1": version "1.2.1" resolved "https://registry.yarnpkg.com/@sigstore/verify/-/verify-1.2.1.tgz#c7e60241b432890dcb8bd8322427f6062ef819e1" @@ -3631,29 +3523,15 @@ "@sigstore/core" "^1.1.0" "@sigstore/protobuf-specs" "^0.3.2" -"@sigstore/verify@^3.1.0": - version "3.1.0" - resolved "https://registry.yarnpkg.com/@sigstore/verify/-/verify-3.1.0.tgz#4046d4186421db779501fe87fa5acaa5d4d21b08" - integrity sha512-mNe0Iigql08YupSOGv197YdHpPPr+EzDZmfCgMc7RPNaZTw5aLN01nBl6CHJOh3BGtnMIj83EeN4butBchc8Ag== - dependencies: - "@sigstore/bundle" "^4.0.0" - "@sigstore/core" "^3.1.0" - "@sigstore/protobuf-specs" "^0.5.0" - "@sinclair/typebox@^0.27.8": version "0.27.8" resolved "https://registry.yarnpkg.com/@sinclair/typebox/-/typebox-0.27.8.tgz#6667fac16c436b5434a387a34dedb013198f6e6e" integrity sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA== -"@sindresorhus/is@^4.6.0": - version "4.6.0" - resolved "https://registry.yarnpkg.com/@sindresorhus/is/-/is-4.6.0.tgz#3c7c9c46e678feefe7a2e5bb609d3dbd665ffb3f" - integrity sha512-t09vSN3MdfsyCHoFcTRCH/iUtG7OJ0CsjzB8cjAmKc/va/kIgeDI/TxsigdncE/4be734m0cvIYwNaV4i2XqAw== - -"@sindresorhus/merge-streams@^4.0.0": - version "4.0.0" - resolved "https://registry.yarnpkg.com/@sindresorhus/merge-streams/-/merge-streams-4.0.0.tgz#abb11d99aeb6d27f1b563c38147a72d50058e339" - integrity sha512-tlqY9xq5ukxTUZBmoOp+m61cqwQD5pHJtFY3Mn8CA8ps6yghLH/Hw8UPdqg4OLmFW3IFlcXnQNmo/dh8HzXYIQ== +"@sindresorhus/merge-streams@^2.1.0": + version "2.3.0" + resolved "https://registry.yarnpkg.com/@sindresorhus/merge-streams/-/merge-streams-2.3.0.tgz#719df7fb41766bc143369eaa0dd56d8dc87c9958" + integrity 
sha512-LtoMMhxAlorcGhmFYI+LhPgbPZCkgP6ra1YL604EeF6U98pLlQ3iWIGMdWSC+vWmPBWBNgmDBAhnAobLROJmwg== "@sinonjs/commons@^3.0.0": version "3.0.0" @@ -4204,11 +4082,24 @@ resolved "https://registry.yarnpkg.com/@tsconfig/node16/-/node16-1.0.4.tgz#0b92dcc0cc1c81f6f306a381f28e31b1a56536e9" integrity sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA== +"@tufjs/canonical-json@1.0.0": + version "1.0.0" + resolved "https://registry.yarnpkg.com/@tufjs/canonical-json/-/canonical-json-1.0.0.tgz#eade9fd1f537993bc1f0949f3aea276ecc4fab31" + integrity sha512-QTnf++uxunWvG2z3UFNzAoQPHxnSXOwtaI3iJ+AohhV+5vONuArPjJE7aPXPVXfXJsqrVbZBu9b81AJoSd09IQ== + "@tufjs/canonical-json@2.0.0": version "2.0.0" resolved "https://registry.yarnpkg.com/@tufjs/canonical-json/-/canonical-json-2.0.0.tgz#a52f61a3d7374833fca945b2549bc30a2dd40d0a" integrity sha512-yVtV8zsdo8qFHe+/3kw81dSLyF7D576A5cCFCi4X7B39tWT7SekaEFUnvnWJHz+9qO7qJTah1JbrDjWKqFtdWA== +"@tufjs/models@1.0.4": + version "1.0.4" + resolved "https://registry.yarnpkg.com/@tufjs/models/-/models-1.0.4.tgz#5a689630f6b9dbda338d4b208019336562f176ef" + integrity sha512-qaGV9ltJP0EO25YfFUPhxRVK0evXFIAGicsVXuRim4Ed9cjPxYhNnNJ49SFmbeLgtxpslIkX317IgpfcHPVj/A== + dependencies: + "@tufjs/canonical-json" "1.0.0" + minimatch "^9.0.0" + "@tufjs/models@2.0.1": version "2.0.1" resolved "https://registry.yarnpkg.com/@tufjs/models/-/models-2.0.1.tgz#e429714e753b6c2469af3212e7f320a6973c2812" @@ -4217,14 +4108,6 @@ "@tufjs/canonical-json" "2.0.0" minimatch "^9.0.4" -"@tufjs/models@4.1.0": - version "4.1.0" - resolved "https://registry.yarnpkg.com/@tufjs/models/-/models-4.1.0.tgz#494b39cf5e2f6855d80031246dd236d8086069b3" - integrity sha512-Y8cK9aggNRsqJVaKUlEYs4s7CvQ1b1ta2DVPyAimb0I2qhzjNk+A+mxvll/klL0RlfuIUei8BF7YWiua4kQqww== - dependencies: - "@tufjs/canonical-json" "2.0.0" - minimatch "^10.1.1" - "@tybys/wasm-util@^0.9.0": version "0.9.0" resolved 
"https://registry.yarnpkg.com/@tybys/wasm-util/-/wasm-util-0.9.0.tgz#3e75eb00604c8d6db470bf18c37b7d984a0e3355" @@ -4482,6 +4365,14 @@ dependencies: "@types/node" "*" +"@types/jsonwebtoken@^9.0.10": + version "9.0.10" + resolved "https://registry.yarnpkg.com/@types/jsonwebtoken/-/jsonwebtoken-9.0.10.tgz#a7932a47177dcd4283b6146f3bd5c26d82647f09" + integrity sha512-asx5hIG9Qmf/1oStypjanR7iKTv0gXQ1Ov/jfrX6kS/EO0OFni8orbmGCn0672NHR3kXHwpAwR+B368ZGN/2rA== + dependencies: + "@types/ms" "*" + "@types/node" "*" + "@types/keygrip@*": version "1.0.5" resolved "https://registry.yarnpkg.com/@types/keygrip/-/keygrip-1.0.5.tgz#2c229a40fffa60cec252d3400410ad6f947e1a6c" @@ -4614,7 +4505,7 @@ dependencies: undici-types "~6.21.0" -"@types/normalize-package-data@^2.4.0", "@types/normalize-package-data@^2.4.3", "@types/normalize-package-data@^2.4.4": +"@types/normalize-package-data@^2.4.0", "@types/normalize-package-data@^2.4.1": version "2.4.4" resolved "https://registry.yarnpkg.com/@types/normalize-package-data/-/normalize-package-data-2.4.4.tgz#56e2cc26c397c038fab0e3a917a12d5c5909e901" integrity sha512-37i+OaWTh9qeK4LSHPsyRC7NahnGotNuZvjLSgcPzblpHB3rrCJxAOgI5gCdKm7coonsaX1Of0ILiTcnZjbfxA== @@ -4896,7 +4787,7 @@ JSONStream@^1.3.5: jsonparse "^1.2.0" through ">=2.2.7 <3" -abbrev@1: +abbrev@1, abbrev@^1.0.0: version "1.1.1" resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.1.1.tgz#f8f2c887ad10bf67f634f005b6987fed3179aac8" integrity sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q== @@ -4906,11 +4797,6 @@ abbrev@^2.0.0: resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-2.0.0.tgz#cf59829b8b4f03f89dda2771cb7f3653828c89bf" integrity sha512-6/mh1E2u2YgEsCHdY0Yx5oW+61gZU+1vXaoiHHrpKeuRNNgFvS+/jrwHiQhB5apAf5oB7UB7E19ol2R2LKH8hQ== -abbrev@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-4.0.0.tgz#ec933f0e27b6cd60e89b5c6b2a304af42209bb05" - integrity 
sha512-a1wflyaL0tHtJSmLSOVybYhy22vRih4eduhhrkcjgrWGnRfrZtovJ2FRjxuTtkkj47O/baf0R86QU5OuYpz8fA== - abort-controller@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/abort-controller/-/abort-controller-3.0.0.tgz#eaf54d53b62bae4138e809ca225c8439a6efb392" @@ -4990,6 +4876,13 @@ agentkeepalive@^4.1.3: dependencies: humanize-ms "^1.2.1" +agentkeepalive@^4.2.1: + version "4.6.0" + resolved "https://registry.yarnpkg.com/agentkeepalive/-/agentkeepalive-4.6.0.tgz#35f73e94b3f40bf65f105219c623ad19c136ea6a" + integrity sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ== + dependencies: + humanize-ms "^1.2.1" + aggregate-error@^3.0.0: version "3.1.0" resolved "https://registry.yarnpkg.com/aggregate-error/-/aggregate-error-3.1.0.tgz#92670ff50f5359bdb7a3e0d40d0ec30c5737687a" @@ -5057,12 +4950,10 @@ ansi-escapes@^4.2.1, ansi-escapes@^4.3.2: dependencies: type-fest "^0.21.3" -ansi-escapes@^7.0.0: - version "7.2.0" - resolved "https://registry.yarnpkg.com/ansi-escapes/-/ansi-escapes-7.2.0.tgz#31b25afa3edd3efc09d98c2fee831d460ff06b49" - integrity sha512-g6LhBsl+GBPRWGWsBtutpzBYuIIdBkLEvad5C/va/74Db018+5TZiyA26cZJAr3Rft5lprVqOIPxf5Vid6tqAw== - dependencies: - environment "^1.0.0" +ansi-escapes@^6.2.0: + version "6.2.1" + resolved "https://registry.yarnpkg.com/ansi-escapes/-/ansi-escapes-6.2.1.tgz#76c54ce9b081dad39acec4b5d53377913825fb0f" + integrity sha512-4nJ3yixlEthEJ9Rk4vPcdBRkZvQZlYyu8j4/Mqz5sgIkddmEnH2Yj2ZrnP9S3tQOvSNRUIgVNF/1yPpRAGNRig== ansi-regex@^2.0.0: version "2.1.1" @@ -5084,12 +4975,7 @@ ansi-regex@^5.0.1: resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-5.0.1.tgz#082cb2c89c9fe8659a311a53bd6a4dc5301db304" integrity sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ== -ansi-regex@^6.0.1: - version "6.0.1" - resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-6.0.1.tgz#3183e38fae9a65d7cb5e53945cd5897d0260a06a" - integrity 
sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA== - -ansi-regex@^6.1.0: +ansi-regex@^6.2.2: version "6.2.2" resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-6.2.2.tgz#60216eea464d864597ce2832000738a0589650c1" integrity sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg== @@ -5118,7 +5004,7 @@ ansi-styles@^5.0.0: resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-5.2.0.tgz#07449690ad45777d1924ac2abb2fc8895dba836b" integrity sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA== -ansi-styles@^6.2.1: +ansi-styles@^6.1.0: version "6.2.3" resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-6.2.3.tgz#c044d5dcc521a076413472597a1acb1f103c4041" integrity sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg== @@ -5138,11 +5024,6 @@ antlr4@^4.13.1-patch-1: resolved "https://registry.yarnpkg.com/antlr4/-/antlr4-4.13.1-patch-1.tgz#946176f863f890964a050c4f18c47fd6f7e57602" integrity sha512-OjFLWWLzDMV9rdFhpvroCWR4ooktNg9/nvVYSA5z28wuVpU36QUNuioR1XLnQtcjVlf8npjyz593PxnU/f/Cow== -any-promise@^1.0.0: - version "1.3.0" - resolved "https://registry.yarnpkg.com/any-promise/-/any-promise-1.3.0.tgz#abc6afeedcea52e809cdc0376aed3ce39635d17f" - integrity sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A== - anymatch@^3.0.3, anymatch@~3.1.2: version "3.1.3" resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-3.1.3.tgz#790c58b19ba1720a84205b57c618d5ad8524973e" @@ -5253,6 +5134,11 @@ are-we-there-yet@^3.0.0: delegates "^1.0.0" readable-stream "^3.6.0" +are-we-there-yet@^4.0.0: + version "4.0.2" + resolved "https://registry.yarnpkg.com/are-we-there-yet/-/are-we-there-yet-4.0.2.tgz#aed25dd0eae514660d49ac2b2366b175c614785a" + integrity sha512-ncSWAawFhKMJDTdoAeOV+jyW1VCMj5QIAwULIBV0SSR7B/RLPPEQiknKcg/RIIZlUQrxELpsxMiTUoAQ4sIUyg== + 
arg@^4.1.0: version "4.1.3" resolved "https://registry.yarnpkg.com/arg/-/arg-4.1.3.tgz#269fc7ad5b8e42cb63c896d5666017261c144089" @@ -5567,11 +5453,6 @@ balanced-match@^1.0.0: resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.2.tgz#e83e3a7e3f300b34cb9d87f615fa0cbf357690ee" integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw== -balanced-match@^4.0.2: - version "4.0.4" - resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-4.0.4.tgz#bfb10662feed8196a2c62e7c68e17720c274179a" - integrity sha512-BLrgEcRTwX2o6gGxGOCNyMvGSp35YofuYzw9h1IMTRmKqttAZZVU67bdb9Pr2vUHA8+j3i2tJfjO6C6+4myGTA== - base64-js@^1.3.1, base64-js@^1.5.1: version "1.5.1" resolved "https://registry.yarnpkg.com/base64-js/-/base64-js-1.5.1.tgz#1b1b440160a5bf7ad40b650f095963481903930a" @@ -5594,12 +5475,7 @@ before-after-hook@^2.2.0: resolved "https://registry.yarnpkg.com/before-after-hook/-/before-after-hook-2.2.3.tgz#c51e809c81a4e354084422b9b26bad88249c517c" integrity sha512-NzUnlZexiaH/46WDhANlyR2bXRopNg4F/zuSA3OpZnllCUgRaOF2znDioDWrmbNVsuZk6l9pMquQB38cfBZwkQ== -before-after-hook@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/before-after-hook/-/before-after-hook-4.0.0.tgz#cf1447ab9160df6a40f3621da64d6ffc36050cb9" - integrity sha512-q6tR3RPqIB1pMiTRMFcZwuG5T8vwp+vUvEG0vuI6B+Rikh5BfPp2fQ82c925FOs+b0lcFQ8CFrL+KbilfZFhOQ== - -bin-links@^4.0.4: +bin-links@^4.0.1, bin-links@^4.0.4: version "4.0.4" resolved "https://registry.yarnpkg.com/bin-links/-/bin-links-4.0.4.tgz#c3565832b8e287c85f109a02a17027d152a58a63" integrity sha512-cMtq4W5ZsEwcutJrVId+a/tjt8GSbS+h0oNkdl6+6rBuEv8Ot33Bevj5KPm40t309zuhVic8NjpuL42QCiJWWA== @@ -5609,17 +5485,6 @@ bin-links@^4.0.4: read-cmd-shim "^4.0.0" write-file-atomic "^5.0.0" -bin-links@^6.0.0: - version "6.0.0" - resolved "https://registry.yarnpkg.com/bin-links/-/bin-links-6.0.0.tgz#0245114374463a694e161a1e65417e7939ab2eba" - integrity 
sha512-X4CiKlcV2GjnCMwnKAfbVWpHa++65th9TuzAEYtZoATiOE2DQKhSp4CJlyLoTqdhBKlXjpXjCTYPNNFS33Fi6w== - dependencies: - cmd-shim "^8.0.0" - npm-normalize-package-bin "^5.0.0" - proc-log "^6.0.0" - read-cmd-shim "^6.0.0" - write-file-atomic "^7.0.0" - binary-extensions@^2.0.0: version "2.2.0" resolved "https://registry.yarnpkg.com/binary-extensions/-/binary-extensions-2.2.0.tgz#75f502eeaf9ffde42fc98829645be4ea76bd9e2d" @@ -5630,11 +5495,6 @@ binary-extensions@^2.2.0: resolved "https://registry.yarnpkg.com/binary-extensions/-/binary-extensions-2.3.0.tgz#f6e14a97858d327252200242d4ccfe522c445522" integrity sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw== -binary-extensions@^3.0.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/binary-extensions/-/binary-extensions-3.1.0.tgz#be31cd3aa5c7e3dc42c501e57d4fff87d665e17e" - integrity sha512-Jvvd9hy1w+xUad8+ckQsWA/V1AoyubOvqn0aygjMOVM4BfIaRav1NFS3LsTSDaV4n4FtcCtQXvzep1E6MboqwQ== - bindings@^1.5.0: version "1.5.0" resolved "https://registry.yarnpkg.com/bindings/-/bindings-1.5.0.tgz#10353c9e945334bc0511a6d90b38fbc7c9c504df" @@ -5752,14 +5612,7 @@ brace-expansion@^2.0.1, brace-expansion@^2.0.2: dependencies: balanced-match "^1.0.0" -brace-expansion@^5.0.2: - version "5.0.4" - resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-5.0.4.tgz#614daaecd0a688f660bbbc909a8748c3d80d4336" - integrity sha512-h+DEnpVvxmfVefa4jFbCf5HdH5YMDXRsmKflpf1pILZWRFlTbJpxeU55nJl4Smt5HQaGzg1o6RHFPJaOqnmBDg== - dependencies: - balanced-match "^4.0.2" - -braces@^3.0.3, braces@~3.0.2: +braces@^3.0.1, braces@^3.0.3, braces@~3.0.2: version "3.0.3" resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.3.tgz#490332f40919452272d55a8480adc0c441358789" integrity sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA== @@ -5902,17 +5755,41 @@ cacache@^15.2.0: tar "^6.0.2" unique-filename "^1.1.1" -cacache@^18.0.0, cacache@^18.0.3: - version "18.0.4" 
- resolved "https://registry.yarnpkg.com/cacache/-/cacache-18.0.4.tgz#4601d7578dadb59c66044e157d02a3314682d6a5" - integrity sha512-B+L5iIa9mgcjLbliir2th36yEwPftrzteHYujzsx3dFP/31GCHcIeS8f5MGd80odLOjaOvSpU3EEAmRQptkxLQ== +cacache@^16.1.0: + version "16.1.3" + resolved "https://registry.yarnpkg.com/cacache/-/cacache-16.1.3.tgz#a02b9f34ecfaf9a78c9f4bc16fceb94d5d67a38e" + integrity sha512-/+Emcj9DAXxX4cwlLmRI9c166RuL3w30zp4R7Joiv2cQTtTtA+jeuCAjH3ZlGnYS3tKENSrKhAzVVP9GVyzeYQ== + dependencies: + "@npmcli/fs" "^2.1.0" + "@npmcli/move-file" "^2.0.0" + chownr "^2.0.0" + fs-minipass "^2.1.0" + glob "^8.0.1" + infer-owner "^1.0.4" + lru-cache "^7.7.1" + minipass "^3.1.6" + minipass-collect "^1.0.2" + minipass-flush "^1.0.5" + minipass-pipeline "^1.2.4" + mkdirp "^1.0.4" + p-map "^4.0.0" + promise-inflight "^1.0.1" + rimraf "^3.0.2" + ssri "^9.0.0" + tar "^6.1.11" + unique-filename "^2.0.0" + +cacache@^17.0.0, cacache@^17.0.4, cacache@^17.1.4: + version "17.1.4" + resolved "https://registry.yarnpkg.com/cacache/-/cacache-17.1.4.tgz#b3ff381580b47e85c6e64f801101508e26604b35" + integrity sha512-/aJwG2l3ZMJ1xNAnqbMpA40of9dj/pIH3QfiuQSqjfPJF747VR0J/bHn+/KdNnHKc6XQcWt/AfRSBft82W1d2A== dependencies: "@npmcli/fs" "^3.1.0" fs-minipass "^3.0.0" glob "^10.2.2" - lru-cache "^10.0.1" + lru-cache "^7.7.1" minipass "^7.0.3" - minipass-collect "^2.0.1" + minipass-collect "^1.0.2" minipass-flush "^1.0.5" minipass-pipeline "^1.2.4" p-map "^4.0.0" @@ -5920,22 +5797,23 @@ cacache@^18.0.0, cacache@^18.0.3: tar "^6.1.11" unique-filename "^3.0.0" -cacache@^20.0.0, cacache@^20.0.1, cacache@^20.0.3: - version "20.0.3" - resolved "https://registry.yarnpkg.com/cacache/-/cacache-20.0.3.tgz#bd65205d5e6d86e02bbfaf8e4ce6008f1b81d119" - integrity sha512-3pUp4e8hv07k1QlijZu6Kn7c9+ZpWWk4j3F8N3xPuCExULobqJydKYOTj1FTq58srkJsXvO7LbGAH4C0ZU3WGw== +cacache@^18.0.0, cacache@^18.0.3: + version "18.0.4" + resolved "https://registry.yarnpkg.com/cacache/-/cacache-18.0.4.tgz#4601d7578dadb59c66044e157d02a3314682d6a5" + 
integrity sha512-B+L5iIa9mgcjLbliir2th36yEwPftrzteHYujzsx3dFP/31GCHcIeS8f5MGd80odLOjaOvSpU3EEAmRQptkxLQ== dependencies: - "@npmcli/fs" "^5.0.0" + "@npmcli/fs" "^3.1.0" fs-minipass "^3.0.0" - glob "^13.0.0" - lru-cache "^11.1.0" + glob "^10.2.2" + lru-cache "^10.0.1" minipass "^7.0.3" minipass-collect "^2.0.1" minipass-flush "^1.0.5" minipass-pipeline "^1.2.4" - p-map "^7.0.2" - ssri "^13.0.0" - unique-filename "^5.0.0" + p-map "^4.0.0" + ssri "^10.0.0" + tar "^6.1.11" + unique-filename "^3.0.0" cache-content-type@^1.0.0: version "1.0.1" @@ -6079,7 +5957,7 @@ chalk@^5.2.0: resolved "https://registry.yarnpkg.com/chalk/-/chalk-5.3.0.tgz#67c20a7ebef70e7f3970a01f90fa210cb6860385" integrity sha512-dLitG79d+GV1Nb/VYcCDFivJeK1hiukt9QjRNVOsUtTy1rR1YJsmpGGTZ3qJos+uw7WmWF4wUwBd9jxjocFC2w== -chalk@^5.4.1, chalk@^5.6.2: +chalk@^5.3.0: version "5.6.2" resolved "https://registry.yarnpkg.com/chalk/-/chalk-5.6.2.tgz#b1238b6e23ea337af71c7f8a295db5af0c158aea" integrity sha512-7NzBL0rN6fMUW+f7A6Io4h40qQlG+xGmtMxfbnH/K7TAtt8JQWVQK+6g0UXKMeVJoyV5EkkNsErQ8pVD3bLHbA== @@ -6159,11 +6037,6 @@ chownr@^2.0.0: resolved "https://registry.yarnpkg.com/chownr/-/chownr-2.0.0.tgz#15bfbe53d2eab4cf70f18a8cd68ebe5b3cb1dece" integrity sha512-bIomtDF5KGpdogkLd9VspvFzk9KfpyyGlS8YFVZl7TGPBHL5snIOnxeshwVgPteQ9b4Eydl+pVbIyE1DcvCWgQ== -chownr@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/chownr/-/chownr-3.0.0.tgz#9855e64ecd240a9cc4267ce8a4aa5d24a1da15e4" - integrity sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g== - ci-info@^3.2.0: version "3.9.0" resolved "https://registry.yarnpkg.com/ci-info/-/ci-info-3.9.0.tgz#4279a62028a7b1f262f3473fc9605f5e218c59b4" @@ -6174,17 +6047,12 @@ ci-info@^4.0.0: resolved "https://registry.yarnpkg.com/ci-info/-/ci-info-4.2.0.tgz#cbd21386152ebfe1d56f280a3b5feccbd96764c7" integrity sha512-cYY9mypksY8NRqgDB1XD1RiJL338v/551niynFTGkZOO2LHuB2OmOYxDIe/ttN9AHwrqdum1360G3ald0W9kCg== -ci-info@^4.3.1: - version "4.3.1" - 
resolved "https://registry.yarnpkg.com/ci-info/-/ci-info-4.3.1.tgz#355ad571920810b5623e11d40232f443f16f1daa" - integrity sha512-Wdy2Igu8OcBpI2pZePZ5oWjPC38tmDVx5WKUXKwlLYkA0ozo85sLsLvkBbBn/sZaSCMFOGZJ14fvW9t5/d7kdA== - -cidr-regex@5.0.1: - version "5.0.1" - resolved "https://registry.yarnpkg.com/cidr-regex/-/cidr-regex-5.0.1.tgz#4b3972457b06445832929f6f268b477fe0372c1f" - integrity sha512-2Apfc6qH9uwF3QHmlYBA8ExB9VHq+1/Doj9sEMY55TVBcpQ3y/+gmMpcNIBBtfb5k54Vphmta+1IxjMqPlWWAA== +cidr-regex@^3.1.1: + version "3.1.1" + resolved "https://registry.yarnpkg.com/cidr-regex/-/cidr-regex-3.1.1.tgz#ba1972c57c66f61875f18fd7dd487469770b571d" + integrity sha512-RBqYd32aDwbCMFJRL6wHOlDNYJsPNTt8vC82ErHF5vKt8QQzxm1FrkW8s/R5pVrXMf17sba09Uoy91PKiddAsw== dependencies: - ip-regex "5.0.0" + ip-regex "^4.1.0" cjs-module-lexer@^1.0.0: version "1.2.3" @@ -6243,18 +6111,6 @@ cli-cursor@^2.1.0: dependencies: restore-cursor "^2.0.0" -cli-highlight@^2.1.11: - version "2.1.11" - resolved "https://registry.yarnpkg.com/cli-highlight/-/cli-highlight-2.1.11.tgz#49736fa452f0aaf4fae580e30acb26828d2dc1bf" - integrity sha512-9KDcoEVwyUXrjcJNvHD0NFc/hiwe/WPVYIleQh2O1N2Zro5gWJZ/K+3DGn8w8P/F6FxOgzyC5bxDyHIgCSPhGg== - dependencies: - chalk "^4.0.0" - highlight.js "^10.7.1" - mz "^2.4.0" - parse5 "^5.1.1" - parse5-htmlparser2-tree-adapter "^6.0.0" - yargs "^16.0.0" - cli-progress@^3.12.0: version "3.12.0" resolved "https://registry.yarnpkg.com/cli-progress/-/cli-progress-3.12.0.tgz#807ee14b66bcc086258e444ad0f19e7d42577942" @@ -6277,7 +6133,7 @@ cli-spinners@^2.5.0: resolved "https://registry.yarnpkg.com/cli-spinners/-/cli-spinners-2.9.1.tgz#9c0b9dad69a6d47cbb4333c14319b060ed395a35" integrity sha512-jHgecW0pxkonBJdrKsqxgRX9AcG+u/5k0Q7WPDfi8AogLAdwxEkyYYNWwZ5GvVFoFx2uiY1eNcSK00fh+1+FyQ== -cli-table3@^0.6.5: +cli-table3@^0.6.3: version "0.6.5" resolved "https://registry.yarnpkg.com/cli-table3/-/cli-table3-0.6.5.tgz#013b91351762739c16a9567c21a04632e449bf2f" integrity 
sha512-+W/5efTR7y5HRD7gACw9yQjqMVvEMLBHmboM/kPWam+H+Hmyrgjh6YncVKK122YZkXrLudzTuAukUw9FnMf7IQ== @@ -6335,15 +6191,6 @@ cliui@^8.0.1: strip-ansi "^6.0.1" wrap-ansi "^7.0.0" -cliui@^9.0.1: - version "9.0.1" - resolved "https://registry.yarnpkg.com/cliui/-/cliui-9.0.1.tgz#6f7890f386f6f1f79953adc1f78dec46fcc2d291" - integrity sha512-k7ndgKhwoQveBL+/1tqGJYNz097I7WOvwbmmU2AR5+magtbjPWQTS1C5vzGkBC8Ym8UWRzfKUzUUqFLypY4Q+w== - dependencies: - string-width "^7.2.0" - strip-ansi "^7.1.0" - wrap-ansi "^9.0.0" - clone-deep@4.0.1: version "4.0.1" resolved "https://registry.yarnpkg.com/clone-deep/-/clone-deep-4.0.1.tgz#c19fd9bdbbf85942b4fd979c84dcf7d5f07c2387" @@ -6363,11 +6210,6 @@ cmd-shim@6.0.3, cmd-shim@^6.0.0: resolved "https://registry.yarnpkg.com/cmd-shim/-/cmd-shim-6.0.3.tgz#c491e9656594ba17ac83c4bd931590a9d6e26033" integrity sha512-FMabTRlc5t5zjdenF6mS0MBeFZm0XqHqeOkcskKFb/LYCcRQ5fVgLOHVc4Lq9CqABd9zhjwPjMBCJvMCziSVtA== -cmd-shim@^8.0.0: - version "8.0.0" - resolved "https://registry.yarnpkg.com/cmd-shim/-/cmd-shim-8.0.0.tgz#5be238f22f40faf3f7e8c92edc3f5d354f7657b2" - integrity sha512-Jk/BK6NCapZ58BKUxlSI+ouKRbjH1NLZCgJkYoab+vEHUY3f6OzpNBN9u7HFSv9J6TRDGs4PLOHezoKGaFRSCA== - co-body@^6.2.0: version "6.2.0" resolved "https://registry.yarnpkg.com/co-body/-/co-body-6.2.0.tgz#afd776d60e5659f4eee862df83499698eb1aea1b" @@ -6439,7 +6281,7 @@ colors@1.0.3: resolved "https://registry.yarnpkg.com/colors/-/colors-1.0.3.tgz#0433f44d809680fdeb60ed260f1b0c262e82a40b" integrity sha512-pFGrxThWcWQ2MsAz6RtgeWe4NK2kUE1WfsrvvlctdII745EW9I0yflqhe7++M5LEc7bV2c/9/5zc8sFcpL0Drw== -columnify@1.6.0: +columnify@1.6.0, columnify@^1.6.0: version "1.6.0" resolved "https://registry.yarnpkg.com/columnify/-/columnify-1.6.0.tgz#6989531713c9008bb29735e61e37acf5bd553cf3" integrity sha512-lomjuFZKfM6MSAnV9aCZC9sc0qGbmZdfygNv+nCpqVkSKdCxCklLtd16O0EILGkImHw9ZpHkAnHaB+8Zxq5W6Q== @@ -6570,13 +6412,6 @@ conventional-changelog-angular@^6.0.0: dependencies: compare-func "^2.0.0" 
-conventional-changelog-angular@^8.0.0: - version "8.1.0" - resolved "https://registry.yarnpkg.com/conventional-changelog-angular/-/conventional-changelog-angular-8.1.0.tgz#06223a40f818c5618982fdb92d2b2aac5e24d33e" - integrity sha512-GGf2Nipn1RUCAktxuVauVr1e3r8QrLP/B0lEUsFktmGqc3ddbQkhoJZHJctVU829U1c6mTSWftrVOCHaL85Q3w== - dependencies: - compare-func "^2.0.0" - conventional-changelog-conventionalcommits@^6.1.0: version "6.1.0" resolved "https://registry.yarnpkg.com/conventional-changelog-conventionalcommits/-/conventional-changelog-conventionalcommits-6.1.0.tgz#3bad05f4eea64e423d3d90fc50c17d2c8cf17652" @@ -6619,16 +6454,6 @@ conventional-changelog-writer@^6.0.0: semver "^7.0.0" split "^1.0.1" -conventional-changelog-writer@^8.0.0: - version "8.2.0" - resolved "https://registry.yarnpkg.com/conventional-changelog-writer/-/conventional-changelog-writer-8.2.0.tgz#1b77ef8e45ccc4559e02a23a34d50c15d2051e5a" - integrity sha512-Y2aW4596l9AEvFJRwFGJGiQjt2sBYTjPD18DdvxX9Vpz0Z7HQ+g1Z+6iYDAm1vR3QOJrDBkRHixHK/+FhkR6Pw== - dependencies: - conventional-commits-filter "^5.0.0" - handlebars "^4.7.7" - meow "^13.0.0" - semver "^7.5.2" - conventional-commits-filter@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/conventional-commits-filter/-/conventional-commits-filter-3.0.0.tgz#bf1113266151dd64c49cd269e3eb7d71d7015ee2" @@ -6637,10 +6462,10 @@ conventional-commits-filter@^3.0.0: lodash.ismatch "^4.4.0" modify-values "^1.0.1" -conventional-commits-filter@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/conventional-commits-filter/-/conventional-commits-filter-5.0.0.tgz#72811f95d379e79d2d39d5c0c53c9351ef284e86" - integrity sha512-tQMagCOC59EVgNZcC5zl7XqO30Wki9i9J3acbUvkaosCT6JX3EeFwJD7Qqp4MCikRnzS18WXV3BLIQ66ytu6+Q== +conventional-commits-filter@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/conventional-commits-filter/-/conventional-commits-filter-4.0.0.tgz#845d713e48dc7d1520b84ec182e2773c10c7bf7f" + integrity 
sha512-rnpnibcSOdFcdclpFwWa+pPlZJhXE7l+XK04zxhbWrhgpR96h33QLz8hITTXbcYICxVr3HZFtbtUAQ+4LdBo9A== conventional-commits-parser@^4.0.0: version "4.0.0" @@ -6652,12 +6477,15 @@ conventional-commits-parser@^4.0.0: meow "^8.1.2" split2 "^3.2.2" -conventional-commits-parser@^6.0.0: - version "6.2.1" - resolved "https://registry.yarnpkg.com/conventional-commits-parser/-/conventional-commits-parser-6.2.1.tgz#855e53c4792b1feaf93649eff5d75e0dbc2c63ad" - integrity sha512-20pyHgnO40rvfI0NGF/xiEoFMkXDtkF8FwHvk5BokoFoCuTQRI8vrNCNFWUOfuolKJMm1tPCHc8GgYEtr1XRNA== +conventional-commits-parser@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/conventional-commits-parser/-/conventional-commits-parser-5.0.0.tgz#57f3594b81ad54d40c1b4280f04554df28627d9a" + integrity sha512-ZPMl0ZJbw74iS9LuX9YIAiW8pfM5p3yh2o/NbXHbkFuZzY5jvdi5jFycEOkmBW5H5I7nA+D6f3UcsCLP2vvSEA== dependencies: - meow "^13.0.0" + JSONStream "^1.3.5" + is-text-path "^2.0.0" + meow "^12.0.1" + split2 "^4.0.0" conventional-recommended-bump@7.0.1: version "7.0.1" @@ -6672,11 +6500,6 @@ conventional-recommended-bump@7.0.1: git-semver-tags "^5.0.0" meow "^8.1.2" -convert-hrtime@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/convert-hrtime/-/convert-hrtime-5.0.0.tgz#f2131236d4598b95de856926a67100a0a97e9fa3" - integrity sha512-lOETlkIeYSJWcbbcvjRKGxVMXJR+8+OQb/mTPbA4ObPMytYIsUbuOE0Jzy60hjARYszq1id0j8KgVhC+WGZVTg== - convert-source-map@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-2.0.0.tgz#4b560f649fc4e918dd0ab75cf4961e8bc882d82a" @@ -6753,7 +6576,7 @@ cosmiconfig-typescript-loader@^4.0.0: resolved "https://registry.yarnpkg.com/cosmiconfig-typescript-loader/-/cosmiconfig-typescript-loader-4.4.0.tgz#f3feae459ea090f131df5474ce4b1222912319f9" integrity sha512-BabizFdC3wBHhbI4kJh0VkQP9GkBfoHPydD0COMce1nJ1kJAB3F2TmJ/I7diULBKtmEWSwEbuN/KDtgnmUUVmw== -cosmiconfig@9.0.0, cosmiconfig@^9.0.0: +cosmiconfig@9.0.0: version "9.0.0" resolved 
"https://registry.yarnpkg.com/cosmiconfig/-/cosmiconfig-9.0.0.tgz#34c3fc58287b915f3ae905ab6dc3de258b55ad9d" integrity sha512-itvL5h8RETACmOTFc4UfIyB2RfEHi71Ax6E/PivVxq9NseKbOWpeyHEOIbmAw1rs8Ak0VursQNww7lf7YtUwzg== @@ -7113,10 +6936,10 @@ diff@^4.0.1: resolved "https://registry.yarnpkg.com/diff/-/diff-4.0.2.tgz#60f3aecb89d5fae520c11aa19efc2bb982aade7d" integrity sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A== -diff@^8.0.2: - version "8.0.3" - resolved "https://registry.yarnpkg.com/diff/-/diff-8.0.3.tgz#c7da3d9e0e8c283bb548681f8d7174653720c2d5" - integrity sha512-qejHi7bcSD4hQAZE0tNAawRK1ZtafHDmMTMkrrIGgSLl7hTnQHmKCeB45xAcbfTqK2zowkM3j3bHt/4b/ARbYQ== +diff@^5.1.0: + version "5.2.2" + resolved "https://registry.yarnpkg.com/diff/-/diff-5.2.2.tgz#0a4742797281d09cfa699b79ea32d27723623bad" + integrity sha512-vtcDfH3TOjP8UekytvnHH1o1P4FcUdt4eQ1Y+Abap1tk/OB2MWQvcwS2ClCd1zuIhc3JKOx6p3kod8Vfys3E+A== dir-glob@^3.0.0, dir-glob@^3.0.1: version "3.0.1" @@ -7241,6 +7064,11 @@ duplexer2@~0.1.0: dependencies: readable-stream "^2.0.2" +eastasianwidth@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/eastasianwidth/-/eastasianwidth-0.2.0.tgz#696ce2ec0aa0e6ea93a397ffcf24aa7840c827cb" + integrity sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA== + ecdsa-sig-formatter@1.0.11: version "1.0.11" resolved "https://registry.yarnpkg.com/ecdsa-sig-formatter/-/ecdsa-sig-formatter-1.0.11.tgz#ae0f0fa2d85045ef14a817daa3ce9acd0489e5bf" @@ -7275,20 +7103,15 @@ emittery@^0.13.1: resolved "https://registry.yarnpkg.com/emittery/-/emittery-0.13.1.tgz#c04b8c3457490e0847ae51fced3af52d338e3dad" integrity sha512-DeWwawk6r5yR9jFgnDKYt4sLS0LmHJJi3ZOnb5/JdbYwj3nW+FxQnHIjhBKz8YLC7oRNPVM9NQ47I3CVx34eqQ== -emoji-regex@^10.3.0: - version "10.6.0" - resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-10.6.0.tgz#bf3d6e8f7f8fd22a65d9703475bc0147357a6b0d" - integrity 
sha512-toUI84YS5YmxW219erniWD0CIVOo46xGKColeNQRgOzDorgBi1v4D71/OFzgD9GO2UGKIv1C3Sp8DAn0+j5w7A== - emoji-regex@^8.0.0: version "8.0.0" resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-8.0.0.tgz#e818fd69ce5ccfcb404594f842963bf53164cc37" integrity sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A== -emojilib@^2.4.0: - version "2.4.0" - resolved "https://registry.yarnpkg.com/emojilib/-/emojilib-2.4.0.tgz#ac518a8bb0d5f76dda57289ccb2fdf9d39ae721e" - integrity sha512-5U0rVMU5Y2n2+ykNLQqMoqklN9ICBT/KsvC1Gz6vqHbz2AXXGkG+Pm5rMWk/8Vjrr/mY9985Hi8DYzn1F09Nyw== +emoji-regex@^9.2.2: + version "9.2.2" + resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-9.2.2.tgz#840c8803b0d8047f4ff0cf963176b32d4ef3ed72" + integrity sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg== encodeurl@^1.0.2, encodeurl@~1.0.2: version "1.0.2" @@ -7326,12 +7149,12 @@ entities@^4.2.0, entities@^4.4.0: resolved "https://registry.yarnpkg.com/entities/-/entities-4.5.0.tgz#5d268ea5e7113ec74c4d033b79ea5a35a488fb48" integrity sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw== -env-ci@^11.0.0, env-ci@^11.2.0: - version "11.2.0" - resolved "https://registry.yarnpkg.com/env-ci/-/env-ci-11.2.0.tgz#e7386afdf752962c587e7f3d3fb64d87d68e82c6" - integrity sha512-D5kWfzkmaOQDioPmiviWAVtKmpPT4/iJmMVQxWxMPJTFyTkdc5JQUfc5iXEeWxcOdsYTKSAiA/Age4NUOqKsRA== +env-ci@^9.0.0: + version "9.1.1" + resolved "https://registry.yarnpkg.com/env-ci/-/env-ci-9.1.1.tgz#f081684c64a639c6ff5cb801bd70464bd40498a4" + integrity sha512-Im2yEWeF4b2RAMAaWvGioXk6m0UNaIjD8hj28j2ij5ldnIFrDQT0+pzDvpbRkcjurhXhf/AsBKv8P2rtmGi9Aw== dependencies: - execa "^8.0.0" + execa "^7.0.0" java-properties "^1.0.2" env-paths@^2.2.0, env-paths@^2.2.1: @@ -7344,11 +7167,6 @@ envinfo@7.13.0: resolved "https://registry.yarnpkg.com/envinfo/-/envinfo-7.13.0.tgz#81fbb81e5da35d74e814941aeab7c325a606fb31" integrity 
sha512-cvcaMr7KqXVh4nyzGTVqTum+gAiL265x5jUWQIDLq//zOGbW+gSW/C+OWLleY/rs9Qole6AZLMXPbtIFQbqu+Q== -environment@^1.0.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/environment/-/environment-1.1.0.tgz#8e86c66b180f363c7ab311787e0259665f45a9f1" - integrity sha512-xUtoPkMggbz0MPyPiIWr1Kp4aeWJjDZ6SMvURhimjdZgsRuDplF5/s9hcgGhyXMhs+6vpnuoiZ2kFiu3FMnS8Q== - err-code@^2.0.2: version "2.0.3" resolved "https://registry.yarnpkg.com/err-code/-/err-code-2.0.3.tgz#23c2f3b756ffdfc608d30e27c9a941024807e7f9" @@ -7361,6 +7179,13 @@ error-ex@^1.3.1: dependencies: is-arrayish "^0.2.1" +error-ex@^1.3.2: + version "1.3.4" + resolved "https://registry.yarnpkg.com/error-ex/-/error-ex-1.3.4.tgz#b3a8d8bb6f92eecc1629e3e27d3c8607a8a32414" + integrity sha512-sqQamAnR14VgCr1A618A3sGrygcpK+HEbenA/HiEAkkUwcZIIB/tgWqHFxWgOyDh4nB4JCRimh79dR5Ywc9MDQ== + dependencies: + is-arrayish "^0.2.1" + es-abstract@^1.22.1: version "1.22.3" resolved "https://registry.yarnpkg.com/es-abstract/-/es-abstract-1.22.3.tgz#48e79f5573198de6dee3589195727f4f74bc4f32" @@ -7591,7 +7416,7 @@ escape-string-regexp@4.0.0, escape-string-regexp@^4.0.0: resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz#14ba83a5d373e3d311e5afca29cf5bfad965bf34" integrity sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA== -escape-string-regexp@5.0.0: +escape-string-regexp@5.0.0, escape-string-regexp@^5.0.0: version "5.0.0" resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz#4683126b500b61762f2dbebace1806e8be31b1c8" integrity sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw== @@ -7921,7 +7746,7 @@ execa@^5.0.0: signal-exit "^3.0.3" strip-final-newline "^2.0.0" -execa@^7.1.1: +execa@^7.0.0, execa@^7.1.1: version "7.2.0" resolved "https://registry.yarnpkg.com/execa/-/execa-7.2.0.tgz#657e75ba984f42a70f38928cedc87d6f2d4fe4e9" integrity 
sha512-UduyVP7TLB5IcAQl+OzLyLcS/l32W/GLg+AhHJ+ow40FOk2U3SAllPwR44v4vmdFwIWqpdwxxpQbF1n5ta9seA== @@ -7951,24 +7776,6 @@ execa@^8.0.0: signal-exit "^4.1.0" strip-final-newline "^3.0.0" -execa@^9.0.0: - version "9.6.1" - resolved "https://registry.yarnpkg.com/execa/-/execa-9.6.1.tgz#5b90acedc6bdc0fa9b9a6ddf8f9cbb0c75a7c471" - integrity sha512-9Be3ZoN4LmYR90tUoVu2te2BsbzHfhJyfEiAVfz7N5/zv+jduIfLrV2xdQXOHbaD6KgpGdO9PRPM1Y4Q9QkPkA== - dependencies: - "@sindresorhus/merge-streams" "^4.0.0" - cross-spawn "^7.0.6" - figures "^6.1.0" - get-stream "^9.0.0" - human-signals "^8.0.1" - is-plain-obj "^4.1.0" - is-stream "^4.0.1" - npm-run-path "^6.0.0" - pretty-ms "^9.2.0" - signal-exit "^4.1.0" - strip-final-newline "^4.0.0" - yoctocolors "^2.1.1" - exit@^0.1.2: version "0.1.2" resolved "https://registry.yarnpkg.com/exit/-/exit-0.1.2.tgz#0632638f8d877cc82107d30a0fff1a17cba1cd0c" @@ -8174,11 +7981,6 @@ fast-content-type-parse@^1.0.0, fast-content-type-parse@^1.1.0: resolved "https://registry.yarnpkg.com/fast-content-type-parse/-/fast-content-type-parse-1.1.0.tgz#4087162bf5af3294d4726ff29b334f72e3a1092c" integrity sha512-fBHHqSTFLVnR61C+gltJuE5GkVQMV0S2nqUO8TJ+5Z3qAKG8vAx4FKai1s5jq/inV1+sREynIWSuQ6HgoSXpDQ== -fast-content-type-parse@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/fast-content-type-parse/-/fast-content-type-parse-3.0.0.tgz#5590b6c807cc598be125e6740a9fde589d2b7afb" - integrity sha512-ZvLdcY8P+N8mGQJahJV5G4U88CSvT1rP8ApL6uETe88MBXrBHAkZlSEySdUlyztF7ccb+Znos3TFqaepHxdhBg== - fast-decode-uri-component@^1.0.0, fast-decode-uri-component@^1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/fast-decode-uri-component/-/fast-decode-uri-component-1.0.1.tgz#46f8b6c22b30ff7a81357d4f59abfae938202543" @@ -8205,7 +8007,7 @@ fast-glob@^3.2.9: merge2 "^1.3.0" micromatch "^4.0.4" -fast-glob@^3.3.2: +fast-glob@^3.3.2, fast-glob@^3.3.3: version "3.3.3" resolved 
"https://registry.yarnpkg.com/fast-glob/-/fast-glob-3.3.3.tgz#d06d585ce8dba90a16b0505c543c3ccfb3aeb818" integrity sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg== @@ -8450,11 +8252,6 @@ fdir@^6.4.3: resolved "https://registry.yarnpkg.com/fdir/-/fdir-6.4.6.tgz#2b268c0232697063111bbf3f64810a2a741ba281" integrity sha512-hiFoqpyZcfNm1yc4u8oWCf9A2c4D3QjCrks3zmoVKVxpQRzmPNar1hUJcBG2RQHvEVGDN+Jm81ZheVLAQMK6+w== -fdir@^6.5.0: - version "6.5.0" - resolved "https://registry.yarnpkg.com/fdir/-/fdir-6.5.0.tgz#ed2ab967a331ade62f18d077dae192684d50d350" - integrity sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg== - figures@3.2.0, figures@^3.0.0: version "3.2.0" resolved "https://registry.yarnpkg.com/figures/-/figures-3.2.0.tgz#625c18bd293c604dc4a8ddb2febf0c88341746af" @@ -8469,12 +8266,13 @@ figures@^2.0.0: dependencies: escape-string-regexp "^1.0.5" -figures@^6.0.0, figures@^6.1.0: - version "6.1.0" - resolved "https://registry.yarnpkg.com/figures/-/figures-6.1.0.tgz#935479f51865fa7479f6fa94fc6fc7ac14e62c4a" - integrity sha512-d+l3qxjSesT4V7v2fh+QnmFnUWv9lSpjarhShNTgBOfA0ttejbQUAlHLitbjkoRiDulW0OPoQPYIGhIC8ohejg== +figures@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/figures/-/figures-5.0.0.tgz#126cd055052dea699f8a54e8c9450e6ecfc44d5f" + integrity sha512-ej8ksPF4x6e5wvK9yevct0UCXh8TTFlWGVLlgjZuoBH1HwjIfKE/IdL5mq89sFA7zELi1VhKpmtDnrs7zWyeyg== dependencies: - is-unicode-supported "^2.0.0" + escape-string-regexp "^5.0.0" + is-unicode-supported "^1.2.0" file-entry-cache@^6.0.1: version "6.0.1" @@ -8577,11 +8375,6 @@ find-my-way@^8.0.0: fast-querystring "^1.0.0" safe-regex2 "^3.1.0" -find-up-simple@^1.0.0, find-up-simple@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/find-up-simple/-/find-up-simple-1.0.1.tgz#18fb90ad49e45252c4d7fca56baade04fa3fca1e" - integrity 
sha512-afd4O7zpqHeRyg4PfDQsXmlDe2PfdHtJt6Akt8jOWaApLOZk5JXs6VMR29lz03pRe9mpykrRCYIYxaJYcfpncQ== - find-up@^2.0.0: version "2.1.0" resolved "https://registry.yarnpkg.com/find-up/-/find-up-2.1.0.tgz#45d1b7e506c717ddd482775a2b77920a3c0c57a7" @@ -8605,13 +8398,20 @@ find-up@^5.0.0: locate-path "^6.0.0" path-exists "^4.0.0" -find-versions@^6.0.0: - version "6.0.0" - resolved "https://registry.yarnpkg.com/find-versions/-/find-versions-6.0.0.tgz#fda285d3bb7c0c098f09e0727c54d31735f0c7d1" - integrity sha512-2kCCtc+JvcZ86IGAz3Z2Y0A1baIz9fL31pH/0S1IqZr9Iwnjq8izfPtrCyQKO6TLMPELLsQMre7VDqeIKCsHkA== +find-up@^6.3.0: + version "6.3.0" + resolved "https://registry.yarnpkg.com/find-up/-/find-up-6.3.0.tgz#2abab3d3280b2dc7ac10199ef324c4e002c8c790" + integrity sha512-v2ZsoEuVHYy8ZIlYqwPe/39Cy+cFDzp4dXPaxNvkEuouymu+2Jbz0PxpKarJHYJTmv2HWT3O382qY8l4jMWthw== + dependencies: + locate-path "^7.1.0" + path-exists "^5.0.0" + +find-versions@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/find-versions/-/find-versions-5.1.0.tgz#973f6739ce20f5e439a27eba8542a4b236c8e685" + integrity sha512-+iwzCJ7C5v5KgcBuueqVoNiHVoQpwiUK5XFLjf0affFTep+Wcw93tPvmb8tqujDNmzhBDPddnWV/qgWSXgq+Hg== dependencies: semver-regex "^4.0.5" - super-regex "^1.0.0" fishery@^2.2.2: version "2.2.2" @@ -8663,6 +8463,14 @@ for-each@^0.3.5: dependencies: is-callable "^1.2.7" +foreground-child@^3.1.0: + version "3.3.1" + resolved "https://registry.yarnpkg.com/foreground-child/-/foreground-child-3.3.1.tgz#32e8e9ed1b68a3497befb9ac2b6adf92a638576f" + integrity sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw== + dependencies: + cross-spawn "^7.0.6" + signal-exit "^4.0.1" + forest-cli@5.3.8: version "5.3.8" resolved "https://registry.yarnpkg.com/forest-cli/-/forest-cli-5.3.8.tgz#61956f4c7363e7de50649dc222c4eea10d47d4c2" @@ -8800,7 +8608,7 @@ fs-extra@^11.2.0: jsonfile "^6.0.1" universalify "^2.0.0" -fs-minipass@^2.0.0: +fs-minipass@^2.0.0, fs-minipass@^2.1.0: version 
"2.1.0" resolved "https://registry.yarnpkg.com/fs-minipass/-/fs-minipass-2.1.0.tgz#7f5036fdbf12c63c169190cbe4199c852271f9fb" integrity sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg== @@ -8829,11 +8637,6 @@ function-bind@^1.1.2: resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.2.tgz#2c02d864d97f3ea6c8830c464cbd11ab6eab7a1c" integrity sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA== -function-timeout@^1.0.1: - version "1.0.2" - resolved "https://registry.yarnpkg.com/function-timeout/-/function-timeout-1.0.2.tgz#e5a7b6ffa523756ff20e1231bbe37b5f373aadd5" - integrity sha512-939eZS4gJ3htTHAldmyyuzlrD58P03fHG49v2JfFXbV6OhvZKRC9j2yAtdHw/zrp2zXHuv05zMIy40F0ge7spA== - function.prototype.name@^1.1.6: version "1.1.6" resolved "https://registry.yarnpkg.com/function.prototype.name/-/function.prototype.name-1.1.6.tgz#cdf315b7d90ee77a4c6ee216c3c3362da07533fd" @@ -8890,6 +8693,20 @@ gauge@^4.0.3: strip-ansi "^6.0.1" wide-align "^1.1.5" +gauge@^5.0.0: + version "5.0.2" + resolved "https://registry.yarnpkg.com/gauge/-/gauge-5.0.2.tgz#7ab44c11181da9766333f10db8cd1e4b17fd6c46" + integrity sha512-pMaFftXPtiGIHCJHdcUUx9Rby/rFT/Kkt3fIIGCs+9PMDIljSyRiqraTlxNtBReJRDfUefpa263RQ3vnp5G/LQ== + dependencies: + aproba "^1.0.3 || ^2.0.0" + color-support "^1.1.3" + console-control-strings "^1.1.0" + has-unicode "^2.0.1" + signal-exit "^4.0.1" + string-width "^4.2.3" + strip-ansi "^6.0.1" + wide-align "^1.1.5" + generate-function@^2.3.1: version "2.3.1" resolved "https://registry.yarnpkg.com/generate-function/-/generate-function-2.3.1.tgz#f069617690c10c868e73b8465746764f97c3479f" @@ -8912,11 +8729,6 @@ get-caller-file@^2.0.5: resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-2.0.5.tgz#4f94412a82db32f36e3b0b9741f8a97feb031f7e" integrity sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg== -get-east-asian-width@^1.0.0: 
- version "1.4.0" - resolved "https://registry.yarnpkg.com/get-east-asian-width/-/get-east-asian-width-1.4.0.tgz#9bc4caa131702b4b61729cb7e42735bc550c9ee6" - integrity sha512-QZjmEOC+IT1uk6Rx0sX22V6uHWVwbdbxf1faPqJ1QhLdGgsRGCZoyaQBm/piRdJy/D2um6hM1UP7ZEeQ4EkP+Q== - get-intrinsic@^1.0.2, get-intrinsic@^1.1.1, get-intrinsic@^1.1.3, get-intrinsic@^1.2.0, get-intrinsic@^1.2.1, get-intrinsic@^1.2.2: version "1.2.2" resolved "https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.2.2.tgz#281b7622971123e1ef4b3c90fd7539306da93f3b" @@ -9009,14 +8821,6 @@ get-stream@^8.0.1: resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-8.0.1.tgz#def9dfd71742cd7754a7761ed43749a27d02eca2" integrity sha512-VaUJspBffn/LMCJVoMvSAdmscJyS1auj5Zulnn5UoYcY531UWmdwhRWkcGKnGU93m5HSXP9LP2usOryrBtQowA== -get-stream@^9.0.0: - version "9.0.1" - resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-9.0.1.tgz#95157d21df8eb90d1647102b63039b1df60ebd27" - integrity sha512-kVCxPF3vQM/N0B1PmoqVUqgHP+EeVjmZSQn+1oCRPxd2P21P2F19lIgbR3HBosbB1PUhOAoctJnfEn2GbN2eZA== - dependencies: - "@sec-ant/readable-stream" "^0.4.1" - is-stream "^4.0.1" - get-symbol-description@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/get-symbol-description/-/get-symbol-description-1.0.0.tgz#7fdb81c900101fbd564dd5f1a30af5aadc1e58d6" @@ -9123,23 +8927,17 @@ glob-parent@^5.1.2, glob-parent@~5.1.2: dependencies: is-glob "^4.0.1" -glob@>=10.5.0, glob@^10.2.2, glob@^10.3.10, glob@^9.2.0: - version "13.0.2" - resolved "https://registry.yarnpkg.com/glob/-/glob-13.0.2.tgz#74b28859255e319c84d1aed1a0a5b5248bfea227" - integrity sha512-035InabNu/c1lW0tzPhAgapKctblppqsKKG9ZaNzbr+gXwWMjXoiyGSyB9sArzrjG7jY+zntRq5ZSUYemrnWVQ== +glob@^10.2.2, glob@^10.3.10: + version "10.5.0" + resolved "https://registry.yarnpkg.com/glob/-/glob-10.5.0.tgz#8ec0355919cd3338c28428a23d4f24ecc5fe738c" + integrity sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg== dependencies: - minimatch 
"^10.1.2" - minipass "^7.1.2" - path-scurry "^2.0.0" - -glob@^13.0.0: - version "13.0.0" - resolved "https://registry.yarnpkg.com/glob/-/glob-13.0.0.tgz#9d9233a4a274fc28ef7adce5508b7ef6237a1be3" - integrity sha512-tvZgpqk6fz4BaNZ66ZsRaZnbHvP/jG3uKJvAZOwEVUL4RTA5nJeeLYfyN9/VA8NX/V3IBG+hkeuGpKjvELkVhA== - dependencies: - minimatch "^10.1.1" + foreground-child "^3.1.0" + jackspeak "^3.1.2" + minimatch "^9.0.4" minipass "^7.1.2" - path-scurry "^2.0.0" + package-json-from-dist "^1.0.0" + path-scurry "^1.11.1" glob@^7.1.3, glob@^7.1.4: version "7.2.3" @@ -9153,6 +8951,27 @@ glob@^7.1.3, glob@^7.1.4: once "^1.3.0" path-is-absolute "^1.0.0" +glob@^8.0.1: + version "8.1.0" + resolved "https://registry.yarnpkg.com/glob/-/glob-8.1.0.tgz#d388f656593ef708ee3e34640fdfb99a9fd1c33e" + integrity sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ== + dependencies: + fs.realpath "^1.0.0" + inflight "^1.0.4" + inherits "2" + minimatch "^5.0.1" + once "^1.3.0" + +glob@^9.2.0: + version "9.3.5" + resolved "https://registry.yarnpkg.com/glob/-/glob-9.3.5.tgz#ca2ed8ca452781a3009685607fdf025a899dfe21" + integrity sha512-e1LleDykUz2Iu+MTYdkSsuWX8lvAjAcs0Xef0lNIu0S2wOAzuTxCJtcd9S3cijlwYF18EsU3rzb8jPVobxDh9Q== + dependencies: + fs.realpath "^1.0.0" + minimatch "^8.0.2" + minipass "^4.2.4" + path-scurry "^1.6.1" + global-dirs@^0.1.1: version "0.1.1" resolved "https://registry.yarnpkg.com/global-dirs/-/global-dirs-0.1.1.tgz#b319c0dd4607f353f3be9cca4c72fc148c49f445" @@ -9199,6 +9018,18 @@ globby@^11.1.0: merge2 "^1.4.1" slash "^3.0.0" +globby@^14.0.0: + version "14.1.0" + resolved "https://registry.yarnpkg.com/globby/-/globby-14.1.0.tgz#138b78e77cf5a8d794e327b15dce80bf1fb0a73e" + integrity sha512-0Ia46fDOaT7k4og1PDW4YbodWWr3scS2vAr2lTbsplOt2WkKp0vQbkI9wKis/T5LV/dqPjO3bpS/z6GTJB82LA== + dependencies: + "@sindresorhus/merge-streams" "^2.1.0" + fast-glob "^3.3.3" + ignore "^7.0.3" + path-type "^6.0.0" + slash "^5.1.0" + unicorn-magic "^0.3.0" + gopd@^1.0.1: 
version "1.0.1" resolved "https://registry.yarnpkg.com/gopd/-/gopd-1.0.1.tgz#29ff76de69dac7489b7c0918a5788e56477c332c" @@ -9370,20 +9201,15 @@ hasown@^2.0.2: resolved "https://registry.yarnpkg.com/heap/-/heap-0.2.7.tgz#1e6adf711d3f27ce35a81fe3b7bd576c2260a8fc" integrity sha512-2bsegYkkHO+h/9MGbn6KWcE45cHZgPANo5LXF7EvWdT0yT2EguSVO1nDgU5c8+ZOPwp2vMNa7YFsJhVcDR9Sdg== -highlight.js@^10.7.1: - version "10.7.3" - resolved "https://registry.yarnpkg.com/highlight.js/-/highlight.js-10.7.3.tgz#697272e3991356e40c3cac566a74eef681756531" - integrity sha512-tzcUFauisWKNHaRkN4Wjl/ZA07gENAjFl3J/c480dprkGTg5EQstgaNFqBfUqCq54kZRIEcreTsAgF/m2quD7A== - -hono@>=4.11.10, hono@^4.11.4: +hono@^4.11.4: version "4.12.3" resolved "https://registry.yarnpkg.com/hono/-/hono-4.12.3.tgz#fd8dd1127c30956a9d58c1b0c4535d21c1ef3e16" integrity sha512-SFsVSjp8sj5UumXOOFlkZOG6XS9SJDKw0TbwFeV+AJ8xlST8kxK5Z/5EYa111UY8732lK2S/xB653ceuaoGwpg== -hook-std@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/hook-std/-/hook-std-4.0.0.tgz#8ad817e2405f0634fa128822a8b27054a8120262" - integrity sha512-IHI4bEVOt3vRUDJ+bFA9VUJlo7SzvFARPNLw75pqSmAOP2HmTWfFJtPvLBrDrlgjEYXY9zs7SFdHPQaJShkSCQ== +hook-std@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/hook-std/-/hook-std-3.0.0.tgz#47038a01981e07ce9d83a6a3b2eb98cad0f7bd58" + integrity sha512-jHRQzjSDzMtFy34AGj1DN+vq54WVuhSvKgrHf0OMiFQTwDD4L/qqofVEWjLOBMTn5+lCD3fPg32W9yOfnEJTTw== hosted-git-info@^2.1.4: version "2.8.9" @@ -9397,6 +9223,13 @@ hosted-git-info@^4.0.0, hosted-git-info@^4.0.1: dependencies: lru-cache "^6.0.0" +hosted-git-info@^6.0.0, hosted-git-info@^6.1.1, hosted-git-info@^6.1.3: + version "6.1.3" + resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-6.1.3.tgz#2ee1a14a097a1236bddf8672c35b613c46c55946" + integrity sha512-HVJyzUrLIL1c0QmviVh5E8VGyUS7xCFPS6yydaVd1UegW+ibV/CohqTH9MkOLDp5o+rb82DMo77PTuc9F/8GKw== + dependencies: + lru-cache "^7.5.1" + hosted-git-info@^7.0.0, hosted-git-info@^7.0.2: version 
"7.0.2" resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-7.0.2.tgz#9b751acac097757667f30114607ef7b661ff4f17" @@ -9404,13 +9237,6 @@ hosted-git-info@^7.0.0, hosted-git-info@^7.0.2: dependencies: lru-cache "^10.0.1" -hosted-git-info@^9.0.0, hosted-git-info@^9.0.2: - version "9.0.2" - resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-9.0.2.tgz#b38c8a802b274e275eeeccf9f4a1b1a0a8557ada" - integrity sha512-M422h7o/BR3rmCQ8UHi7cyyMqKltdP9Uo+J2fXK+RSAY+wTcKOIRyhTuKv4qn+DJf3g+PL890AzId5KZpX+CBg== - dependencies: - lru-cache "^11.1.0" - html-escaper@^2.0.0: version "2.0.2" resolved "https://registry.yarnpkg.com/html-escaper/-/html-escaper-2.0.2.tgz#dfd60027da36a36dfcbe236262c00a5822681453" @@ -9556,11 +9382,6 @@ human-signals@^5.0.0: resolved "https://registry.yarnpkg.com/human-signals/-/human-signals-5.0.0.tgz#42665a284f9ae0dade3ba41ebc37eb4b852f3a28" integrity sha512-AXcZb6vzzrFAUE61HnN4mpLqd/cSIwNQjtNWR0euPm6y0iqx3G4gOXaIDdtdDwZmhwe82LA6+zinmW4UBWVePQ== -human-signals@^8.0.1: - version "8.0.1" - resolved "https://registry.yarnpkg.com/human-signals/-/human-signals-8.0.1.tgz#f08bb593b6d1db353933d06156cedec90abe51fb" - integrity sha512-eKCa6bwnJhvxj14kZk5NCPc6Hb6BdsU9DZcOnmQKSnO1VKrfV0zCvtttPZUsBvjmNDn8rpcJfpwSYnHBjc95MQ== - humanize-ms@^1.2.1: version "1.2.1" resolved "https://registry.yarnpkg.com/humanize-ms/-/humanize-ms-1.2.1.tgz#c46e3159a293f6b896da29316d8b6fe8bb79bbed" @@ -9616,25 +9437,23 @@ ignore-by-default@^1.0.1: resolved "https://registry.yarnpkg.com/ignore-by-default/-/ignore-by-default-1.0.1.tgz#48ca6d72f6c6a3af00a9ad4ae6876be3889e2b09" integrity sha512-Ius2VYcGNk7T90CppJqcIkS5ooHUZyIQK+ClZfMfMNFEF9VSE73Fq+906u/CWu92x4gzZMWOwfFYckPObzdEbA== -ignore-walk@^6.0.4: +ignore-walk@^6.0.0, ignore-walk@^6.0.4: version "6.0.5" resolved "https://registry.yarnpkg.com/ignore-walk/-/ignore-walk-6.0.5.tgz#ef8d61eab7da169078723d1f82833b36e200b0dd" integrity 
sha512-VuuG0wCnjhnylG1ABXT3dAuIpTNDs/G8jlpmwXY03fXoXy/8ZK8/T+hMzt8L4WnrLCJgdybqgPagnF/f97cg3A== dependencies: minimatch "^9.0.0" -ignore-walk@^8.0.0: - version "8.0.0" - resolved "https://registry.yarnpkg.com/ignore-walk/-/ignore-walk-8.0.0.tgz#380c173badc3a18c57ff33440753f0052f572b14" - integrity sha512-FCeMZT4NiRQGh+YkeKMtWrOmBgWjHjMJ26WQWrRQyoyzqevdaGSakUaJW5xQYmjLlUVk2qUnCjYVBax9EKKg8A== - dependencies: - minimatch "^10.0.3" - ignore@^5.0.4, ignore@^5.2.0: version "5.3.0" resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.3.0.tgz#67418ae40d34d6999c95ff56016759c718c82f78" integrity sha512-g7dmpshy+gD7mh88OC9NwSGTKoc3kyLAZQRU1mt53Aw/vnvfXnbC+F/7F7QoYVKbV+KNvJx8wArewKy1vXMtlg== +ignore@^7.0.3: + version "7.0.5" + resolved "https://registry.yarnpkg.com/ignore/-/ignore-7.0.5.tgz#4cb5f6cd7d4c7ab0365738c7aea888baa6d7efd9" + integrity sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg== + image-size@^1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/image-size/-/image-size-1.0.2.tgz#d778b6d0ab75b2737c1556dd631652eb963bc486" @@ -9655,13 +9474,10 @@ import-fresh@^3.0.0, import-fresh@^3.2.1, import-fresh@^3.3.0: parent-module "^1.0.0" resolve-from "^4.0.0" -import-from-esm@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/import-from-esm/-/import-from-esm-2.0.0.tgz#184eb9aad4f557573bd6daf967ad5911b537797a" - integrity sha512-YVt14UZCgsX1vZQ3gKjkWVdBdHQ6eu3MPU1TBgL1H5orXe2+jWD006WCPPtOuwlQm10NuzOW5WawiF1Q9veW8g== - dependencies: - debug "^4.3.4" - import-meta-resolve "^4.0.0" +import-from@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/import-from/-/import-from-4.0.0.tgz#2710b8d66817d232e16f4166e319248d3d5492e2" + integrity sha512-P9J71vT5nLlDeV8FHs5nNxaLbrpfAV5cF5srvbZfpwpcJoM/xZR3hiv+q+SAnuSmuGbXMWud063iIMx/V/EWZQ== import-local@3.1.0, import-local@^3.0.2: version "3.1.0" @@ -9671,11 +9487,6 @@ import-local@3.1.0, import-local@^3.0.2: pkg-dir "^4.2.0" resolve-cwd "^3.0.0" 
-import-meta-resolve@^4.0.0: - version "4.2.0" - resolved "https://registry.yarnpkg.com/import-meta-resolve/-/import-meta-resolve-4.2.0.tgz#08cb85b5bd37ecc8eb1e0f670dc2767002d43734" - integrity sha512-Iqv2fzaTQN28s/FwZAoFq0ZSs/7hMAHJVX+w8PZl3cY19Pxk6jFFalxQoIfW2826i/fDLXv8IiEZRIT0lDuWcg== - imurmurhash@^0.1.4: version "0.1.4" resolved "https://registry.yarnpkg.com/imurmurhash/-/imurmurhash-0.1.4.tgz#9218b9b2b928a238b13dc4fb6b6d576f231453ea" @@ -9691,11 +9502,6 @@ indent-string@^5.0.0: resolved "https://registry.yarnpkg.com/indent-string/-/indent-string-5.0.0.tgz#4fd2980fccaf8622d14c64d694f4cf33c81951a5" integrity sha512-m6FAo/spmsW2Ab2fU35JTYwtOKa2yAwXSwgjSv1TJzh4Mh7mC3lzAOVLBprb72XsTrgkEIsl7YrFNAiDiRhIGg== -index-to-position@^1.1.0: - version "1.2.0" - resolved "https://registry.yarnpkg.com/index-to-position/-/index-to-position-1.2.0.tgz#c800eb34dacf4dbf96b9b06c7eb78d5f704138b4" - integrity sha512-Yg7+ztRkqslMAS2iFaU+Oa4KTSidr63OsFGlOrJoW981kIYO3CGCS3wA95P1mUi/IVSJkn0D479KTJpVpvFNuw== - infer-owner@^1.0.4: version "1.0.4" resolved "https://registry.yarnpkg.com/infer-owner/-/infer-owner-1.0.4.tgz#c4cefcaa8e51051c2a40ba2ce8a3d27295af9467" @@ -9734,16 +9540,11 @@ ini@^1.3.2, ini@^1.3.4, ini@^1.3.8, ini@~1.3.0: resolved "https://registry.yarnpkg.com/ini/-/ini-1.3.8.tgz#a29da425b48806f34767a4efce397269af28432c" integrity sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew== -ini@^4.1.3: +ini@^4.1.0, ini@^4.1.1, ini@^4.1.3: version "4.1.3" resolved "https://registry.yarnpkg.com/ini/-/ini-4.1.3.tgz#4c359675a6071a46985eb39b14e4a2c0ec98a795" integrity sha512-X7rqawQBvfdjS10YU1y1YVreA3SsLrW9dX2CewP2EbBJM4ypVNLDkO5y04gejPwKIY9lR+7r9gn3rFPt/kmWFg== -ini@^6.0.0: - version "6.0.0" - resolved "https://registry.yarnpkg.com/ini/-/ini-6.0.0.tgz#efc7642b276f6a37d22fdf56ef50889d7146bf30" - integrity sha512-IBTdIkzZNOpqm7q3dRqJvMaldXjDHWkEDfrwGEQTs5eaQMWV+djAhR+wahyNNMAa+qpbDUhBMVt4ZKNwpPm7xQ== - init-package-json@6.0.3: version "6.0.3" 
resolved "https://registry.yarnpkg.com/init-package-json/-/init-package-json-6.0.3.tgz#2552fba75b6eed2495dc97f44183e2e5a5bcf8b0" @@ -9757,18 +9558,18 @@ init-package-json@6.0.3: validate-npm-package-license "^3.0.4" validate-npm-package-name "^5.0.0" -init-package-json@^8.2.4: - version "8.2.4" - resolved "https://registry.yarnpkg.com/init-package-json/-/init-package-json-8.2.4.tgz#dc3c1c13e6b2da9631acb5b4763f5d5523133647" - integrity sha512-SqX/+tPl3sZD+IY0EuMiM1kK1B45h+P6JQPo3Q9zlqNINX2XiX3x/WSbYGFqS6YCkODNbGb3L5RawMrYE/cfKw== +init-package-json@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/init-package-json/-/init-package-json-5.0.0.tgz#030cf0ea9c84cfc1b0dc2e898b45d171393e4b40" + integrity sha512-kBhlSheBfYmq3e0L1ii+VKe3zBTLL5lDCDWR+f9dLmEGSB3MqLlMlsolubSsyI88Bg6EA+BIMlomAnQ1SwgQBw== dependencies: - "@npmcli/package-json" "^7.0.0" - npm-package-arg "^13.0.0" - promzard "^3.0.1" - read "^5.0.1" - semver "^7.7.2" + npm-package-arg "^10.0.0" + promzard "^1.0.0" + read "^2.0.0" + read-package-json "^6.0.0" + semver "^7.3.5" validate-npm-package-license "^3.0.4" - validate-npm-package-name "^7.0.0" + validate-npm-package-name "^5.0.0" inquirer@6.2.0: version "6.2.0" @@ -9850,10 +9651,10 @@ ip-address@^5.8.9: lodash "^4.17.15" sprintf-js "1.1.2" -ip-regex@5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/ip-regex/-/ip-regex-5.0.0.tgz#cd313b2ae9c80c07bd3851e12bf4fa4dc5480632" - integrity sha512-fOCG6lhoKKakwv+C6KdsOnGvgXnmgfmp0myi3bcNwj3qfwPAxRKWEuFhvEFF7ceYIz6+1jRZ+yguLFAmUNPEfw== +ip-regex@^4.1.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/ip-regex/-/ip-regex-4.3.0.tgz#687275ab0f57fa76978ff8f4dddc8a23d5990db5" + integrity sha512-B9ZWJxHHOHUhUjCPrMpLD4xEq35bUTClHM1S6CBU5ixQnkZmwipwgc96vAd7AAGM9TGHvJR+Uss+/Ak6UphK+Q== ip6@0.0.4: version "0.0.4" @@ -9989,12 +9790,12 @@ is-ci@3.0.1: dependencies: ci-info "^3.2.0" -is-cidr@^6.0.1: - version "6.0.1" - resolved 
"https://registry.yarnpkg.com/is-cidr/-/is-cidr-6.0.1.tgz#125e9dead938b6fa996aa500662a5e9f88f338f4" - integrity sha512-JIJlvXodfsoWFAvvjB7Elqu8qQcys2SZjkIJCLdk4XherUqZ6+zH7WIpXkp4B3ZxMH0Fz7zIsZwyvs6JfM0csw== +is-cidr@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/is-cidr/-/is-cidr-4.0.2.tgz#94c7585e4c6c77ceabf920f8cde51b8c0fda8814" + integrity sha512-z4a1ENUajDbEl/Q6/pVBpTR1nBjjEE1X7qb7bmWYanNnPoKAvUCPFKeXV6Fe4mgTkWKBqiHIcwsI3SndiO5FeA== dependencies: - cidr-regex "5.0.1" + cidr-regex "^3.1.1" is-core-module@^2.13.0, is-core-module@^2.5.0: version "2.13.1" @@ -10003,7 +9804,7 @@ is-core-module@^2.13.0, is-core-module@^2.5.0: dependencies: hasown "^2.0.0" -is-core-module@^2.16.1: +is-core-module@^2.16.1, is-core-module@^2.8.1: version "2.16.1" resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.16.1.tgz#2a98801a849f43e2add644fbb6bc6229b19a4ef4" integrity sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w== @@ -10164,11 +9965,6 @@ is-plain-obj@^2.0.0: resolved "https://registry.yarnpkg.com/is-plain-obj/-/is-plain-obj-2.1.0.tgz#45e42e37fccf1f40da8e5f76ee21515840c09287" integrity sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA== -is-plain-obj@^4.1.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/is-plain-obj/-/is-plain-obj-4.1.0.tgz#d65025edec3657ce032fd7db63c97883eaed71f0" - integrity sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg== - is-plain-object@^2.0.4: version "2.0.4" resolved "https://registry.yarnpkg.com/is-plain-object/-/is-plain-object-2.0.4.tgz#2c163b3fafb1b606d9d17928f05c2a1c38e07677" @@ -10267,11 +10063,6 @@ is-stream@^3.0.0: resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-3.0.0.tgz#e6bfd7aa6bef69f4f472ce9bb681e3e57b4319ac" integrity sha512-LnQR4bZ9IADDRSkvpqMGvt/tEJWclzklNgSw48V5EAaAeDd6qGvN8ei6k5p0tvxSR171VmGyHuTiAOfxAbr8kA== -is-stream@^4.0.1: - version 
"4.0.1" - resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-4.0.1.tgz#375cf891e16d2e4baec250b85926cffc14720d9b" - integrity sha512-Dnz92NInDqYckGEUJv689RbRiTSEHCQ7wOVeALbkOz999YpqT46yMRIGtSNl2iCL1waAZSx40+h59NV/EwzV/A== - is-string@^1.0.5, is-string@^1.0.7: version "1.0.7" resolved "https://registry.yarnpkg.com/is-string/-/is-string-1.0.7.tgz#0dd12bf2006f255bb58f695110eff7491eebc0fd" @@ -10310,6 +10101,13 @@ is-text-path@^1.0.1: dependencies: text-extensions "^1.0.0" +is-text-path@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/is-text-path/-/is-text-path-2.0.0.tgz#b2484e2b720a633feb2e85b67dc193ff72c75636" + integrity sha512-+oDTluR6WEjdXEJMnC2z6A4FRwFoYuvShVVEGsS7ewc0UTi2QtAKMDJuL4BDEVt+5T7MjFo12RP8ghOM75oKJw== + dependencies: + text-extensions "^2.0.0" + is-typed-array@^1.1.10, is-typed-array@^1.1.12, is-typed-array@^1.1.9: version "1.1.12" resolved "https://registry.yarnpkg.com/is-typed-array/-/is-typed-array-1.1.12.tgz#d0bab5686ef4a76f7a73097b95470ab199c57d4a" @@ -10336,10 +10134,10 @@ is-unicode-supported@^0.1.0: resolved "https://registry.yarnpkg.com/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz#3f26c76a809593b52bfa2ecb5710ed2779b522a7" integrity sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw== -is-unicode-supported@^2.0.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/is-unicode-supported/-/is-unicode-supported-2.1.0.tgz#09f0ab0de6d3744d48d265ebb98f65d11f2a9b3a" - integrity sha512-mE00Gnza5EEB3Ds0HfMyllZzbBrmLOX3vfWoj9A9PEnTfratQ/BcaJOuMhnkhjXvb2+FkY3VuHqtAGpTPmglFQ== +is-unicode-supported@^1.2.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/is-unicode-supported/-/is-unicode-supported-1.3.0.tgz#d824984b616c292a2e198207d4a609983842f714" + integrity sha512-43r2mRvz+8JRIKnWJ+3j8JtjRKZ6GmjzfaE/qiBJnikNnYv/6bagRJ1kUhNk8R5EX/GkobD+r+sfxCPJsiKBLQ== is-weakmap@^2.0.2: version "2.0.2" @@ -10407,10 +10205,10 @@ isobject@^3.0.1: resolved 
"https://registry.yarnpkg.com/isobject/-/isobject-3.0.1.tgz#4e431e92b11a9731636aa1f9c8d1ccbcfdab78df" integrity sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg== -issue-parser@^7.0.0: - version "7.0.1" - resolved "https://registry.yarnpkg.com/issue-parser/-/issue-parser-7.0.1.tgz#8a053e5a4952c75bb216204e454b4fc7d4cc9637" - integrity sha512-3YZcUUR2Wt1WsapF+S/WiA2WmlW0cWAoPccMqne7AxEBhCdFeTPjfv/Axb8V2gyCgY3nRw+ksZ3xSUX+R47iAg== +issue-parser@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/issue-parser/-/issue-parser-6.0.0.tgz#b1edd06315d4f2044a9755daf85fdafde9b4014a" + integrity sha512-zKa/Dxq2lGsBIXQ7CUZWTHfvxPC2ej0KfO7fIPqLlHB9J2hJ7rGhZ5rilhuufylr4RXYPzJUeFjKxz305OsNlA== dependencies: lodash.capitalize "^4.2.1" lodash.escaperegexp "^4.1.2" @@ -10481,6 +10279,15 @@ iterare@1.2.1: resolved "https://registry.yarnpkg.com/iterare/-/iterare-1.2.1.tgz#139c400ff7363690e33abffa33cbba8920f00042" integrity sha512-RKYVTCjAnRthyJes037NX/IiqeidgN1xc3j1RjFfECFp28A1GVwK9nA+i0rJPaHqSZwygLzRnFlzUuHFoWWy+Q== +jackspeak@^3.1.2: + version "3.4.3" + resolved "https://registry.yarnpkg.com/jackspeak/-/jackspeak-3.4.3.tgz#8833a9d89ab4acde6188942bd1c53b6390ed5a8a" + integrity sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw== + dependencies: + "@isaacs/cliui" "^8.0.2" + optionalDependencies: + "@pkgjs/parseargs" "^0.11.0" + jake@^10.8.5: version "10.8.7" resolved "https://registry.yarnpkg.com/jake/-/jake-10.8.7.tgz#63a32821177940c33f356e0ba44ff9d34e1c7d8f" @@ -10900,10 +10707,10 @@ js-tokens@^4.0.0: resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-4.0.0.tgz#19203fb59991df98e3a287050d4647cdeaf32499" integrity sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ== -js-yaml@4.1.0, js-yaml@4.1.1, js-yaml@^4.1.1: - version "4.1.1" - resolved 
"https://registry.yarnpkg.com/js-yaml/-/js-yaml-4.1.1.tgz#854c292467705b699476e1a2decc0c8a3458806b" - integrity sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA== +js-yaml@4.1.0, js-yaml@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-4.1.0.tgz#c1fb65f8f5017901cdd2c951864ba18458a10602" + integrity sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA== dependencies: argparse "^2.0.1" @@ -10915,10 +10722,10 @@ js-yaml@^3.10.0, js-yaml@^3.13.1, js-yaml@^3.14.1: argparse "^1.0.7" esprima "^4.0.0" -js-yaml@^4.1.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-4.1.0.tgz#c1fb65f8f5017901cdd2c951864ba18458a10602" - integrity sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA== +js-yaml@^4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-4.1.1.tgz#854c292467705b699476e1a2decc0c8a3458806b" + integrity sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA== dependencies: argparse "^2.0.1" @@ -10973,16 +10780,11 @@ json-parse-even-better-errors@^3.0.0: resolved "https://registry.yarnpkg.com/json-parse-even-better-errors/-/json-parse-even-better-errors-3.0.0.tgz#2cb2ee33069a78870a0c7e3da560026b89669cf7" integrity sha512-iZbGHafX/59r39gPwVPRBGw0QQKnA7tte5pSMrhWOW7swGsVvVTjmfyAV9pNqk8YGT7tRCdxRu8uzcgZwoDooA== -json-parse-even-better-errors@^3.0.2: +json-parse-even-better-errors@^3.0.1, json-parse-even-better-errors@^3.0.2: version "3.0.2" resolved "https://registry.yarnpkg.com/json-parse-even-better-errors/-/json-parse-even-better-errors-3.0.2.tgz#b43d35e89c0f3be6b5fbbe9dc6c82467b30c28da" integrity sha512-fi0NG4bPjCHunUJffmLd0gxssIgkNmArMvis4iNah6Owg1MCJjWhEcDLmsK6iGkJq3tHwbDkTlce70/tmXN4cQ== -json-parse-even-better-errors@^5.0.0: - version "5.0.0" - resolved 
"https://registry.yarnpkg.com/json-parse-even-better-errors/-/json-parse-even-better-errors-5.0.0.tgz#93c89f529f022e5dadc233409324f0167b1e903e" - integrity sha512-ZF1nxZ28VhQouRWhUcVlUIN3qwSgPuswK05s/HIaoetAoE/9tngVmCHjSxmSQPav1nd+lPtTL0YZ/2AFdR/iYQ== - json-schema-ref-resolver@^1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/json-schema-ref-resolver/-/json-schema-ref-resolver-1.0.1.tgz#6586f483b76254784fc1d2120f717bdc9f0a99bf" @@ -11404,70 +11206,78 @@ libnpmaccess@8.0.6: npm-package-arg "^11.0.2" npm-registry-fetch "^17.0.1" -libnpmaccess@^10.0.3: - version "10.0.3" - resolved "https://registry.yarnpkg.com/libnpmaccess/-/libnpmaccess-10.0.3.tgz#856dc29fd35050159dff0039337aab503367586b" - integrity sha512-JPHTfWJxIK+NVPdNMNGnkz4XGX56iijPbe0qFWbdt68HL+kIvSzh+euBL8npLZvl2fpaxo+1eZSdoG15f5YdIQ== - dependencies: - npm-package-arg "^13.0.0" - npm-registry-fetch "^19.0.0" - -libnpmdiff@^8.0.12: - version "8.0.12" - resolved "https://registry.yarnpkg.com/libnpmdiff/-/libnpmdiff-8.0.12.tgz#c55c80e0cb196588174989f36c285750fe7de048" - integrity sha512-M33yWsbxCUv4fwquYNxdRl//mX8CcmY+pHhZZ+f8ihKh+yfcQw2jROv0sJQ3eX5FzRVJKdCdH7nM0cNlHy83DQ== - dependencies: - "@npmcli/arborist" "^9.1.9" - "@npmcli/installed-package-contents" "^4.0.0" - binary-extensions "^3.0.0" - diff "^8.0.2" - minimatch "^10.0.3" - npm-package-arg "^13.0.0" - pacote "^21.0.2" - tar "^7.5.1" - -libnpmexec@^10.1.11: - version "10.1.11" - resolved "https://registry.yarnpkg.com/libnpmexec/-/libnpmexec-10.1.11.tgz#6ccc19f2d81c0eeb4f72f2fe09e8fc1637f5ec7f" - integrity sha512-228ZmYSfElpfywVFO3FMieLkFUDNknExXLLJoFcKJbyrucHc8KgDW4i9F4uJGNrbPvDqDtm7hcSEvrneN0Anqg== - dependencies: - "@npmcli/arborist" "^9.1.9" - "@npmcli/package-json" "^7.0.0" - "@npmcli/run-script" "^10.0.0" +libnpmaccess@^7.0.2: + version "7.0.3" + resolved "https://registry.yarnpkg.com/libnpmaccess/-/libnpmaccess-7.0.3.tgz#9878b75c5cf36ddfff167dd47c1a6cf1fa21193c" + integrity 
sha512-It+fk/NRdRfv5giLhaVeyebGi/0S2LDSAwuZ0AGQ4x//PtCVb2Hj29wgSHe+XEL+RUkvLBkxbRV+DqLtOzuVTQ== + dependencies: + npm-package-arg "^10.1.0" + npm-registry-fetch "^14.0.3" + +libnpmdiff@^5.0.20: + version "5.0.21" + resolved "https://registry.yarnpkg.com/libnpmdiff/-/libnpmdiff-5.0.21.tgz#9d3036595a4cf393e1de07df98a40607a054d333" + integrity sha512-Zx+o/qnGoX46osnInyQQ5KI8jn2wIqXXiu4TJzE8GFd+o6kbyblJf+ihG81M1+yHK3AzkD1m4KK3+UTPXh/hBw== + dependencies: + "@npmcli/arborist" "^6.5.0" + "@npmcli/disparity-colors" "^3.0.0" + "@npmcli/installed-package-contents" "^2.0.2" + binary-extensions "^2.2.0" + diff "^5.1.0" + minimatch "^9.0.0" + npm-package-arg "^10.1.0" + pacote "^15.0.8" + tar "^6.1.13" + +libnpmexec@^6.0.4: + version "6.0.5" + resolved "https://registry.yarnpkg.com/libnpmexec/-/libnpmexec-6.0.5.tgz#36eb7e5a94a653478c8dd66b4a967cadf3f2540d" + integrity sha512-yN/7uJ3iYCPaKagHfrqXuCFLKn2ddcnYpEyC/tVhisHULC95uCy8AhUdNkThRXzhFqqptejO25ZfoWOGrdqnxA== + dependencies: + "@npmcli/arborist" "^6.5.0" + "@npmcli/run-script" "^6.0.0" ci-info "^4.0.0" - npm-package-arg "^13.0.0" - pacote "^21.0.2" - proc-log "^6.0.0" - promise-retry "^2.0.1" - read "^5.0.1" + npm-package-arg "^10.1.0" + npmlog "^7.0.1" + pacote "^15.0.8" + proc-log "^3.0.0" + read "^2.0.0" + read-package-json-fast "^3.0.2" semver "^7.3.7" - signal-exit "^4.1.0" - walk-up-path "^4.0.0" + walk-up-path "^3.0.1" -libnpmfund@^7.0.12: - version "7.0.12" - resolved "https://registry.yarnpkg.com/libnpmfund/-/libnpmfund-7.0.12.tgz#0a8afd552c0e9d56b8e5904599406d62f2a640be" - integrity sha512-Jg4zvboAkI35JFoywEleJa9eU0ZIkMOZH3gt16VoexaYV3yVTjjIr4ZVnPx+MfsLo28y6DHQ8RgN4PFuKt1bhg== +libnpmfund@^4.2.1: + version "4.2.2" + resolved "https://registry.yarnpkg.com/libnpmfund/-/libnpmfund-4.2.2.tgz#4e50507212e64fcb6a396e4c02369f6c0fc40369" + integrity sha512-qnkP09tpryxD/iPYasHM7+yG4ZVe0e91sBVI/R8HJ1+ajeR9poWDckwiN2LEWGvtV/T/dqB++6A1NLrA5NPryw== dependencies: - "@npmcli/arborist" "^9.1.9" + "@npmcli/arborist" "^6.5.0" 
-libnpmorg@^8.0.1: - version "8.0.1" - resolved "https://registry.yarnpkg.com/libnpmorg/-/libnpmorg-8.0.1.tgz#975b61c2635f7edc07552ab8a455ce026decb88c" - integrity sha512-/QeyXXg4hqMw0ESM7pERjIT2wbR29qtFOWIOug/xO4fRjS3jJJhoAPQNsnHtdwnCqgBdFpGQ45aIdFFZx2YhTA== +libnpmhook@^9.0.3: + version "9.0.4" + resolved "https://registry.yarnpkg.com/libnpmhook/-/libnpmhook-9.0.4.tgz#43d893e19944a2e729b2b165a74f84a69443880d" + integrity sha512-bYD8nJiPnqeMtSsRc5bztqSh6/v16M0jQjLeO959HJqf9ZRWKRpVnFx971Rz5zbPGOB2BrQa6iopsh5vons5ww== dependencies: aproba "^2.0.0" - npm-registry-fetch "^19.0.0" + npm-registry-fetch "^14.0.3" -libnpmpack@^9.0.12: - version "9.0.12" - resolved "https://registry.yarnpkg.com/libnpmpack/-/libnpmpack-9.0.12.tgz#1514e3caa44f47896089bfa7f474beb8a10de21a" - integrity sha512-32j+CIrJhVngbqGUbhnpNFnPi6rkx6NP1lRO1OHf4aoZ57ad+mTkS788FfeAoXoiJDmfmAqgZejXRmEfy7s6Sg== +libnpmorg@^5.0.4: + version "5.0.5" + resolved "https://registry.yarnpkg.com/libnpmorg/-/libnpmorg-5.0.5.tgz#baaba5c77bdfa6808975be9134a330f84b3fa4d4" + integrity sha512-0EbtEIFthVlmaj0hhC3LlEEXUZU3vKfJwfWL//iAqKjHreMhCD3cgdkld+UeWYDgsZzwzvXmopoY0l38I0yx9Q== dependencies: - "@npmcli/arborist" "^9.1.9" - "@npmcli/run-script" "^10.0.0" - npm-package-arg "^13.0.0" - pacote "^21.0.2" + aproba "^2.0.0" + npm-registry-fetch "^14.0.3" + +libnpmpack@^5.0.20: + version "5.0.21" + resolved "https://registry.yarnpkg.com/libnpmpack/-/libnpmpack-5.0.21.tgz#bcc608279840448fa8c28d8df0f326694d0b6061" + integrity sha512-mQd3pPx7Xf6i2A6QnYcCmgq34BmfVG3HJvpl422B5dLKfi9acITqcJiJ2K7adhxPKZMF5VbP2+j391cs5w+xww== + dependencies: + "@npmcli/arborist" "^6.5.0" + "@npmcli/run-script" "^6.0.0" + npm-package-arg "^10.1.0" + pacote "^15.0.8" libnpmpublish@9.0.9: version "9.0.9" @@ -11483,44 +11293,44 @@ libnpmpublish@9.0.9: sigstore "^2.2.0" ssri "^10.0.6" -libnpmpublish@^11.1.3: - version "11.1.3" - resolved "https://registry.yarnpkg.com/libnpmpublish/-/libnpmpublish-11.1.3.tgz#fcda5c113798155fa111e04be63c9599d38ae4c2" - 
integrity sha512-NVPTth/71cfbdYHqypcO9Lt5WFGTzFEcx81lWd7GDJIgZ95ERdYHGUfCtFejHCyqodKsQkNEx2JCkMpreDty/A== +libnpmpublish@^7.5.1: + version "7.5.2" + resolved "https://registry.yarnpkg.com/libnpmpublish/-/libnpmpublish-7.5.2.tgz#1b2780a4a56429d6dea332174286179b8d6f930c" + integrity sha512-azAxjEjAgBkbPHUGsGdMbTScyiLcTKdEnNYwGS+9yt+fUsNyiYn8hNH3+HeWKaXzFjvxi50MrHw1yp1gg5pumQ== dependencies: - "@npmcli/package-json" "^7.0.0" ci-info "^4.0.0" - npm-package-arg "^13.0.0" - npm-registry-fetch "^19.0.0" - proc-log "^6.0.0" + normalize-package-data "^5.0.0" + npm-package-arg "^10.1.0" + npm-registry-fetch "^14.0.3" + proc-log "^3.0.0" semver "^7.3.7" - sigstore "^4.0.0" - ssri "^13.0.0" + sigstore "^1.4.0" + ssri "^10.0.1" -libnpmsearch@^9.0.1: - version "9.0.1" - resolved "https://registry.yarnpkg.com/libnpmsearch/-/libnpmsearch-9.0.1.tgz#674a88ffc9ab5826feb34c2c66e90797b38f4c2e" - integrity sha512-oKw58X415ERY/BOGV3jQPVMcep8YeMRWMzuuqB0BAIM5VxicOU1tQt19ExCu4SV77SiTOEoziHxGEgJGw3FBYQ== +libnpmsearch@^6.0.2: + version "6.0.3" + resolved "https://registry.yarnpkg.com/libnpmsearch/-/libnpmsearch-6.0.3.tgz#f6001910b4a68341c2aa3f6f9505e665ed98759e" + integrity sha512-4FLTFsygxRKd+PL32WJlFN1g6gkfx3d90PjgSgd6kl9nJ55sZQAqNyi1M7QROKB4kN8JCNCphK8fQYDMg5bCcg== dependencies: - npm-registry-fetch "^19.0.0" + npm-registry-fetch "^14.0.3" -libnpmteam@^8.0.2: - version "8.0.2" - resolved "https://registry.yarnpkg.com/libnpmteam/-/libnpmteam-8.0.2.tgz#0417161bfcd155f5e8391cc2b6a05260ccbf1f41" - integrity sha512-ypLrDUQoi8EhG+gzx5ENMcYq23YjPV17Mfvx4nOnQiHOi8vp47+4GvZBrMsEM4yeHPwxguF/HZoXH4rJfHdH/w== +libnpmteam@^5.0.3: + version "5.0.4" + resolved "https://registry.yarnpkg.com/libnpmteam/-/libnpmteam-5.0.4.tgz#255ac22d94e4b9e911456bf97c1dc1013df03659" + integrity sha512-yN2zxNb8Urvvo7fTWRcP3E/KPtpZJXFweDWcl+H/s3zopGDI9ahpidddGVG98JhnPl3vjqtZvFGU3/sqVTfuIw== dependencies: aproba "^2.0.0" - npm-registry-fetch "^19.0.0" + npm-registry-fetch "^14.0.3" -libnpmversion@^8.0.3: - version "8.0.3" - 
resolved "https://registry.yarnpkg.com/libnpmversion/-/libnpmversion-8.0.3.tgz#f50030c72a85e35b70a4ea4c075347f1999f9fe5" - integrity sha512-Avj1GG3DT6MGzWOOk3yA7rORcMDUPizkIGbI8glHCO7WoYn3NYNmskLDwxg2NMY1Tyf2vrHAqTuSG58uqd1lJg== +libnpmversion@^4.0.2: + version "4.0.3" + resolved "https://registry.yarnpkg.com/libnpmversion/-/libnpmversion-4.0.3.tgz#f4d85d3eb6bdbf7de8d9317abda92528e84b1a53" + integrity sha512-eD1O5zr0ko5pjOdz+2NyTEzP0kzKG8VIVyU+hIsz61cRmTrTxFRJhVBNOI1Q/inifkcM/UTl8EMfa0vX48zfoQ== dependencies: - "@npmcli/git" "^7.0.0" - "@npmcli/run-script" "^10.0.0" - json-parse-even-better-errors "^5.0.0" - proc-log "^6.0.0" + "@npmcli/git" "^4.0.1" + "@npmcli/run-script" "^6.0.0" + json-parse-even-better-errors "^3.0.0" + proc-log "^3.0.0" semver "^7.3.7" lie@~3.3.0: @@ -11583,6 +11393,11 @@ lines-and-columns@^1.1.6: resolved "https://registry.yarnpkg.com/lines-and-columns/-/lines-and-columns-1.2.4.tgz#eca284f75d2965079309dc0ad9255abb2ebc1632" integrity sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg== +lines-and-columns@^2.0.3: + version "2.0.4" + resolved "https://registry.yarnpkg.com/lines-and-columns/-/lines-and-columns-2.0.4.tgz#d00318855905d2660d8c0822e3f5a4715855fc42" + integrity sha512-wM1+Z03eypVAVUCE7QdSqpVIvelbOakn1M0bPDoA4SGWPx3sNDVUiMo3L6To6WWGClB7VyXnhQ4Sn7gxiJbE6A== + link-check@^5.2.0: version "5.2.0" resolved "https://registry.yarnpkg.com/link-check/-/link-check-5.2.0.tgz#595a339d305900bed8c1302f4342a29c366bf478" @@ -11642,6 +11457,13 @@ locate-path@^6.0.0: dependencies: p-locate "^5.0.0" +locate-path@^7.1.0: + version "7.2.0" + resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-7.2.0.tgz#69cb1779bd90b35ab1e771e1f2f89a202c2a8a8a" + integrity sha512-gvVijfZvn7R+2qyPX8mAuKcFGDf6Nc61GdvGafQsHL0sBIxfKzA+usWn4GFC/bk+QdwPUD4kWFJLhElipq+0VA== + dependencies: + p-locate "^6.0.0" + lodash-es@^4.17.21: version "4.17.23" resolved 
"https://registry.yarnpkg.com/lodash-es/-/lodash-es-4.17.23.tgz#58c4360fd1b5d33afc6c0bbd3d1149349b1138e0" @@ -11832,16 +11654,11 @@ lru-cache@^10.0.1: dependencies: semver "^7.3.5" -lru-cache@^10.2.2: +lru-cache@^10.2.0, lru-cache@^10.2.2: version "10.4.3" resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-10.4.3.tgz#410fc8a17b70e598013df257c2446b7f3383f119" integrity sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ== -lru-cache@^11.0.0, lru-cache@^11.1.0, lru-cache@^11.2.1: - version "11.2.4" - resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-11.2.4.tgz#ecb523ebb0e6f4d837c807ad1abaea8e0619770d" - integrity sha512-B5Y16Jr9LB9dHVkh6ZevG+vAbOsNOYCX+sXvFWFu7B3Iz5mijW3zdbMyhsh8ANd2mSWBYdJgnqi+mL7/LrOPYg== - lru-cache@^5.1.1: version "5.1.1" resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-5.1.1.tgz#1da27e6710271947695daf6848e847f01d84b920" @@ -11856,7 +11673,7 @@ lru-cache@^6.0.0: dependencies: yallist "^4.0.0" -lru-cache@^7.14.1: +lru-cache@^7.14.1, lru-cache@^7.4.4, lru-cache@^7.5.1, lru-cache@^7.7.1: version "7.18.3" resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-7.18.3.tgz#f793896e0fd0e954a59dfdd82f0773808df6aa89" integrity sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA== @@ -11883,15 +11700,6 @@ luxon@^3.2.1: resolved "https://registry.yarnpkg.com/luxon/-/luxon-3.4.4.tgz#cf20dc27dc532ba41a169c43fdcc0063601577af" integrity sha512-zobTr7akeGHnv7eBOXcRgMeCP6+uyYsczwmeRCauvpvaAltgNyTbLH/+VaEAPUeWBT+1GuNmz4wC/6jtQzbbVA== -make-asynchronous@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/make-asynchronous/-/make-asynchronous-1.0.1.tgz#5ff174bae4e4371746debff112103545037373ee" - integrity sha512-T9BPOmEOhp6SmV25SwLVcHK4E6JyG/coH3C6F1NjNXSziv/fd4GmsqMk8YR6qpPOswfaOCApSNkZv6fxoaYFcQ== - dependencies: - p-event "^6.0.0" - type-fest "^4.6.0" - web-worker "1.2.0" - make-dir@4.0.0, make-dir@^4.0.0: version "4.0.0" 
resolved "https://registry.yarnpkg.com/make-dir/-/make-dir-4.0.0.tgz#c3c2307a771277cd9638305f915c29ae741b614e" @@ -11919,6 +11727,49 @@ make-error@1.x, make-error@^1.1.1: resolved "https://registry.yarnpkg.com/make-error/-/make-error-1.3.6.tgz#2eb2e37ea9b67c4891f684a1394799af484cf7a2" integrity sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw== +make-fetch-happen@^10.0.3: + version "10.2.1" + resolved "https://registry.yarnpkg.com/make-fetch-happen/-/make-fetch-happen-10.2.1.tgz#f5e3835c5e9817b617f2770870d9492d28678164" + integrity sha512-NgOPbRiaQM10DYXvN3/hhGVI2M5MtITFryzBGxHM5p4wnFxsVCbxkrBrDsk+EZ5OB4jEOT7AjDxtdF+KVEFT7w== + dependencies: + agentkeepalive "^4.2.1" + cacache "^16.1.0" + http-cache-semantics "^4.1.0" + http-proxy-agent "^5.0.0" + https-proxy-agent "^5.0.0" + is-lambda "^1.0.1" + lru-cache "^7.7.1" + minipass "^3.1.6" + minipass-collect "^1.0.2" + minipass-fetch "^2.0.3" + minipass-flush "^1.0.5" + minipass-pipeline "^1.2.4" + negotiator "^0.6.3" + promise-retry "^2.0.1" + socks-proxy-agent "^7.0.0" + ssri "^9.0.0" + +make-fetch-happen@^11.0.0, make-fetch-happen@^11.0.1, make-fetch-happen@^11.1.1: + version "11.1.1" + resolved "https://registry.yarnpkg.com/make-fetch-happen/-/make-fetch-happen-11.1.1.tgz#85ceb98079584a9523d4bf71d32996e7e208549f" + integrity sha512-rLWS7GCSTcEujjVBs2YqG7Y4643u8ucvCJeSRqiLYhesrDuzeuFIk37xREzAsfQaqzl8b9rNCE4m6J8tvX4Q8w== + dependencies: + agentkeepalive "^4.2.1" + cacache "^17.0.0" + http-cache-semantics "^4.1.1" + http-proxy-agent "^5.0.0" + https-proxy-agent "^5.0.0" + is-lambda "^1.0.1" + lru-cache "^7.7.1" + minipass "^5.0.0" + minipass-fetch "^3.0.0" + minipass-flush "^1.0.5" + minipass-pipeline "^1.2.4" + negotiator "^0.6.3" + promise-retry "^2.0.1" + socks-proxy-agent "^7.0.0" + ssri "^10.0.0" + make-fetch-happen@^13.0.0, make-fetch-happen@^13.0.1: version "13.0.1" resolved 
"https://registry.yarnpkg.com/make-fetch-happen/-/make-fetch-happen-13.0.1.tgz#273ba2f78f45e1f3a6dca91cede87d9fa4821e36" @@ -11937,23 +11788,6 @@ make-fetch-happen@^13.0.0, make-fetch-happen@^13.0.1: promise-retry "^2.0.1" ssri "^10.0.0" -make-fetch-happen@^15.0.0, make-fetch-happen@^15.0.1, make-fetch-happen@^15.0.3: - version "15.0.3" - resolved "https://registry.yarnpkg.com/make-fetch-happen/-/make-fetch-happen-15.0.3.tgz#1578d72885f2b3f9e5daa120b36a14fc31a84610" - integrity sha512-iyyEpDty1mwW3dGlYXAJqC/azFn5PPvgKVwXayOGBSmKLxhKZ9fg4qIan2ePpp1vJIwfFiO34LAPZgq9SZW9Aw== - dependencies: - "@npmcli/agent" "^4.0.0" - cacache "^20.0.1" - http-cache-semantics "^4.1.1" - minipass "^7.0.2" - minipass-fetch "^5.0.0" - minipass-flush "^1.0.5" - minipass-pipeline "^1.2.4" - negotiator "^1.0.0" - proc-log "^6.0.0" - promise-retry "^2.0.1" - ssri "^13.0.0" - make-fetch-happen@^9.1.0: version "9.1.0" resolved "https://registry.yarnpkg.com/make-fetch-happen/-/make-fetch-happen-9.1.0.tgz#53085a09e7971433e6765f7971bf63f4e05cb968" @@ -12004,7 +11838,7 @@ mariadb@^3.0.2: iconv-lite "^0.6.3" lru-cache "^10.0.1" -markdown-it@>=14.1.1, markdown-it@^14.1.0: +markdown-it@^14.1.0: version "14.1.1" resolved "https://registry.yarnpkg.com/markdown-it/-/markdown-it-14.1.1.tgz#856f90b66fc39ae70affd25c1b18b581d7deee1f" integrity sha512-BuU2qnTti9YKgK5N+IeMubp14ZUKUUw7yeJbkjtosvHiP0AZ5c8IAgEMk79D0eC8F23r4Ac/q8cAIFdm2FtyoA== @@ -12045,29 +11879,28 @@ markdown-table@^2.0.0: dependencies: repeat-string "^1.0.0" -marked-terminal@^7.3.0: - version "7.3.0" - resolved "https://registry.yarnpkg.com/marked-terminal/-/marked-terminal-7.3.0.tgz#7a86236565f3dd530f465ffce9c3f8b62ef270e8" - integrity sha512-t4rBvPsHc57uE/2nJOLmMbZCQ4tgAccAED3ngXQqW6g+TxA488JzJ+FK3lQkzBQOI1mRV/r/Kq+1ZlJ4D0owQw== - dependencies: - ansi-escapes "^7.0.0" - ansi-regex "^6.1.0" - chalk "^5.4.1" - cli-highlight "^2.1.11" - cli-table3 "^0.6.5" - node-emoji "^2.2.0" - supports-hyperlinks "^3.1.0" - -marked@^15.0.0: - version 
"15.0.12" - resolved "https://registry.yarnpkg.com/marked/-/marked-15.0.12.tgz#30722c7346e12d0a2d0207ab9b0c4f0102d86c4e" - integrity sha512-8dD6FusOQSrpv9Z1rdNMdlSgQOIP880DHqnohobOmYLElGEqAL/JvxvuxZO16r4HtjTlfPRDC1hbvxC9dPN2nA== +marked-terminal@^5.1.1: + version "5.2.0" + resolved "https://registry.yarnpkg.com/marked-terminal/-/marked-terminal-5.2.0.tgz#c5370ec2bae24fb2b34e147b731c94fa933559d3" + integrity sha512-Piv6yNwAQXGFjZSaiNljyNFw7jKDdGrw70FSbtxEyldLsyeuV5ZHm/1wW++kWbrOF1VPnUgYOhB2oLL0ZpnekA== + dependencies: + ansi-escapes "^6.2.0" + cardinal "^2.1.1" + chalk "^5.2.0" + cli-table3 "^0.6.3" + node-emoji "^1.11.0" + supports-hyperlinks "^2.3.0" marked@^4.1.0: version "4.3.0" resolved "https://registry.yarnpkg.com/marked/-/marked-4.3.0.tgz#796362821b019f734054582038b116481b456cf3" integrity sha512-PRsaiG84bK+AMvxziE/lCFss8juXjNaWzVbN5tXAm4XjeaS9NAHhop+PjQxz2A9h8Q4M/xGmzP8vqNwy6JeK0A== +marked@^5.0.0: + version "5.1.2" + resolved "https://registry.yarnpkg.com/marked/-/marked-5.1.2.tgz#62b5ccfc75adf72ca3b64b2879b551d89e77677f" + integrity sha512-ahRPGXJpjMjwSOlBoTMZAK7ATXkli5qCPxZ21TG44rx1KEo44bii4ekgTDQPNRQ4Kh7JMb9Ub1PVk1NxRSsorg== + math-expression-evaluator@^2.0.0: version "2.0.7" resolved "https://registry.yarnpkg.com/math-expression-evaluator/-/math-expression-evaluator-2.0.7.tgz#dc99a80ce2bf7f9b7df878126feb5c506c1fdf5f" @@ -12196,11 +12029,6 @@ meow@^12.0.1: resolved "https://registry.yarnpkg.com/meow/-/meow-12.1.1.tgz#e558dddbab12477b69b2e9a2728c327f191bace6" integrity sha512-BhXM0Au22RwUneMPwSCnyhTOizdWoIEPU9sp0Aqa1PnDMR5Wv2FGXYDjuzJEIX+Eo2Rb8xuYe5jrnm5QowQFkw== -meow@^13.0.0: - version "13.2.0" - resolved "https://registry.yarnpkg.com/meow/-/meow-13.2.0.tgz#6b7d63f913f984063b3cc261b6e8800c4cd3474f" - integrity sha512-pxQJQzB6djGPXh08dacEloMFopsOqGVRKFPYvPOt9XDZ1HasbgDZA74CJGreSU4G3Ak7EFJGoiH2auq+yXISgA== - meow@^8.0.0, meow@^8.1.2: version "8.1.2" resolved "https://registry.yarnpkg.com/meow/-/meow-8.1.2.tgz#bcbe45bda0ee1729d350c03cffc8395a36c4e897" @@ 
-12301,7 +12129,15 @@ micromark@^2.11.3, micromark@~2.11.0, micromark@~2.11.3: debug "^4.0.0" parse-entities "^2.0.0" -micromatch@4.0.2, micromatch@^4.0.0, micromatch@^4.0.2, micromatch@^4.0.4, micromatch@^4.0.8: +micromatch@4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.2.tgz#4fcb0999bf9fbc2fcbdd212f6d629b9a56c39259" + integrity sha512-y7FpHSbMUMoyPbYUSzO6PaZ6FyRnQOpHuKwbo1G+Knck95XVU4QAiKdGEnj5wwoS7PlOgthX/09u5iFJ+aYf5Q== + dependencies: + braces "^3.0.1" + picomatch "^2.0.5" + +micromatch@^4.0.0, micromatch@^4.0.2, micromatch@^4.0.4, micromatch@^4.0.8: version "4.0.8" resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.8.tgz#d66fa18f3a47076789320b9b1af32bd86d9fa202" integrity sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA== @@ -12407,13 +12243,6 @@ minimatch@9.0.3: dependencies: brace-expansion "^2.0.1" -minimatch@^10.0.3, minimatch@^10.1.1, minimatch@^10.1.2: - version "10.2.4" - resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-10.2.4.tgz#465b3accbd0218b8281f5301e27cedc697f96fde" - integrity sha512-oRjTw/97aTBN0RHbYCdtF1MQfvusSIBQM0IZEgzl6426+8jSC0nF1a/GmnVLpfB9yyr6g6FTqWqiZVbxrtaCIg== - dependencies: - brace-expansion "^5.0.2" - minimatch@^3.0.4, minimatch@^3.0.5, minimatch@^3.1.1, minimatch@^3.1.2: version "3.1.5" resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.1.5.tgz#580c88f8d5445f2bd6aa8f3cadefa0de79fbd69e" @@ -12428,7 +12257,14 @@ minimatch@^5.0.1: dependencies: brace-expansion "^2.0.1" -minimatch@^9.0.0, minimatch@^9.0.4, minimatch@^9.0.5: +minimatch@^8.0.2: + version "8.0.7" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-8.0.7.tgz#954766e22da88a3e0a17ad93b58c15c9d8a579de" + integrity sha512-V+1uQNdzybxa14e/p00HZnQNNcTjnRJjDxg2V8wtkjFctq4M7hXFws4oekyTP0Jebeq7QYtpFyOeBAjc88zvYg== + dependencies: + brace-expansion "^2.0.1" + +minimatch@^9.0.0, minimatch@^9.0.3, minimatch@^9.0.4, minimatch@^9.0.5: version 
"9.0.9" resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-9.0.9.tgz#9b0cb9fcb78087f6fd7eababe2511c4d3d60574e" integrity sha512-OBwBN9AL4dqmETlpS2zasx+vTeWclWzkblfZk7KTA5j3jeOONz/tRCnZomUyvNg83wL5Zv9Ss6HMJXAgL8R2Yg== @@ -12474,25 +12310,25 @@ minipass-fetch@^1.3.2: optionalDependencies: encoding "^0.1.12" -minipass-fetch@^3.0.0: - version "3.0.4" - resolved "https://registry.yarnpkg.com/minipass-fetch/-/minipass-fetch-3.0.4.tgz#4d4d9b9f34053af6c6e597a64be8e66e42bf45b7" - integrity sha512-jHAqnA728uUpIaFm7NWsCnqKT6UqZz7GcI/bDpPATuwYyKwJwW0remxSCxUlKiEty+eopHGa3oc8WxgQ1FFJqg== +minipass-fetch@^2.0.3: + version "2.1.2" + resolved "https://registry.yarnpkg.com/minipass-fetch/-/minipass-fetch-2.1.2.tgz#95560b50c472d81a3bc76f20ede80eaed76d8add" + integrity sha512-LT49Zi2/WMROHYoqGgdlQIZh8mLPZmOrN2NdJjMXxYe4nkN6FUyuPuOAOedNJDrx0IRGg9+4guZewtp8hE6TxA== dependencies: - minipass "^7.0.3" + minipass "^3.1.6" minipass-sized "^1.0.3" minizlib "^2.1.2" optionalDependencies: encoding "^0.1.13" -minipass-fetch@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/minipass-fetch/-/minipass-fetch-5.0.0.tgz#644ed3fa172d43b3163bb32f736540fc138c4afb" - integrity sha512-fiCdUALipqgPWrOVTz9fw0XhcazULXOSU6ie40DDbX1F49p1dBrSRBuswndTx1x3vEb/g0FT7vC4c4C2u/mh3A== +minipass-fetch@^3.0.0: + version "3.0.4" + resolved "https://registry.yarnpkg.com/minipass-fetch/-/minipass-fetch-3.0.4.tgz#4d4d9b9f34053af6c6e597a64be8e66e42bf45b7" + integrity sha512-jHAqnA728uUpIaFm7NWsCnqKT6UqZz7GcI/bDpPATuwYyKwJwW0remxSCxUlKiEty+eopHGa3oc8WxgQ1FFJqg== dependencies: minipass "^7.0.3" minipass-sized "^1.0.3" - minizlib "^3.0.1" + minizlib "^2.1.2" optionalDependencies: encoding "^0.1.13" @@ -12503,7 +12339,15 @@ minipass-flush@^1.0.5: dependencies: minipass "^3.0.0" -minipass-pipeline@^1.2.2, minipass-pipeline@^1.2.4: +minipass-json-stream@^1.0.1: + version "1.0.2" + resolved 
"https://registry.yarnpkg.com/minipass-json-stream/-/minipass-json-stream-1.0.2.tgz#5121616c77a11c406c3ffa77509e0b77bb267ec3" + integrity sha512-myxeeTm57lYs8pH2nxPzmEEg8DGIgW+9mv6D4JZD2pa81I/OBjeU7PtICXV6c9eRGTA5JMDsuIPUZRCyBMYNhg== + dependencies: + jsonparse "^1.3.1" + minipass "^3.0.0" + +minipass-pipeline@^1.2.2, minipass-pipeline@^1.2.4: version "1.2.4" resolved "https://registry.yarnpkg.com/minipass-pipeline/-/minipass-pipeline-1.2.4.tgz#68472f79711c084657c067c5c6ad93cddea8214c" integrity sha512-xuIq7cIOt09RPRJ19gdi4b+RiNvDFYe5JH+ggNvBqGqpQXcru3PcRmOZuHBKWK1Txf9+cQ+HMVN4d6z46LZP7A== @@ -12517,14 +12361,29 @@ minipass-sized@^1.0.3: dependencies: minipass "^3.0.0" -minipass@^3.0.0, minipass@^3.1.0, minipass@^3.1.1, minipass@^3.1.3: +minipass@^3.0.0, minipass@^3.1.0, minipass@^3.1.1, minipass@^3.1.3, minipass@^3.1.6: version "3.3.6" resolved "https://registry.yarnpkg.com/minipass/-/minipass-3.3.6.tgz#7bba384db3a1520d18c9c0e5251c3444e95dd94a" integrity sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw== dependencies: yallist "^4.0.0" -minipass@^7.0.2, minipass@^7.0.4, minipass@^7.1.1, minipass@^7.1.2: +minipass@^4.2.4: + version "4.2.8" + resolved "https://registry.yarnpkg.com/minipass/-/minipass-4.2.8.tgz#f0010f64393ecfc1d1ccb5f582bcaf45f48e1a3a" + integrity sha512-fNzuVyifolSLFL4NzpF+wEF4qrgqaaKX0haXPQEdQ7NKAN+WecoKMHV09YcuL/DHxrUsYQOK3MiuDf7Ip2OXfQ== + +minipass@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/minipass/-/minipass-5.0.0.tgz#3e9788ffb90b694a5d0ec94479a45b5d8738133d" + integrity sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ== + +"minipass@^5.0.0 || ^6.0.2 || ^7.0.0": + version "7.1.3" + resolved "https://registry.yarnpkg.com/minipass/-/minipass-7.1.3.tgz#79389b4eb1bb2d003a9bba87d492f2bd37bdc65b" + integrity sha512-tEBHqDnIoM/1rXME1zgka9g6Q2lcoCkxHLuc7ODJ5BxbP5d4c2Z5cGgtXAku59200Cx7diuHTOYfSBD8n6mm8A== + +minipass@^7.0.2, minipass@^7.0.4, 
minipass@^7.1.2: version "7.1.2" resolved "https://registry.yarnpkg.com/minipass/-/minipass-7.1.2.tgz#93a9626ce5e5e66bd4db86849e7515e92340a707" integrity sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw== @@ -12534,7 +12393,7 @@ minipass@^7.0.3: resolved "https://registry.yarnpkg.com/minipass/-/minipass-7.0.4.tgz#dbce03740f50a4786ba994c1fb908844d27b038c" integrity sha512-jYofLM5Dam9279rdkWzqHozUo4ybjdZmCsDHePy5V/PbBcVMiSZR97gmAy45aqi8CK1lG2ECd356FU86avfwUQ== -minizlib@^2.0.0, minizlib@^2.1.2: +minizlib@^2.0.0, minizlib@^2.1.1, minizlib@^2.1.2: version "2.1.2" resolved "https://registry.yarnpkg.com/minizlib/-/minizlib-2.1.2.tgz#e90d3466ba209b932451508a11ce3d3632145931" integrity sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg== @@ -12542,13 +12401,6 @@ minizlib@^2.0.0, minizlib@^2.1.2: minipass "^3.0.0" yallist "^4.0.0" -minizlib@^3.0.1, minizlib@^3.1.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/minizlib/-/minizlib-3.1.0.tgz#6ad76c3a8f10227c9b51d1c9ac8e30b27f5a251c" - integrity sha512-KZxYo1BUkWD2TVFLr0MQoM8vUUigWD3LlD83a/75BqC+4qE0Hb1Vo5v1FgcfaNXvfXzr+5EhQ6ing/CaBijTlw== - dependencies: - minipass "^7.1.2" - mkdirp-classic@^0.5.2, mkdirp-classic@^0.5.3: version "0.5.3" resolved "https://registry.yarnpkg.com/mkdirp-classic/-/mkdirp-classic-0.5.3.tgz#fa10c9115cc6d8865be221ba47ee9bed78601113" @@ -12706,7 +12558,7 @@ mute-stream@0.0.8: resolved "https://registry.yarnpkg.com/mute-stream/-/mute-stream-0.0.8.tgz#1630c42b2251ff81e2a283de96a5497ea92e5e0d" integrity sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA== -mute-stream@^1.0.0: +mute-stream@^1.0.0, mute-stream@~1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/mute-stream/-/mute-stream-1.0.0.tgz#e31bd9fe62f0aed23520aa4324ea6671531e013e" integrity sha512-avsJQhyd+680gKXyG/sQc0nXaC6rBkPOfyHYcFb9+hdkqQkR9bdnkJ0AMZhke0oesPqIO+mFFJ+IdBc7mst4IA== @@ -12716,11 
+12568,6 @@ mute-stream@^2.0.0: resolved "https://registry.yarnpkg.com/mute-stream/-/mute-stream-2.0.0.tgz#a5446fc0c512b71c83c44d908d5c7b7b4c493b2b" integrity sha512-WWdIxpyjEn+FhQJQQv9aQAYlHoNVdzIzUySNV1gHUPDSdZJ3yZn7pAAbQcV7B56Mvu881q9FZV+0Vx2xC44VWA== -mute-stream@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/mute-stream/-/mute-stream-3.0.0.tgz#cd8014dd2acb72e1e91bb67c74f0019e620ba2d1" - integrity sha512-dkEJPVvun4FryqBmZ5KhDo0K9iDXAwn08tMLDinNdRBNPcYEDiWYysLcc6k3mjTMlbP9KyylvRpd4wFtwrT9rw== - mysql2@3.9.8: version "3.9.8" resolved "https://registry.yarnpkg.com/mysql2/-/mysql2-3.9.8.tgz#fe8a0f975f2c495ed76ca988ddc5505801dc49ce" @@ -12749,15 +12596,6 @@ mysql2@^3.0.1: seq-queue "^0.0.5" sqlstring "^2.3.2" -mz@^2.4.0: - version "2.7.0" - resolved "https://registry.yarnpkg.com/mz/-/mz-2.7.0.tgz#95008057a56cafadc2bc63dde7f9ff6955948e32" - integrity sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q== - dependencies: - any-promise "^1.0.0" - object-assign "^4.0.1" - thenify-all "^1.0.0" - named-placeholders@^1.1.3: version "1.1.3" resolved "https://registry.yarnpkg.com/named-placeholders/-/named-placeholders-1.1.3.tgz#df595799a36654da55dda6152ba7a137ad1d9351" @@ -12865,15 +12703,12 @@ node-addon-api@^7.0.0: resolved "https://registry.yarnpkg.com/node-addon-api/-/node-addon-api-7.1.1.tgz#1aba6693b0f255258a049d621329329322aad558" integrity sha512-5m3bsyrjFWE1xf7nz7YXdN4udnVtXK6/Yfgn5qnahL6bCkf2yKt4k3nuTKAtT4r3IG8JNR2ncsIMdZuAzJjHQQ== -node-emoji@^2.2.0: - version "2.2.0" - resolved "https://registry.yarnpkg.com/node-emoji/-/node-emoji-2.2.0.tgz#1d000e3c76e462577895be1b436f4aa2d6760eb0" - integrity sha512-Z3lTE9pLaJF47NyMhd4ww1yFTAP8YhYI8SleJiHzM46Fgpm5cnNzSl9XfzFNqbaz+VlJrIj3fXQ4DeN1Rjm6cw== +node-emoji@^1.11.0: + version "1.11.0" + resolved "https://registry.yarnpkg.com/node-emoji/-/node-emoji-1.11.0.tgz#69a0150e6946e2f115e9d7ea4df7971e2628301c" + integrity 
sha512-wo2DpQkQp7Sjm2A0cq+sN7EHKO6Sl0ctXeBdFZrL9T9+UywORbufTcTZxom8YqpLQt/FqNMUkOpkZrJVYSKD3A== dependencies: - "@sindresorhus/is" "^4.6.0" - char-regex "^1.0.2" - emojilib "^2.4.0" - skin-tone "^2.0.0" + lodash "^4.17.21" node-fetch@2.6.7: version "2.6.7" @@ -12921,21 +12756,22 @@ node-gyp@^10.0.0: tar "^6.2.1" which "^4.0.0" -node-gyp@^12.1.0: - version "12.1.0" - resolved "https://registry.yarnpkg.com/node-gyp/-/node-gyp-12.1.0.tgz#302fc2d3fec36975cfb8bfee7a6bf6b7f0be9553" - integrity sha512-W+RYA8jBnhSr2vrTtlPYPc1K+CSjGpVDRZxcqJcERZ8ND3A1ThWPHRwctTx3qC3oW99jt726jhdz3Y6ky87J4g== +node-gyp@^9.0.0, node-gyp@^9.4.1: + version "9.4.1" + resolved "https://registry.yarnpkg.com/node-gyp/-/node-gyp-9.4.1.tgz#8a1023e0d6766ecb52764cc3a734b36ff275e185" + integrity sha512-OQkWKbjQKbGkMf/xqI1jjy3oCTgMKJac58G2+bjZb3fza6gW2YrCSdMQYaoTb70crvE//Gngr4f0AgVHmqHvBQ== dependencies: env-paths "^2.2.0" exponential-backoff "^3.1.1" + glob "^7.1.4" graceful-fs "^4.2.6" - make-fetch-happen "^15.0.0" - nopt "^9.0.0" - proc-log "^6.0.0" + make-fetch-happen "^10.0.3" + nopt "^6.0.0" + npmlog "^6.0.0" + rimraf "^3.0.2" semver "^7.3.5" - tar "^7.5.2" - tinyglobby "^0.2.12" - which "^6.0.0" + tar "^6.1.2" + which "^2.0.2" node-int64@^0.4.0: version "0.4.0" @@ -13003,20 +12839,20 @@ nopt@^5.0.0: dependencies: abbrev "1" -nopt@^7.0.0, nopt@^7.2.1: +nopt@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/nopt/-/nopt-6.0.0.tgz#245801d8ebf409c6df22ab9d95b65e1309cdb16d" + integrity sha512-ZwLpbTgdhuZUnZzjd7nb1ZV+4DoiC6/sfiVKok72ym/4Tlf+DFdlHYmT2JPmcNNWV6Pi3SDf1kT+A4r9RTuT9g== + dependencies: + abbrev "^1.0.0" + +nopt@^7.0.0, nopt@^7.2.0, nopt@^7.2.1: version "7.2.1" resolved "https://registry.yarnpkg.com/nopt/-/nopt-7.2.1.tgz#1cac0eab9b8e97c9093338446eddd40b2c8ca1e7" integrity sha512-taM24ViiimT/XntxbPyJQzCG+p4EKOpgD3mxFwW38mGjVUrfERQOeY4EDHjdnptttfHuHQXFx+lTP08Q+mLa/w== dependencies: abbrev "^2.0.0" -nopt@^9.0.0: - version "9.0.0" - resolved 
"https://registry.yarnpkg.com/nopt/-/nopt-9.0.0.tgz#6bff0836b2964d24508b6b41b5a9a49c4f4a1f96" - integrity sha512-Zhq3a+yFKrYwSBluL4H9XP3m3y5uvQkB/09CwDruCiRmR/UJYnn9W4R48ry0uGC70aeTPKLynBtscP9efFFcPw== - dependencies: - abbrev "^4.0.0" - nopt@~1.0.10: version "1.0.10" resolved "https://registry.yarnpkg.com/nopt/-/nopt-1.0.10.tgz#6ddd21bd2a31417b92727dd585f8a6f37608ebee" @@ -13044,6 +12880,16 @@ normalize-package-data@^3.0.0, normalize-package-data@^3.0.3: semver "^7.3.4" validate-npm-package-license "^3.0.1" +normalize-package-data@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/normalize-package-data/-/normalize-package-data-5.0.0.tgz#abcb8d7e724c40d88462b84982f7cbf6859b4588" + integrity sha512-h9iPVIfrVZ9wVYQnxFgtw1ugSvGEMOlyPWWtm8BMJhnwyEL/FLbYbTY3V3PpjI/BUK67n9PEWDu6eHzu1fB15Q== + dependencies: + hosted-git-info "^6.0.0" + is-core-module "^2.8.1" + semver "^7.3.5" + validate-npm-package-license "^3.0.4" + normalize-package-data@^6.0.0, normalize-package-data@^6.0.1: version "6.0.2" resolved "https://registry.yarnpkg.com/normalize-package-data/-/normalize-package-data-6.0.2.tgz#a7bc22167fe24025412bcff0a9651eb768b03506" @@ -13053,15 +12899,6 @@ normalize-package-data@^6.0.0, normalize-package-data@^6.0.1: semver "^7.3.5" validate-npm-package-license "^3.0.4" -normalize-package-data@^8.0.0: - version "8.0.0" - resolved "https://registry.yarnpkg.com/normalize-package-data/-/normalize-package-data-8.0.0.tgz#bdce7ff2d6ba891b853e179e45a5337766e304a7" - integrity sha512-RWk+PI433eESQ7ounYxIp67CYuVsS1uYSonX3kA6ps/3LWfjVQa/ptEg6Y3T6uAMq1mWpX9PQ+qx+QaHpsc7gQ== - dependencies: - hosted-git-info "^9.0.0" - semver "^7.3.5" - validate-npm-package-license "^3.0.4" - normalize-path@^3.0.0, normalize-path@~3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-3.0.0.tgz#0dcd69ff23a1c9b11fd0978316644a0388216a65" @@ -13072,10 +12909,10 @@ normalize-url@^8.0.0: resolved 
"https://registry.yarnpkg.com/normalize-url/-/normalize-url-8.1.0.tgz#d33504f67970decf612946fd4880bc8c0983486d" integrity sha512-X06Mfd/5aKsRHc0O0J5CUedwnPmnDtLF2+nq+KN9KSDlJHkPuh0JUviWjEWMe0SW/9TDdSLVPuk7L5gGTIA1/w== -npm-audit-report@^7.0.0: - version "7.0.0" - resolved "https://registry.yarnpkg.com/npm-audit-report/-/npm-audit-report-7.0.0.tgz#c384ac4afede55f21b30778202ad568e54644c35" - integrity sha512-bluLL4xwGr/3PERYz50h2Upco0TJMDcLcymuFnfDWeGO99NqH724MNzhWi5sXXuXf2jbytFF0LyR8W+w1jTI6A== +npm-audit-report@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/npm-audit-report/-/npm-audit-report-5.0.0.tgz#83ac14aeff249484bde81eff53c3771d5048cf95" + integrity sha512-EkXrzat7zERmUhHaoren1YhTxFwsOu5jypE84k6632SXTHcQE1z8V51GC6GVZt8LxkC+tbBcKMUBZAgk8SUSbw== npm-bundled@^3.0.0: version "3.0.0" @@ -13084,37 +12921,18 @@ npm-bundled@^3.0.0: dependencies: npm-normalize-package-bin "^3.0.0" -npm-bundled@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/npm-bundled/-/npm-bundled-5.0.0.tgz#5025d847cfd06c7b8d9432df01695d0133d9ee80" - integrity sha512-JLSpbzh6UUXIEoqPsYBvVNVmyrjVZ1fzEFbqxKkTJQkWBO3xFzFT+KDnSKQWwOQNbuWRwt5LSD6HOTLGIWzfrw== - dependencies: - npm-normalize-package-bin "^5.0.0" - -npm-install-checks@^6.0.0, npm-install-checks@^6.2.0: +npm-install-checks@^6.0.0, npm-install-checks@^6.2.0, npm-install-checks@^6.3.0: version "6.3.0" resolved "https://registry.yarnpkg.com/npm-install-checks/-/npm-install-checks-6.3.0.tgz#046552d8920e801fa9f919cad569545d60e826fe" integrity sha512-W29RiK/xtpCGqn6f3ixfRYGk+zRyr+Ew9F2E20BfXxT5/euLdA/Nm7fO7OeTGuAmTs30cpgInyJ0cYe708YTZw== dependencies: semver "^7.1.1" -npm-install-checks@^8.0.0: - version "8.0.0" - resolved "https://registry.yarnpkg.com/npm-install-checks/-/npm-install-checks-8.0.0.tgz#f5d18e909bb8318d85093e9d8f36ac427c1cbe30" - integrity sha512-ScAUdMpyzkbpxoNekQ3tNRdFI8SJ86wgKZSQZdUxT+bj0wVFpsEMWnkXP0twVe1gJyNF5apBWDJhhIbgrIViRA== - dependencies: - semver "^7.1.1" - 
npm-normalize-package-bin@^3.0.0: version "3.0.1" resolved "https://registry.yarnpkg.com/npm-normalize-package-bin/-/npm-normalize-package-bin-3.0.1.tgz#25447e32a9a7de1f51362c61a559233b89947832" integrity sha512-dMxCf+zZ+3zeQZXKxmyuCKlIDPGuv8EF940xbkC4kQVDTtqoh6rJFO+JTKSA6/Rwi0getWmtuy4Itup0AMcaDQ== -npm-normalize-package-bin@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/npm-normalize-package-bin/-/npm-normalize-package-bin-5.0.0.tgz#2b207ff260f2e525ddce93356614e2f736728f89" - integrity sha512-CJi3OS4JLsNMmr2u07OJlhcrPxCeOeP/4xq67aWNai6TNWWbTrlNDgl8NcFKVlcBKp18GPj+EzbNIgrBfZhsag== - npm-package-arg@11.0.2: version "11.0.2" resolved "https://registry.yarnpkg.com/npm-package-arg/-/npm-package-arg-11.0.2.tgz#1ef8006c4a9e9204ddde403035f7ff7d718251ca" @@ -13125,6 +12943,16 @@ npm-package-arg@11.0.2: semver "^7.3.5" validate-npm-package-name "^5.0.0" +npm-package-arg@^10.0.0, npm-package-arg@^10.1.0: + version "10.1.0" + resolved "https://registry.yarnpkg.com/npm-package-arg/-/npm-package-arg-10.1.0.tgz#827d1260a683806685d17193073cc152d3c7e9b1" + integrity sha512-uFyyCEmgBfZTtrKk/5xDfHp6+MdrqGotX/VoOyEEl3mBwiEE5FlBaePanazJSVMPT7vKepcjYBY2ztg9A3yPIA== + dependencies: + hosted-git-info "^6.0.0" + proc-log "^3.0.0" + semver "^7.3.5" + validate-npm-package-name "^5.0.0" + npm-package-arg@^11.0.0, npm-package-arg@^11.0.2: version "11.0.3" resolved "https://registry.yarnpkg.com/npm-package-arg/-/npm-package-arg-11.0.3.tgz#dae0c21199a99feca39ee4bfb074df3adac87e2d" @@ -13135,16 +12963,6 @@ npm-package-arg@^11.0.0, npm-package-arg@^11.0.2: semver "^7.3.5" validate-npm-package-name "^5.0.0" -npm-package-arg@^13.0.0, npm-package-arg@^13.0.2: - version "13.0.2" - resolved "https://registry.yarnpkg.com/npm-package-arg/-/npm-package-arg-13.0.2.tgz#72a80f2afe8329860e63854489415e9e9a2f78a7" - integrity sha512-IciCE3SY3uE84Ld8WZU23gAPPV9rIYod4F+rc+vJ7h7cwAJt9Vk6TVsK60ry7Uj3SRS3bqRRIGuTp9YVlk6WNA== - dependencies: - hosted-git-info "^9.0.0" - proc-log "^6.0.0" - 
semver "^7.3.5" - validate-npm-package-name "^7.0.0" - npm-packlist@8.0.2, npm-packlist@^8.0.0: version "8.0.2" resolved "https://registry.yarnpkg.com/npm-packlist/-/npm-packlist-8.0.2.tgz#5b8d1d906d96d21c85ebbeed2cf54147477c8478" @@ -13152,22 +12970,21 @@ npm-packlist@8.0.2, npm-packlist@^8.0.0: dependencies: ignore-walk "^6.0.4" -npm-packlist@^10.0.1: - version "10.0.3" - resolved "https://registry.yarnpkg.com/npm-packlist/-/npm-packlist-10.0.3.tgz#e22c039357faf81a75d1b0cdf53dd113f2bed9c7" - integrity sha512-zPukTwJMOu5X5uvm0fztwS5Zxyvmk38H/LfidkOMt3gbZVCyro2cD/ETzwzVPcWZA3JOyPznfUN/nkyFiyUbxg== +npm-packlist@^7.0.0: + version "7.0.4" + resolved "https://registry.yarnpkg.com/npm-packlist/-/npm-packlist-7.0.4.tgz#033bf74110eb74daf2910dc75144411999c5ff32" + integrity sha512-d6RGEuRrNS5/N84iglPivjaJPxhDbZmlbTwTDX2IbcRHG5bZCdtysYMhwiPvcF4GisXHGn7xsxv+GQ7T/02M5Q== dependencies: - ignore-walk "^8.0.0" - proc-log "^6.0.0" + ignore-walk "^6.0.0" -npm-pick-manifest@^11.0.1, npm-pick-manifest@^11.0.3: - version "11.0.3" - resolved "https://registry.yarnpkg.com/npm-pick-manifest/-/npm-pick-manifest-11.0.3.tgz#76cf6593a351849006c36b38a7326798e2a76d13" - integrity sha512-buzyCfeoGY/PxKqmBqn1IUJrZnUi1VVJTdSSRPGI60tJdUhUoSQFhs0zycJokDdOznQentgrpf8LayEHyyYlqQ== +npm-pick-manifest@^8.0.0, npm-pick-manifest@^8.0.1, npm-pick-manifest@^8.0.2: + version "8.0.2" + resolved "https://registry.yarnpkg.com/npm-pick-manifest/-/npm-pick-manifest-8.0.2.tgz#2159778d9c7360420c925c1a2287b5a884c713aa" + integrity sha512-1dKY+86/AIiq1tkKVD3l0WI+Gd3vkknVGAggsFeBkTvbhMQ1OND/LKkYv4JtXPKUJ8bOTCyLiqEg2P6QNdK+Gg== dependencies: - npm-install-checks "^8.0.0" - npm-normalize-package-bin "^5.0.0" - npm-package-arg "^13.0.0" + npm-install-checks "^6.0.0" + npm-normalize-package-bin "^3.0.0" + npm-package-arg "^10.0.0" semver "^7.3.5" npm-pick-manifest@^9.0.0, npm-pick-manifest@^9.0.1: @@ -13180,13 +12997,26 @@ npm-pick-manifest@^9.0.0, npm-pick-manifest@^9.0.1: npm-package-arg "^11.0.0" semver "^7.3.5" 
-npm-profile@^12.0.1: - version "12.0.1" - resolved "https://registry.yarnpkg.com/npm-profile/-/npm-profile-12.0.1.tgz#f5aa0d931a4a75013a7521c86c30048e497310de" - integrity sha512-Xs1mejJ1/9IKucCxdFMkiBJUre0xaxfCpbsO7DB7CadITuT4k68eI05HBlw4kj+Em1rsFMgeFNljFPYvPETbVQ== +npm-profile@^7.0.1: + version "7.0.1" + resolved "https://registry.yarnpkg.com/npm-profile/-/npm-profile-7.0.1.tgz#a37dae08b22e662ece2c6e08946f9fcd9fdef663" + integrity sha512-VReArOY/fCx5dWL66cbJ2OMogTQAVVQA//8jjmjkarboki3V7UJ0XbGFW+khRwiAJFQjuH0Bqr/yF7Y5RZdkMQ== + dependencies: + npm-registry-fetch "^14.0.0" + proc-log "^3.0.0" + +npm-registry-fetch@^14.0.0, npm-registry-fetch@^14.0.3, npm-registry-fetch@^14.0.5: + version "14.0.5" + resolved "https://registry.yarnpkg.com/npm-registry-fetch/-/npm-registry-fetch-14.0.5.tgz#fe7169957ba4986a4853a650278ee02e568d115d" + integrity sha512-kIDMIo4aBm6xg7jOttupWZamsZRkAqMqwqqbVXnUqstY5+tapvv6bkH/qMR76jdgV+YljEUCyWx3hRYMrJiAgA== dependencies: - npm-registry-fetch "^19.0.0" - proc-log "^6.0.0" + make-fetch-happen "^11.0.0" + minipass "^5.0.0" + minipass-fetch "^3.0.0" + minipass-json-stream "^1.0.1" + minizlib "^2.1.2" + npm-package-arg "^10.0.0" + proc-log "^3.0.0" npm-registry-fetch@^17.0.0, npm-registry-fetch@^17.0.1, npm-registry-fetch@^17.1.0: version "17.1.0" @@ -13202,20 +13032,6 @@ npm-registry-fetch@^17.0.0, npm-registry-fetch@^17.0.1, npm-registry-fetch@^17.1 npm-package-arg "^11.0.0" proc-log "^4.0.0" -npm-registry-fetch@^19.0.0, npm-registry-fetch@^19.1.1: - version "19.1.1" - resolved "https://registry.yarnpkg.com/npm-registry-fetch/-/npm-registry-fetch-19.1.1.tgz#51e96d21f409a9bc4f96af218a8603e884459024" - integrity sha512-TakBap6OM1w0H73VZVDf44iFXsOS3h+L4wVMXmbWOQroZgFhMch0juN6XSzBNlD965yIKvWg2dfu7NSiaYLxtw== - dependencies: - "@npmcli/redact" "^4.0.0" - jsonparse "^1.3.1" - make-fetch-happen "^15.0.0" - minipass "^7.0.2" - minipass-fetch "^5.0.0" - minizlib "^3.0.1" - npm-package-arg "^13.0.0" - proc-log "^6.0.0" - npm-run-path@^2.0.0: version 
"2.0.2" resolved "https://registry.yarnpkg.com/npm-run-path/-/npm-run-path-2.0.2.tgz#35a9232dfa35d7067b4cb2ddf2357b1871536c5f" @@ -13237,90 +13053,86 @@ npm-run-path@^5.1.0: dependencies: path-key "^4.0.0" -npm-run-path@^6.0.0: - version "6.0.0" - resolved "https://registry.yarnpkg.com/npm-run-path/-/npm-run-path-6.0.0.tgz#25cfdc4eae04976f3349c0b1afc089052c362537" - integrity sha512-9qny7Z9DsQU8Ou39ERsPU4OZQlSTP47ShQzuKZ6PRXpYLtIFgl/DEBYEXKlvcEa+9tHVcK8CF81Y2V72qaZhWA== - dependencies: - path-key "^4.0.0" - unicorn-magic "^0.3.0" - -npm-user-validate@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/npm-user-validate/-/npm-user-validate-4.0.0.tgz#f3c7e8360e46c651dbaf2fc4eea8f66df51ae6df" - integrity sha512-TP+Ziq/qPi/JRdhaEhnaiMkqfMGjhDLoh/oRfW+t5aCuIfJxIUxvwk6Sg/6ZJ069N/Be6gs00r+aZeJTfS9uHQ== +npm-user-validate@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/npm-user-validate/-/npm-user-validate-2.0.1.tgz#097afbf0a2351e2a8f478f1ba07960b368f2a25c" + integrity sha512-d17PKaF2h8LSGFl5j4b1gHOJt1fgH7YUcCm1kNSJvaLWWKXlBsuUvx0bBEkr0qhsVA9XP5LtRZ83hdlhm2QkgA== -npm@^11.6.2: - version "11.7.0" - resolved "https://registry.yarnpkg.com/npm/-/npm-11.7.0.tgz#897fa4af764b64fa384b50e071636e7497d4f6de" - integrity sha512-wiCZpv/41bIobCoJ31NStIWKfAxxYyD1iYnWCtiyns8s5v3+l8y0HCP/sScuH6B5+GhIfda4HQKiqeGZwJWhFw== +npm@^9.5.0: + version "9.9.4" + resolved "https://registry.yarnpkg.com/npm/-/npm-9.9.4.tgz#572bef36e61852c5a391bb3b4eb86c231b1365cd" + integrity sha512-NzcQiLpqDuLhavdyJ2J3tGJ/ni/ebcqHVFZkv1C4/6lblraUPbPgCJ4Vhb4oa3FFhRa2Yj9gA58jGH/ztKueNQ== dependencies: "@isaacs/string-locale-compare" "^1.1.0" - "@npmcli/arborist" "^9.1.9" - "@npmcli/config" "^10.4.5" - "@npmcli/fs" "^5.0.0" - "@npmcli/map-workspaces" "^5.0.3" - "@npmcli/metavuln-calculator" "^9.0.3" - "@npmcli/package-json" "^7.0.4" - "@npmcli/promise-spawn" "^9.0.1" - "@npmcli/redact" "^4.0.0" - "@npmcli/run-script" "^10.0.3" - "@sigstore/tuf" "^4.0.0" - abbrev "^4.0.0" + 
"@npmcli/arborist" "^6.5.0" + "@npmcli/config" "^6.4.0" + "@npmcli/fs" "^3.1.0" + "@npmcli/map-workspaces" "^3.0.4" + "@npmcli/package-json" "^4.0.1" + "@npmcli/promise-spawn" "^6.0.2" + "@npmcli/run-script" "^6.0.2" + abbrev "^2.0.0" archy "~1.0.0" - cacache "^20.0.3" - chalk "^5.6.2" - ci-info "^4.3.1" + cacache "^17.1.4" + chalk "^5.3.0" + ci-info "^4.0.0" cli-columns "^4.0.0" + cli-table3 "^0.6.3" + columnify "^1.6.0" fastest-levenshtein "^1.0.16" fs-minipass "^3.0.3" - glob "^13.0.0" + glob "^10.3.10" graceful-fs "^4.2.11" - hosted-git-info "^9.0.2" - ini "^6.0.0" - init-package-json "^8.2.4" - is-cidr "^6.0.1" - json-parse-even-better-errors "^5.0.0" - libnpmaccess "^10.0.3" - libnpmdiff "^8.0.12" - libnpmexec "^10.1.11" - libnpmfund "^7.0.12" - libnpmorg "^8.0.1" - libnpmpack "^9.0.12" - libnpmpublish "^11.1.3" - libnpmsearch "^9.0.1" - libnpmteam "^8.0.2" - libnpmversion "^8.0.3" - make-fetch-happen "^15.0.3" - minimatch "^10.1.1" - minipass "^7.1.1" + hosted-git-info "^6.1.3" + ini "^4.1.1" + init-package-json "^5.0.0" + is-cidr "^4.0.2" + json-parse-even-better-errors "^3.0.1" + libnpmaccess "^7.0.2" + libnpmdiff "^5.0.20" + libnpmexec "^6.0.4" + libnpmfund "^4.2.1" + libnpmhook "^9.0.3" + libnpmorg "^5.0.4" + libnpmpack "^5.0.20" + libnpmpublish "^7.5.1" + libnpmsearch "^6.0.2" + libnpmteam "^5.0.3" + libnpmversion "^4.0.2" + make-fetch-happen "^11.1.1" + minimatch "^9.0.3" + minipass "^7.0.4" minipass-pipeline "^1.2.4" ms "^2.1.2" - node-gyp "^12.1.0" - nopt "^9.0.0" - npm-audit-report "^7.0.0" - npm-install-checks "^8.0.0" - npm-package-arg "^13.0.2" - npm-pick-manifest "^11.0.3" - npm-profile "^12.0.1" - npm-registry-fetch "^19.1.1" - npm-user-validate "^4.0.0" - p-map "^7.0.4" - pacote "^21.0.4" - parse-conflict-json "^5.0.1" - proc-log "^6.1.0" + node-gyp "^9.4.1" + nopt "^7.2.0" + normalize-package-data "^5.0.0" + npm-audit-report "^5.0.0" + npm-install-checks "^6.3.0" + npm-package-arg "^10.1.0" + npm-pick-manifest "^8.0.2" + npm-profile "^7.0.1" 
+ npm-registry-fetch "^14.0.5" + npm-user-validate "^2.0.0" + npmlog "^7.0.1" + p-map "^4.0.0" + pacote "^15.2.0" + parse-conflict-json "^3.0.1" + proc-log "^3.0.0" qrcode-terminal "^0.12.0" - read "^5.0.1" - semver "^7.7.3" - spdx-expression-parse "^4.0.0" - ssri "^13.0.0" - supports-color "^10.2.2" - tar "^7.5.2" + read "^2.1.0" + semver "^7.6.0" + sigstore "^1.9.0" + spdx-expression-parse "^3.0.1" + ssri "^10.0.5" + supports-color "^9.4.0" + tar "^6.2.1" text-table "~0.2.0" - tiny-relative-date "^2.0.2" + tiny-relative-date "^1.3.0" treeverse "^3.0.0" - validate-npm-package-name "^7.0.0" - which "^6.0.0" + validate-npm-package-name "^5.0.0" + which "^3.0.1" + write-file-atomic "^5.0.1" npmlog@^5.0.1: version "5.0.1" @@ -13342,6 +13154,16 @@ npmlog@^6.0.0: gauge "^4.0.3" set-blocking "^2.0.0" +npmlog@^7.0.1: + version "7.0.1" + resolved "https://registry.yarnpkg.com/npmlog/-/npmlog-7.0.1.tgz#7372151a01ccb095c47d8bf1d0771a4ff1f53ac8" + integrity sha512-uJ0YFk/mCQpLBt+bxN88AKd+gyqZvZDbtiNxk6Waqcj2aPRyfVx8ITawkyQynxUagInjdYT1+qj4NfA5KJJUxg== + dependencies: + are-we-there-yet "^4.0.0" + console-control-strings "^1.1.0" + gauge "^5.0.0" + set-blocking "^2.0.0" + nth-check@^2.0.1: version "2.1.1" resolved "https://registry.yarnpkg.com/nth-check/-/nth-check-2.1.1.tgz#c9eab428effce36cd6b92c924bdb000ef1f1ed1d" @@ -13400,7 +13222,7 @@ nth-check@^2.0.1: "@nx/nx-win32-arm64-msvc" "20.8.1" "@nx/nx-win32-x64-msvc" "20.8.1" -object-assign@^4, object-assign@^4.0.1, object-assign@^4.1.1: +object-assign@^4, object-assign@^4.1.1: version "4.1.1" resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863" integrity sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg== @@ -13685,13 +13507,6 @@ p-each-series@^3.0.0: resolved "https://registry.yarnpkg.com/p-each-series/-/p-each-series-3.0.0.tgz#d1aed5e96ef29864c897367a7d2a628fdc960806" integrity 
sha512-lastgtAdoH9YaLyDa5i5z64q+kzOcQHsQ5SsZJD3q0VEyI8mq872S3geuNbRUQLVAE9siMfgKrpj7MloKFHruw== -p-event@^6.0.0: - version "6.0.1" - resolved "https://registry.yarnpkg.com/p-event/-/p-event-6.0.1.tgz#8f62a1e3616d4bc01fce3abda127e0383ef4715b" - integrity sha512-Q6Bekk5wpzW5qIyUP4gdMEujObYstZl6DMMOSenwBvV0BlE5LkDwkjs5yHbZmdCEq2o4RJx4tE1vwxFVf2FG1w== - dependencies: - p-timeout "^6.1.2" - p-filter@^4.0.0: version "4.1.0" resolved "https://registry.yarnpkg.com/p-filter/-/p-filter-4.1.0.tgz#fe0aa794e2dfad8ecf595a39a245484fcd09c6e4" @@ -13730,6 +13545,13 @@ p-limit@^3.0.2, p-limit@^3.1.0: dependencies: yocto-queue "^0.1.0" +p-limit@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-4.0.0.tgz#914af6544ed32bfa54670b061cafcbd04984b644" + integrity sha512-5b0R4txpzjPWVw/cXXUResoD4hb6U/x9BH08L7nw+GN1sezDzPdxeRvpc9c433fZhBan/wusjbCsqwqm4EIBIQ== + dependencies: + yocto-queue "^1.0.0" + p-locate@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-2.0.0.tgz#20a0103b222a70c8fd39cc2e580680f3dde5ec43" @@ -13751,6 +13573,13 @@ p-locate@^5.0.0: dependencies: p-limit "^3.0.2" +p-locate@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-6.0.0.tgz#3da9a49d4934b901089dca3302fa65dc5a05c04f" + integrity sha512-wPrq66Llhl7/4AGC6I+cqxT07LhXvWL08LNXz1fENOw0Ap4sRZZ/gZpTTJ5jpurzzzfS2W/Ge9BY3LgLjCShcw== + dependencies: + p-limit "^4.0.0" + p-map-series@2.1.0: version "2.1.0" resolved "https://registry.yarnpkg.com/p-map-series/-/p-map-series-2.1.0.tgz#7560d4c452d9da0c07e692fdbfe6e2c81a2a91f2" @@ -13768,11 +13597,6 @@ p-map@^7.0.1: resolved "https://registry.yarnpkg.com/p-map/-/p-map-7.0.3.tgz#7ac210a2d36f81ec28b736134810f7ba4418cdb6" integrity sha512-VkndIv2fIB99swvQoA65bm+fsmt6UNdGeIB0oxBs+WhAhdh08QA04JXpI7rbB9r08/nkbysKoya9rtDERYOYMA== -p-map@^7.0.2, p-map@^7.0.4: - version "7.0.4" - resolved "https://registry.yarnpkg.com/p-map/-/p-map-7.0.4.tgz#b81814255f542e252d5729dca4d66e5ec14935b8" - 
integrity sha512-tkAQEw8ysMzmkhgw8k+1U/iPhWNhykKnSk4Rd5zLoPJCuJaGRPo6YposrZgaxHKzDHdDWWZvE/Sk7hsL2X/CpQ== - p-pipe@3.1.0: version "3.1.0" resolved "https://registry.yarnpkg.com/p-pipe/-/p-pipe-3.1.0.tgz#48b57c922aa2e1af6a6404cb7c6bf0eb9cc8e60e" @@ -13826,11 +13650,6 @@ p-timeout@^3.2.0: dependencies: p-finally "^1.0.0" -p-timeout@^6.1.2: - version "6.1.4" - resolved "https://registry.yarnpkg.com/p-timeout/-/p-timeout-6.1.4.tgz#418e1f4dd833fa96a2e3f532547dd2abdb08dbc2" - integrity sha512-MyIV3ZA/PmyBN/ud8vV9XzwTrNtR4jFrObymZYnZqMmW0zA8Z17vnT0rBgFE/TlohB+YCHqXMgZzb3Csp49vqg== - p-timeout@^7.0.0: version "7.0.1" resolved "https://registry.yarnpkg.com/p-timeout/-/p-timeout-7.0.1.tgz#95680a6aa693c530f14ac337b8bd32d4ec6ae4f0" @@ -13853,11 +13672,40 @@ p-waterfall@2.1.1: dependencies: p-reduce "^2.0.0" +package-json-from-dist@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz#4f1471a010827a86f94cfd9b0727e36d267de505" + integrity sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw== + packet-reader@1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/packet-reader/-/packet-reader-1.0.0.tgz#9238e5480dedabacfe1fe3f2771063f164157d74" integrity sha512-HAKu/fG3HpHFO0AA8WE8q2g+gBJaZ9MG7fcKk+IJPLTGAD6Psw4443l+9DGRbOIh3/aXr7Phy0TjilYivJo5XQ== +pacote@^15.0.0, pacote@^15.0.8, pacote@^15.2.0: + version "15.2.0" + resolved "https://registry.yarnpkg.com/pacote/-/pacote-15.2.0.tgz#0f0dfcc3e60c7b39121b2ac612bf8596e95344d3" + integrity sha512-rJVZeIwHTUta23sIZgEIM62WYwbmGbThdbnkt81ravBplQv+HjyroqnLRNH2+sLJHcGZmLRmhPwACqhfTcOmnA== + dependencies: + "@npmcli/git" "^4.0.0" + "@npmcli/installed-package-contents" "^2.0.1" + "@npmcli/promise-spawn" "^6.0.1" + "@npmcli/run-script" "^6.0.0" + cacache "^17.0.0" + fs-minipass "^3.0.0" + minipass "^5.0.0" + npm-package-arg "^10.0.0" + npm-packlist "^7.0.0" + npm-pick-manifest "^8.0.0" + npm-registry-fetch "^14.0.0" + proc-log 
"^3.0.0" + promise-retry "^2.0.1" + read-package-json "^6.0.0" + read-package-json-fast "^3.0.0" + sigstore "^1.3.0" + ssri "^10.0.0" + tar "^6.1.11" + pacote@^18.0.0, pacote@^18.0.6: version "18.0.6" resolved "https://registry.yarnpkg.com/pacote/-/pacote-18.0.6.tgz#ac28495e24f4cf802ef911d792335e378e86fac7" @@ -13881,29 +13729,6 @@ pacote@^18.0.0, pacote@^18.0.6: ssri "^10.0.0" tar "^6.1.11" -pacote@^21.0.0, pacote@^21.0.2, pacote@^21.0.4: - version "21.0.4" - resolved "https://registry.yarnpkg.com/pacote/-/pacote-21.0.4.tgz#59cd2a2b5a4c8c1b625f33991a96b136d1c05d95" - integrity sha512-RplP/pDW0NNNDh3pnaoIWYPvNenS7UqMbXyvMqJczosiFWTeGGwJC2NQBLqKf4rGLFfwCOnntw1aEp9Jiqm1MA== - dependencies: - "@npmcli/git" "^7.0.0" - "@npmcli/installed-package-contents" "^4.0.0" - "@npmcli/package-json" "^7.0.0" - "@npmcli/promise-spawn" "^9.0.0" - "@npmcli/run-script" "^10.0.0" - cacache "^20.0.0" - fs-minipass "^3.0.0" - minipass "^7.0.2" - npm-package-arg "^13.0.0" - npm-packlist "^10.0.1" - npm-pick-manifest "^11.0.1" - npm-registry-fetch "^19.0.0" - proc-log "^6.0.0" - promise-retry "^2.0.1" - sigstore "^4.0.0" - ssri "^13.0.0" - tar "^7.4.3" - pako@~1.0.2: version "1.0.11" resolved "https://registry.yarnpkg.com/pako/-/pako-1.0.11.tgz#6c9599d340d54dfd3946380252a35705a6b992bf" @@ -13916,7 +13741,7 @@ parent-module@^1.0.0: dependencies: callsites "^3.0.0" -parse-conflict-json@^3.0.0: +parse-conflict-json@^3.0.0, parse-conflict-json@^3.0.1: version "3.0.1" resolved "https://registry.yarnpkg.com/parse-conflict-json/-/parse-conflict-json-3.0.1.tgz#67dc55312781e62aa2ddb91452c7606d1969960c" integrity sha512-01TvEktc68vwbJOtWZluyWeVGWjP+bZwXtPDMQVbBKzbJ/vZBif0L69KH1+cHv1SZ6e0FKLvjyHe8mqsIqYOmw== @@ -13925,15 +13750,6 @@ parse-conflict-json@^3.0.0: just-diff "^6.0.0" just-diff-apply "^5.2.0" -parse-conflict-json@^5.0.1: - version "5.0.1" - resolved "https://registry.yarnpkg.com/parse-conflict-json/-/parse-conflict-json-5.0.1.tgz#db4acd7472fb400c9808eb86611c2ff72f4c84ba" - integrity 
sha512-ZHEmNKMq1wyJXNwLxyHnluPfRAFSIliBvbK/UiOceROt4Xh9Pz0fq49NytIaeaCUf5VR86hwQ/34FCcNU5/LKQ== - dependencies: - json-parse-even-better-errors "^5.0.0" - just-diff "^6.0.0" - just-diff-apply "^5.2.0" - parse-entities@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/parse-entities/-/parse-entities-2.0.0.tgz#53c6eb5b9314a1f4ec99fa0fdf7ce01ecda0cbe8" @@ -13964,19 +13780,16 @@ parse-json@^5.0.0, parse-json@^5.2.0: json-parse-even-better-errors "^2.3.0" lines-and-columns "^1.1.6" -parse-json@^8.0.0, parse-json@^8.3.0: - version "8.3.0" - resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-8.3.0.tgz#88a195a2157025139a2317a4f2f9252b61304ed5" - integrity sha512-ybiGyvspI+fAoRQbIPRddCcSTV9/LsJbf0e/S85VLowVGzRmokfneg2kwVW/KU5rOXrPSbF1qAKPMgNTqqROQQ== +parse-json@^7.0.0: + version "7.1.1" + resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-7.1.1.tgz#68f7e6f0edf88c54ab14c00eb700b753b14e2120" + integrity sha512-SgOTCX/EZXtZxBE5eJ97P4yGM5n37BwRU+YMsH4vNzFqJV/oWFXXCmwFlgWUM4PrakybVOueJJ6pwHqSVhTFDw== dependencies: - "@babel/code-frame" "^7.26.2" - index-to-position "^1.1.0" - type-fest "^4.39.1" - -parse-ms@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/parse-ms/-/parse-ms-4.0.0.tgz#c0c058edd47c2a590151a718990533fd62803df4" - integrity sha512-TXfryirbmq34y8QBwgqCVLi+8oA3oWx2eAnSn62ITyEhEYaWRlVZ2DvMM9eZbMs/RfxPu/PK/aBLyGj4IrqMHw== + "@babel/code-frame" "^7.21.4" + error-ex "^1.3.2" + json-parse-even-better-errors "^3.0.0" + lines-and-columns "^2.0.3" + type-fest "^3.8.0" parse-path@^7.0.0: version "7.0.0" @@ -13992,13 +13805,6 @@ parse-url@^8.1.0: dependencies: parse-path "^7.0.0" -parse5-htmlparser2-tree-adapter@^6.0.0: - version "6.0.1" - resolved "https://registry.yarnpkg.com/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-6.0.1.tgz#2cdf9ad823321140370d4dbf5d3e92c7c8ddc6e6" - integrity sha512-qPuWvbLgvDGilKc5BoicRovlT4MtYT6JfJyBOMDsKoiT+GiuP5qyrPCnR9HcPECIJJmZh5jRndyNThnhhb/vlA== - dependencies: - 
parse5 "^6.0.1" - parse5-htmlparser2-tree-adapter@^7.0.0: version "7.0.0" resolved "https://registry.yarnpkg.com/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-7.0.0.tgz#23c2cc233bcf09bb7beba8b8a69d46b08c62c2f1" @@ -14007,16 +13813,6 @@ parse5-htmlparser2-tree-adapter@^7.0.0: domhandler "^5.0.2" parse5 "^7.0.0" -parse5@^5.1.1: - version "5.1.1" - resolved "https://registry.yarnpkg.com/parse5/-/parse5-5.1.1.tgz#f68e4e5ba1852ac2cadc00f4555fff6c2abb6178" - integrity sha512-ugq4DFI0Ptb+WWjAdOK16+u/nHfiIrcE+sh8kZMaM0WllQKLI9rOUq6c2b7cwPkXdzfQESqvoqK6ug7U/Yyzug== - -parse5@^6.0.1: - version "6.0.1" - resolved "https://registry.yarnpkg.com/parse5/-/parse5-6.0.1.tgz#e1a1c085c569b3dc08321184f19a39cc27f7c30b" - integrity sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw== - parse5@^7.0.0: version "7.1.2" resolved "https://registry.yarnpkg.com/parse5/-/parse5-7.1.2.tgz#0736bebbfd77793823240a23b7fc5e010b7f8e32" @@ -14047,6 +13843,11 @@ path-exists@^4.0.0: resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-4.0.0.tgz#513bdbe2d3b95d7762e8c1137efa195c6c61b5b3" integrity sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w== +path-exists@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-5.0.0.tgz#a6aad9489200b21fab31e49cf09277e5116fb9e7" + integrity sha512-RjhtfwJOxzcFmNOi6ltcbcu4Iu+FL3zEj83dk4kAS+fVpTxXLO1b38RvJgT/0QwvV/L3aY9TAnyv0EOqW4GoMQ== + path-is-absolute@^1.0.0: version "1.0.1" resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f" @@ -14072,13 +13873,13 @@ path-parse@^1.0.7: resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735" integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw== -path-scurry@^2.0.0: - version "2.0.1" - resolved 
"https://registry.yarnpkg.com/path-scurry/-/path-scurry-2.0.1.tgz#4b6572376cfd8b811fca9cd1f5c24b3cbac0fe10" - integrity sha512-oWyT4gICAu+kaA7QWk/jvCHWarMKNs6pXOGWKDTr7cw4IGcUbW+PeTfbaQiLGheFRpjo6O9J0PmyMfQPjH71oA== +path-scurry@^1.11.1, path-scurry@^1.6.1: + version "1.11.1" + resolved "https://registry.yarnpkg.com/path-scurry/-/path-scurry-1.11.1.tgz#7960a668888594a0720b12a911d1a742ab9f11d2" + integrity sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA== dependencies: - lru-cache "^11.0.0" - minipass "^7.1.2" + lru-cache "^10.2.0" + minipass "^5.0.0 || ^6.0.2 || ^7.0.0" path-to-regexp@0.1.12, path-to-regexp@~0.1.12: version "0.1.12" @@ -14117,6 +13918,11 @@ path-type@^4.0.0: resolved "https://registry.yarnpkg.com/path-type/-/path-type-4.0.0.tgz#84ed01c0a7ba380afe09d90a8c180dcd9d03043b" integrity sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw== +path-type@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/path-type/-/path-type-6.0.0.tgz#2f1bb6791a91ce99194caede5d6c5920ed81eb51" + integrity sha512-Vj7sf++t5pBD637NSfkxpHSMfWaeig5+DKWLhcqIYx6mWQz5hdJTGDVMQiJcw1ZYkhs7AazKDGpRVji1LJCZUQ== + peek-readable@^4.1.0: version "4.1.0" resolved "https://registry.yarnpkg.com/peek-readable/-/peek-readable-4.1.0.tgz#4ece1111bf5c2ad8867c314c81356847e8a62e72" @@ -14223,7 +14029,12 @@ picomatch@^2.0.4, picomatch@^2.2.1, picomatch@^2.2.3, picomatch@^2.3.1: resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.3.1.tgz#3ba3833733646d9d3e4995946c1365a67fb07a42" integrity sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA== -picomatch@^4.0.2, picomatch@^4.0.3: +picomatch@^2.0.5: + version "2.3.2" + resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.3.2.tgz#5a942915e26b372dc0f0e6753149a16e6b1c5601" + integrity sha512-V7+vQEJ06Z+c5tSye8S+nHUfI51xoXIXjHQ99cQtKUkQqqO1kO/KCJUfZXuB47h/YBlDhah2H3hdUGXn8ie0oA== + 
+picomatch@^4.0.2: version "4.0.3" resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-4.0.3.tgz#796c76136d1eead715db1e7bad785dedd695a042" integrity sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q== @@ -14355,14 +14166,6 @@ postcss-selector-parser@^6.0.10: cssesc "^3.0.0" util-deprecate "^1.0.2" -postcss-selector-parser@^7.0.0: - version "7.1.1" - resolved "https://registry.yarnpkg.com/postcss-selector-parser/-/postcss-selector-parser-7.1.1.tgz#e75d2e0d843f620e5df69076166f4e16f891cb9f" - integrity sha512-orRsuYpJVw8LdAwqqLykBj9ecS5/cRHlI5+nvTo8LcCKmzDmqVORXtOIYEEQuL9D4BxtA1lm5isAqzQZCoQ6Eg== - dependencies: - cssesc "^3.0.0" - util-deprecate "^1.0.2" - postgres-array@~2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/postgres-array/-/postgres-array-2.0.0.tgz#48f8fce054fbc69671999329b8834b772652d82e" @@ -14457,23 +14260,16 @@ pretty-format@^29.0.0, pretty-format@^29.7.0: ansi-styles "^5.0.0" react-is "^18.0.0" -pretty-ms@^9.2.0: - version "9.3.0" - resolved "https://registry.yarnpkg.com/pretty-ms/-/pretty-ms-9.3.0.tgz#dd2524fcb3c326b4931b2272dfd1e1a8ed9a9f5a" - integrity sha512-gjVS5hOP+M3wMm5nmNOucbIrqudzs9v/57bWRHQWLYklXqoXKrVfYW2W9+glfGsqtPgpiz5WwyEEB+ksXIx3gQ== - dependencies: - parse-ms "^4.0.0" +proc-log@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/proc-log/-/proc-log-3.0.0.tgz#fb05ef83ccd64fd7b20bbe9c8c1070fc08338dd8" + integrity sha512-++Vn7NS4Xf9NacaU9Xq3URUuqZETPsf8L4j5/ckhaRYsfPeRyzGw+iDjFhV/Jr3uNmTvvddEJFWh5R1gRgUH8A== proc-log@^4.0.0, proc-log@^4.1.0, proc-log@^4.2.0: version "4.2.0" resolved "https://registry.yarnpkg.com/proc-log/-/proc-log-4.2.0.tgz#b6f461e4026e75fdfe228b265e9f7a00779d7034" integrity sha512-g8+OnU/L2v+wyiVK+D5fA34J7EH8jZ8DDlvwhRCMxmMj7UCBvxiO1mGeN+36JXIKF4zevU4kRBd8lVgG9vLelA== -proc-log@^6.0.0, proc-log@^6.1.0: - version "6.1.0" - resolved "https://registry.yarnpkg.com/proc-log/-/proc-log-6.1.0.tgz#18519482a37d5198e231133a70144a50f21f0215" - 
integrity sha512-iG+GYldRf2BQ0UDUAd6JQ/RwzaQy6mXmsk/IzlYyal4A4SNFw54MeH4/tLkF4I5WoWG9SQwuqWzS99jaFQHBuQ== - process-nextick-args@~2.0.0: version "2.0.1" resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-2.0.1.tgz#7820d9b16120cc55ca9ae7792680ae7dba6d7fe2" @@ -14509,11 +14305,6 @@ proggy@^2.0.0: resolved "https://registry.yarnpkg.com/proggy/-/proggy-2.0.0.tgz#154bb0e41d3125b518ef6c79782455c2c47d94e1" integrity sha512-69agxLtnI8xBs9gUGqEnK26UfiexpHy+KUpBQWabiytQjnn5wFY8rklAi7GRfABIuPNnQ/ik48+LGLkYYJcy4A== -proggy@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/proggy/-/proggy-4.0.0.tgz#85fa89d7c81bc3fb77992a80f47bb1e17c610fa3" - integrity sha512-MbA4R+WQT76ZBm/5JUpV9yqcJt92175+Y0Bodg3HgiXzrmKu7Ggq+bpn6y6wHH+gN9NcyKn3yg1+d47VaKwNAQ== - progress@2.0.3, progress@^2.0.3: version "2.0.3" resolved "https://registry.yarnpkg.com/progress/-/progress-2.0.3.tgz#7e8cf8d8f5b8f239c1bc68beb4eb78567d572ef8" @@ -14524,6 +14315,11 @@ promise-all-reject-late@^1.0.0: resolved "https://registry.yarnpkg.com/promise-all-reject-late/-/promise-all-reject-late-1.0.1.tgz#f8ebf13483e5ca91ad809ccc2fcf25f26f8643c2" integrity sha512-vuf0Lf0lOxyQREH7GDIOUMLS7kz+gs8i6B+Yi8dC68a2sychGrHTJYghMBD6k7eUcH0H5P73EckCA48xijWqXw== +promise-call-limit@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/promise-call-limit/-/promise-call-limit-1.0.2.tgz#f64b8dd9ef7693c9c7613e7dfe8d6d24de3031ea" + integrity sha512-1vTUnfI2hzui8AEIixbdAJlFY4LFDXqQswy/2eOlThAscXCY4It8FdVuI0fMJGAB2aWGbdQf/gv0skKYXmdrHA== + promise-call-limit@^3.0.1: version "3.0.2" resolved "https://registry.yarnpkg.com/promise-call-limit/-/promise-call-limit-3.0.2.tgz#524b7f4b97729ff70417d93d24f46f0265efa4f9" @@ -14562,13 +14358,6 @@ promzard@^1.0.0: dependencies: read "^3.0.1" -promzard@^3.0.1: - version "3.0.1" - resolved "https://registry.yarnpkg.com/promzard/-/promzard-3.0.1.tgz#e42b9b75197661e5707dc7077da8dfd3bdfd9e3d" - integrity 
sha512-M5mHhWh+Adz0BIxgSrqcc6GTCSconR7zWQV9vnOSptNtr6cSFlApLc28GbQhuN6oOWBQeV2C0bNE47JCY/zu3Q== - dependencies: - read "^5.0.0" - propagate@^2.0.0: version "2.0.1" resolved "https://registry.yarnpkg.com/propagate/-/propagate-2.0.1.tgz#40cdedab18085c792334e64f0ac17256d38f9a45" @@ -14630,7 +14419,14 @@ qrcode-terminal@^0.12.0: resolved "https://registry.yarnpkg.com/qrcode-terminal/-/qrcode-terminal-0.12.0.tgz#bb5b699ef7f9f0505092a3748be4464fe71b5819" integrity sha512-EXtzRZmC+YGmGlDFbXKxQiMZNwCLEO6BANKXG4iCtSIM0yqc/pappSx3RIKr4r0uh5JsBckOXeKrB3Iz7mdQpQ== -qs@6.13.0, qs@>=6.14.1, qs@^6.11.2, qs@^6.14.0, qs@^6.14.1, qs@^6.5.2, qs@~6.14.0: +qs@6.13.0: + version "6.13.0" + resolved "https://registry.yarnpkg.com/qs/-/qs-6.13.0.tgz#6ca3bd58439f7e245655798997787b0d88a51906" + integrity sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg== + dependencies: + side-channel "^1.0.6" + +qs@^6.11.2, qs@^6.14.0, qs@^6.14.1, qs@^6.5.2, qs@~6.14.0: version "6.14.2" resolved "https://registry.yarnpkg.com/qs/-/qs-6.14.2.tgz#b5634cf9d9ad9898e31fba3504e866e8efb6798c" integrity sha512-V/yCWTTF7VJ9hIh18Ugr2zhJMP01MY7c5kh4J870L7imm6/DIzBsNLTXzMwUA3yZ5b/KBqLx8Kp3uRvd7xSe3Q== @@ -14727,11 +14523,6 @@ read-cmd-shim@4.0.0, read-cmd-shim@^4.0.0: resolved "https://registry.yarnpkg.com/read-cmd-shim/-/read-cmd-shim-4.0.0.tgz#640a08b473a49043e394ae0c7a34dd822c73b9bb" integrity sha512-yILWifhaSEEytfXI76kB9xEEiG1AiozaCJZ83A87ytjRiN+jVibXjedjCRNjoZviinhG+4UkalO3mWTd8u5O0Q== -read-cmd-shim@^6.0.0: - version "6.0.0" - resolved "https://registry.yarnpkg.com/read-cmd-shim/-/read-cmd-shim-6.0.0.tgz#98f5c8566e535829f1f8afb1595aaf05fd0f3970" - integrity sha512-1zM5HuOfagXCBWMN83fuFI/x+T/UhZ7k+KIzhrHXcQoeX5+7gmaDYjELQHmmzIodumBHeByBJT4QYS7ufAgs7A== - read-package-json-fast@^3.0.0, read-package-json-fast@^3.0.2: version "3.0.2" resolved "https://registry.yarnpkg.com/read-package-json-fast/-/read-package-json-fast-3.0.2.tgz#394908a9725dc7a5f14e70c8e7556dff1d2b1049" @@ 
-14740,23 +14531,24 @@ read-package-json-fast@^3.0.0, read-package-json-fast@^3.0.2: json-parse-even-better-errors "^3.0.0" npm-normalize-package-bin "^3.0.0" -read-package-up@^11.0.0: - version "11.0.0" - resolved "https://registry.yarnpkg.com/read-package-up/-/read-package-up-11.0.0.tgz#71fb879fdaac0e16891e6e666df22de24a48d5ba" - integrity sha512-MbgfoNPANMdb4oRBNg5eqLbB2t2r+o5Ua1pNt8BqGp4I0FJZhuVSOj3PaBPni4azWuSzEdNn2evevzVmEk1ohQ== +read-package-json@^6.0.0: + version "6.0.4" + resolved "https://registry.yarnpkg.com/read-package-json/-/read-package-json-6.0.4.tgz#90318824ec456c287437ea79595f4c2854708836" + integrity sha512-AEtWXYfopBj2z5N5PbkAOeNHRPUg5q+Nen7QLxV8M2zJq1ym6/lCz3fYNTCXe19puu2d06jfHhrP7v/S2PtMMw== dependencies: - find-up-simple "^1.0.0" - read-pkg "^9.0.0" - type-fest "^4.6.0" + glob "^10.2.2" + json-parse-even-better-errors "^3.0.0" + normalize-package-data "^5.0.0" + npm-normalize-package-bin "^3.0.0" -read-package-up@^12.0.0: - version "12.0.0" - resolved "https://registry.yarnpkg.com/read-package-up/-/read-package-up-12.0.0.tgz#7ae889586f397b7a291ca59ce08caf7e9f68a61c" - integrity sha512-Q5hMVBYur/eQNWDdbF4/Wqqr9Bjvtrw2kjGxxBbKLbx8bVCL8gcArjTy8zDUuLGQicftpMuU0riQNcAsbtOVsw== +read-pkg-up@^10.0.0: + version "10.1.0" + resolved "https://registry.yarnpkg.com/read-pkg-up/-/read-pkg-up-10.1.0.tgz#2d13ab732d2f05d6e8094167c2112e2ee50644f4" + integrity sha512-aNtBq4jR8NawpKJQldrQcSW9y/d+KWH4v24HWkHljOZ7H0av+YTGANBzRh9A5pw7v/bLVsLVPpOhJ7gHNVy8lA== dependencies: - find-up-simple "^1.0.1" - read-pkg "^10.0.0" - type-fest "^5.2.0" + find-up "^6.3.0" + read-pkg "^8.1.0" + type-fest "^4.2.0" read-pkg-up@^3.0.0: version "3.0.0" @@ -14775,17 +14567,6 @@ read-pkg-up@^7.0.1: read-pkg "^5.2.0" type-fest "^0.8.1" -read-pkg@^10.0.0: - version "10.0.0" - resolved "https://registry.yarnpkg.com/read-pkg/-/read-pkg-10.0.0.tgz#06401f0331115e9fba9880cb3f2ae1efa3db00e4" - integrity 
sha512-A70UlgfNdKI5NSvTTfHzLQj7NJRpJ4mT5tGafkllJ4wh71oYuGm/pzphHcmW4s35iox56KSK721AihodoXSc/A== - dependencies: - "@types/normalize-package-data" "^2.4.4" - normalize-package-data "^8.0.0" - parse-json "^8.3.0" - type-fest "^5.2.0" - unicorn-magic "^0.3.0" - read-pkg@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/read-pkg/-/read-pkg-3.0.0.tgz#9cbc686978fee65d16c00e2b19c237fcf6e38389" @@ -14805,16 +14586,22 @@ read-pkg@^5.2.0: parse-json "^5.0.0" type-fest "^0.6.0" -read-pkg@^9.0.0: - version "9.0.1" - resolved "https://registry.yarnpkg.com/read-pkg/-/read-pkg-9.0.1.tgz#b1b81fb15104f5dbb121b6bbdee9bbc9739f569b" - integrity sha512-9viLL4/n1BJUCT1NXVTdS1jtm80yDEgR5T4yCelII49Mbj0v1rZdKqj7zCiYdbB0CuCgdrvHcNogAKTFPBocFA== +read-pkg@^8.0.0, read-pkg@^8.1.0: + version "8.1.0" + resolved "https://registry.yarnpkg.com/read-pkg/-/read-pkg-8.1.0.tgz#6cf560b91d90df68bce658527e7e3eee75f7c4c7" + integrity sha512-PORM8AgzXeskHO/WEv312k9U03B8K9JSiWF/8N9sUuFjBa+9SF2u6K7VClzXwDXab51jCd8Nd36CNM+zR97ScQ== dependencies: - "@types/normalize-package-data" "^2.4.3" + "@types/normalize-package-data" "^2.4.1" normalize-package-data "^6.0.0" - parse-json "^8.0.0" - type-fest "^4.6.0" - unicorn-magic "^0.1.0" + parse-json "^7.0.0" + type-fest "^4.2.0" + +read@^2.0.0, read@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/read/-/read-2.1.0.tgz#69409372c54fe3381092bc363a00650b6ac37218" + integrity sha512-bvxi1QLJHcaywCAEsAk4DG3nVoqiY2Csps3qzWalhj5hFqRn1d/OixkFXtLO1PrgHUcAP0FNaSY/5GYNfENFFQ== + dependencies: + mute-stream "~1.0.0" read@^3.0.1: version "3.0.1" @@ -14823,13 +14610,6 @@ read@^3.0.1: dependencies: mute-stream "^1.0.0" -read@^5.0.0, read@^5.0.1: - version "5.0.1" - resolved "https://registry.yarnpkg.com/read/-/read-5.0.1.tgz#e6b0a84743406182fdfc20b2418a11b39b7ef837" - integrity sha512-+nsqpqYkkpet2UVPG8ZiuE8d113DK4vHYEoEhcrXBAlPiq6di7QRTuNiKQAbaRYegobuX2BpZ6QjanKOXnJdTA== - dependencies: - mute-stream "^3.0.0" - readable-stream@3, 
readable-stream@^3.0.0, readable-stream@^3.0.2, readable-stream@^3.1.1, readable-stream@^3.4.0, readable-stream@^3.6.0: version "3.6.2" resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-3.6.2.tgz#56a9b36ea965c00c5a93ef31eb111a0f11056967" @@ -15241,45 +15021,44 @@ semantic-release-slack-bot@^4.0.2: node-fetch "^2.3.0" slackify-markdown "^4.3.0" -semantic-release@^21.0.5, semantic-release@^25.0.0: - version "25.0.2" - resolved "https://registry.yarnpkg.com/semantic-release/-/semantic-release-25.0.2.tgz#efd4fa16ce3518a747e737baf3f69fd82979d98e" - integrity sha512-6qGjWccl5yoyugHt3jTgztJ9Y0JVzyH8/Voc/D8PlLat9pwxQYXz7W1Dpnq5h0/G5GCYGUaDSlYcyk3AMh5A6g== +semantic-release@^21.0.5: + version "21.1.2" + resolved "https://registry.yarnpkg.com/semantic-release/-/semantic-release-21.1.2.tgz#f4c5ba7c17b53ce90bac4fa6ccf21178d0384445" + integrity sha512-kz76azHrT8+VEkQjoCBHE06JNQgTgsC4bT8XfCzb7DHcsk9vG3fqeMVik8h5rcWCYi2Fd+M3bwA7BG8Z8cRwtA== dependencies: - "@semantic-release/commit-analyzer" "^13.0.1" + "@semantic-release/commit-analyzer" "^10.0.0" "@semantic-release/error" "^4.0.0" - "@semantic-release/github" "^12.0.0" - "@semantic-release/npm" "^13.1.1" - "@semantic-release/release-notes-generator" "^14.1.0" + "@semantic-release/github" "^9.0.0" + "@semantic-release/npm" "^10.0.2" + "@semantic-release/release-notes-generator" "^11.0.0" aggregate-error "^5.0.0" - cosmiconfig "^9.0.0" + cosmiconfig "^8.0.0" debug "^4.0.0" - env-ci "^11.0.0" - execa "^9.0.0" - figures "^6.0.0" - find-versions "^6.0.0" + env-ci "^9.0.0" + execa "^8.0.0" + figures "^5.0.0" + find-versions "^5.1.0" get-stream "^6.0.0" git-log-parser "^1.2.0" - hook-std "^4.0.0" - hosted-git-info "^9.0.0" - import-from-esm "^2.0.0" + hook-std "^3.0.0" + hosted-git-info "^7.0.0" lodash-es "^4.17.21" - marked "^15.0.0" - marked-terminal "^7.3.0" + marked "^5.0.0" + marked-terminal "^5.1.1" micromatch "^4.0.2" p-each-series "^3.0.0" p-reduce "^3.0.0" - read-package-up "^12.0.0" + read-pkg-up 
"^10.0.0" resolve-from "^5.0.0" semver "^7.3.2" - semver-diff "^5.0.0" + semver-diff "^4.0.0" signale "^1.2.1" - yargs "^18.0.0" + yargs "^17.5.1" -semver-diff@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/semver-diff/-/semver-diff-5.0.0.tgz#62a8396f44c11386c83d1e57caedc806c6c7755c" - integrity sha512-0HbGtOm+S7T6NGQ/pxJSJipJvc4DK3FcRVMRkhsIwJDJ4Jcz5DQC1cPPzB5GhzyHjwttW878HaWQq46CkL3cqg== +semver-diff@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/semver-diff/-/semver-diff-4.0.0.tgz#3afcf5ed6d62259f5c72d0d5d50dffbdc9680df5" + integrity sha512-0Ju4+6A8iOnpL/Thra7dZsSlOHYAHIeMxfhWQRI1/VLcT3WDBZKKtQt/QkBOsiIN9ZpuvHE6cGZ0x4glCMmfiA== dependencies: semver "^7.3.5" @@ -15310,10 +15089,10 @@ semver@^6.0.0, semver@^6.3.0, semver@^6.3.1: resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.1.tgz#556d2ef8689146e46dcea4bfdd095f3434dffcb4" integrity sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA== -semver@^7.5.2, semver@^7.7.2, semver@^7.7.3: - version "7.7.3" - resolved "https://registry.yarnpkg.com/semver/-/semver-7.7.3.tgz#4b5f4143d007633a8dc671cd0a6ef9147b8bb946" - integrity sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q== +semver@^7.6.0: + version "7.7.4" + resolved "https://registry.yarnpkg.com/semver/-/semver-7.7.4.tgz#28464e36060e991fa7a11d0279d2d3f3b57a7e8a" + integrity sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA== semver@^7.6.3: version "7.7.1" @@ -15615,7 +15394,7 @@ side-channel@^1.0.4: get-intrinsic "^1.0.2" object-inspect "^1.9.0" -side-channel@^1.1.0: +side-channel@^1.0.6, side-channel@^1.1.0: version "1.1.0" resolved "https://registry.yarnpkg.com/side-channel/-/side-channel-1.1.0.tgz#c3fcff9c4da932784873335ec9765fa94ff66bc9" integrity sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw== @@ -15650,6 +15429,17 @@ signale@^1.2.1, 
signale@^1.4.0: figures "^2.0.0" pkg-conf "^2.1.0" +sigstore@^1.3.0, sigstore@^1.4.0, sigstore@^1.9.0: + version "1.9.0" + resolved "https://registry.yarnpkg.com/sigstore/-/sigstore-1.9.0.tgz#1e7ad8933aa99b75c6898ddd0eeebc3eb0d59875" + integrity sha512-0Zjz0oe37d08VeOtBIuB6cRriqXse2e8w+7yIy2XSXjshRKxbc2KkhXjL229jXSxEm7UbcjS76wcJDGQddVI9A== + dependencies: + "@sigstore/bundle" "^1.1.0" + "@sigstore/protobuf-specs" "^0.2.0" + "@sigstore/sign" "^1.0.0" + "@sigstore/tuf" "^1.0.3" + make-fetch-happen "^11.0.1" + sigstore@^2.2.0: version "2.3.1" resolved "https://registry.yarnpkg.com/sigstore/-/sigstore-2.3.1.tgz#0755dd2cc4820f2e922506da54d3d628e13bfa39" @@ -15662,18 +15452,6 @@ sigstore@^2.2.0: "@sigstore/tuf" "^2.3.4" "@sigstore/verify" "^1.2.1" -sigstore@^4.0.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/sigstore/-/sigstore-4.1.0.tgz#d34b92a544a05e003a2430209d26d8dfafd805a0" - integrity sha512-/fUgUhYghuLzVT/gaJoeVehLCgZiUxPCPMcyVNY0lIf/cTCz58K/WTI7PefDarXxp9nUKpEwg1yyz3eSBMTtgA== - dependencies: - "@sigstore/bundle" "^4.0.0" - "@sigstore/core" "^3.1.0" - "@sigstore/protobuf-specs" "^0.5.0" - "@sigstore/sign" "^4.1.0" - "@sigstore/tuf" "^4.0.1" - "@sigstore/verify" "^3.1.0" - simple-concat@^1.0.0: version "1.0.1" resolved "https://registry.yarnpkg.com/simple-concat/-/simple-concat-1.0.1.tgz#f46976082ba35c2263f1c8ab5edfe26c41c9552f" @@ -15712,13 +15490,6 @@ sisteransi@^1.0.5: resolved "https://registry.yarnpkg.com/sisteransi/-/sisteransi-1.0.5.tgz#134d681297756437cc05ca01370d3a7a571075ed" integrity sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg== -skin-tone@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/skin-tone/-/skin-tone-2.0.0.tgz#4e3933ab45c0d4f4f781745d64b9f4c208e41237" - integrity sha512-kUMbT1oBJCpgrnKoSr0o6wPtvRWT9W9UKvGLwfJYO2WuahZRHOpEyL1ckyMGgMWh0UdpmaoFqKKD29WTomNEGA== - dependencies: - unicode-emoji-modifier-base "^1.0.0" - slackify-markdown@^4.3.0: version "4.4.0" resolved 
"https://registry.yarnpkg.com/slackify-markdown/-/slackify-markdown-4.4.0.tgz#706a56fd09f536c47588e2c12f1e0ee6930c5e8d" @@ -15737,6 +15508,11 @@ slash@3.0.0, slash@^3.0.0: resolved "https://registry.yarnpkg.com/slash/-/slash-3.0.0.tgz#6539be870c165adbd5240220dbe361f1bc4d4634" integrity sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q== +slash@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/slash/-/slash-5.1.0.tgz#be3adddcdf09ac38eebe8dcdc7b1a57a75b095ce" + integrity sha512-ZA6oR3T/pEyuqwMgAKT0/hAv8oAXckzbkmR0UkUosQ+Mc4RxGoJkRmwHgHufaenlyAgE1Mxgpdcrf75y6XcnDg== + slice-ansi@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/slice-ansi/-/slice-ansi-4.0.0.tgz#500e8dd0fd55b05815086255b3195adf2a45fe6b" @@ -15760,6 +15536,15 @@ socks-proxy-agent@^6.0.0: debug "^4.3.3" socks "^2.6.2" +socks-proxy-agent@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/socks-proxy-agent/-/socks-proxy-agent-7.0.0.tgz#dc069ecf34436621acb41e3efa66ca1b5fed15b6" + integrity sha512-Fgl0YPZ902wEsAyiQ+idGd1A7rSFx/ayC1CQVMw5P+EQx2V0SgpGtf6OKFhVjPflPUl9YMmEOnmfjCdMUsygww== + dependencies: + agent-base "^6.0.2" + debug "^4.3.3" + socks "^2.6.2" + socks-proxy-agent@^8.0.3: version "8.0.5" resolved "https://registry.yarnpkg.com/socks-proxy-agent/-/socks-proxy-agent-8.0.5.tgz#b9cdb4e7e998509d7659d689ce7697ac21645bee" @@ -15845,7 +15630,7 @@ spdx-exceptions@^2.1.0: resolved "https://registry.yarnpkg.com/spdx-exceptions/-/spdx-exceptions-2.3.0.tgz#3f28ce1a77a00372683eade4a433183527a2163d" integrity sha512-/tTrYOC7PPI1nUAgx34hUpqXuyJG+DTHJTnIULG4rDygi4xu/tfgmq1e1cIRwRzwZgo4NLySi+ricLkZkw4i5A== -spdx-expression-parse@^3.0.0: +spdx-expression-parse@^3.0.0, spdx-expression-parse@^3.0.1: version "3.0.1" resolved "https://registry.yarnpkg.com/spdx-expression-parse/-/spdx-expression-parse-3.0.1.tgz#cf70f50482eefdc98e3ce0a6833e4a53ceeba679" integrity 
sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q== @@ -15853,14 +15638,6 @@ spdx-expression-parse@^3.0.0: spdx-exceptions "^2.1.0" spdx-license-ids "^3.0.0" -spdx-expression-parse@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/spdx-expression-parse/-/spdx-expression-parse-4.0.0.tgz#a23af9f3132115465dac215c099303e4ceac5794" - integrity sha512-Clya5JIij/7C6bRR22+tnGXbc4VKlibKSVj2iHvVeX5iMW7s1SIQlqu699JkODJJIhh/pUu8L0/VLh8xflD+LQ== - dependencies: - spdx-exceptions "^2.1.0" - spdx-license-ids "^3.0.0" - spdx-license-ids@^3.0.0: version "3.0.16" resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-3.0.16.tgz#a14f64e0954f6e25cc6587bd4f392522db0d998f" @@ -15973,20 +15750,13 @@ ssri@^10.0.0: dependencies: minipass "^7.0.3" -ssri@^10.0.6: +ssri@^10.0.1, ssri@^10.0.5, ssri@^10.0.6: version "10.0.6" resolved "https://registry.yarnpkg.com/ssri/-/ssri-10.0.6.tgz#a8aade2de60ba2bce8688e3fa349bad05c7dc1e5" integrity sha512-MGrFH9Z4NP9Iyhqn16sDtBpRRNJ0Y2hNa6D65h736fVSaPCHr4DM4sWUNvVaSuC+0OBGhwsrydQwmgfg5LncqQ== dependencies: minipass "^7.0.3" -ssri@^13.0.0: - version "13.0.0" - resolved "https://registry.yarnpkg.com/ssri/-/ssri-13.0.0.tgz#4226b303dc474003d88905f9098cb03361106c74" - integrity sha512-yizwGBpbCn4YomB2lzhZqrHLJoqFGXihNbib3ozhqF/cIp5ue+xSmOQrjNasEE62hFxsCcg/V/z23t4n8jMEng== - dependencies: - minipass "^7.0.3" - ssri@^8.0.0, ssri@^8.0.1: version "8.0.1" resolved "https://registry.yarnpkg.com/ssri/-/ssri-8.0.1.tgz#638e4e439e2ffbd2cd289776d5ca457c4f51a2af" @@ -15994,6 +15764,13 @@ ssri@^8.0.0, ssri@^8.0.1: dependencies: minipass "^3.1.1" +ssri@^9.0.0: + version "9.0.1" + resolved "https://registry.yarnpkg.com/ssri/-/ssri-9.0.1.tgz#544d4c357a8d7b71a19700074b6883fcb4eae057" + integrity sha512-o57Wcn66jMQvfHG1FlYbWeZWW/dHZhJXjpIcTfXldXEk5nz5lStPo3mK0OJQfGR3RbZUlbISexbljkJzuEj/8Q== + dependencies: + minipass "^3.1.1" + stack-utils@^2.0.3: version "2.0.6" resolved 
"https://registry.yarnpkg.com/stack-utils/-/stack-utils-2.0.6.tgz#aaf0748169c02fc33c8232abccf933f54a1cc34f" @@ -16068,7 +15845,7 @@ string-similarity@^4.0.1: resolved "https://registry.yarnpkg.com/string-similarity/-/string-similarity-4.0.4.tgz#42d01ab0b34660ea8a018da8f56a3309bb8b2a5b" integrity sha512-/q/8Q4Bl4ZKAPjj8WerIBJWALKkaPRfrvhfF8k/B23i4nzrlRj2/go1m90In7nG/3XDSbOo0+pu6RvCTM9RGMQ== -"string-width@^1.0.2 || 2 || 3 || 4", string-width@^4.0.0, string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.3: +"string-width-cjs@npm:string-width@^4.2.0", "string-width@^1.0.2 || 2 || 3 || 4", string-width@^4.0.0, string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.3: version "4.2.3" resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== @@ -16085,14 +15862,14 @@ string-width@^2.1.0: is-fullwidth-code-point "^2.0.0" strip-ansi "^4.0.0" -string-width@^7.0.0, string-width@^7.2.0: - version "7.2.0" - resolved "https://registry.yarnpkg.com/string-width/-/string-width-7.2.0.tgz#b5bb8e2165ce275d4d43476dd2700ad9091db6dc" - integrity sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ== +string-width@^5.0.1, string-width@^5.1.2: + version "5.1.2" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-5.1.2.tgz#14f8daec6d81e7221d2a357e668cab73bdbca794" + integrity sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA== dependencies: - emoji-regex "^10.3.0" - get-east-asian-width "^1.0.0" - strip-ansi "^7.1.0" + eastasianwidth "^0.2.0" + emoji-regex "^9.2.2" + strip-ansi "^7.0.1" string.prototype.trim@^1.2.10: version "1.2.10" @@ -16167,6 +15944,13 @@ string_decoder@~1.1.1: dependencies: safe-buffer "~5.1.0" +"strip-ansi-cjs@npm:strip-ansi@^6.0.1", strip-ansi@^6.0.0, strip-ansi@^6.0.1: + version "6.0.1" + 
resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" + integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== + dependencies: + ansi-regex "^5.0.1" + strip-ansi@^3.0.0: version "3.0.1" resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-3.0.1.tgz#6a385fb8853d952d5ff05d0e8aaf94278dc63dcf" @@ -16188,19 +15972,12 @@ strip-ansi@^5.2.0: dependencies: ansi-regex "^4.1.0" -strip-ansi@^6.0.0, strip-ansi@^6.0.1: - version "6.0.1" - resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" - integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== - dependencies: - ansi-regex "^5.0.1" - -strip-ansi@^7.1.0: - version "7.1.2" - resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-7.1.2.tgz#132875abde678c7ea8d691533f2e7e22bb744dba" - integrity sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA== +strip-ansi@^7.0.1: + version "7.2.0" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-7.2.0.tgz#d22a269522836a627af8d04b5c3fd2c7fa3e32e3" + integrity sha512-yDPMNjp4WyfYBkHnjIRLfca1i6KMyGCtsVgoKe/z1+6vukgaENdgGBZt+ZmKPc4gavvEZ5OgHfHdrazhgNyG7w== dependencies: - ansi-regex "^6.0.1" + ansi-regex "^6.2.2" strip-bom@^3.0.0: version "3.0.0" @@ -16227,11 +16004,6 @@ strip-final-newline@^3.0.0: resolved "https://registry.yarnpkg.com/strip-final-newline/-/strip-final-newline-3.0.0.tgz#52894c313fbff318835280aed60ff71ebf12b8fd" integrity sha512-dOESqjYr96iWYylGObzd39EuNTa5VJxyvVAEm5Jnh7KGo75V43Hk1odPQkNDyXNmUR6k+gEiDVXnjB8HJ3crXw== -strip-final-newline@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/strip-final-newline/-/strip-final-newline-4.0.0.tgz#35a369ec2ac43df356e3edd5dcebb6429aa1fa5c" - integrity 
sha512-aulFJcD6YK8V1G7iRB5tigAP4TsHBZZrOV8pjV++zdUwmeV8uzbY7yn6h9MswN62adStNZFuCIx4haBnRuMDaw== - strip-indent@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/strip-indent/-/strip-indent-3.0.0.tgz#c32e1cee940b6b3432c771bc2c54bcce73cd3001" @@ -16273,15 +16045,6 @@ subscriptions-transport-ws@^0.9.19: symbol-observable "^1.0.4" ws "^5.2.0 || ^6.0.0 || ^7.0.0" -super-regex@^1.0.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/super-regex/-/super-regex-1.1.0.tgz#14b69b6374f7b3338db52ecd511dae97c27acf75" - integrity sha512-WHkws2ZflZe41zj6AolvvmaTrWds/VuyeYr9iPVv/oQeaIoVxMKaushfFWpOGDT+GuBrM/sVqF8KUCYQlSSTdQ== - dependencies: - function-timeout "^1.0.1" - make-asynchronous "^1.0.1" - time-span "^5.1.0" - superagent@^10.2.3: version "10.2.3" resolved "https://registry.yarnpkg.com/superagent/-/superagent-10.2.3.tgz#d1e4986f2caac423c37e38077f9073ccfe73a59b" @@ -16320,11 +16083,6 @@ supertest@^7.1.3: methods "^1.1.2" superagent "^10.2.3" -supports-color@^10.2.2: - version "10.2.2" - resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-10.2.2.tgz#466c2978cc5cd0052d542a0b576461c2b802ebb4" - integrity sha512-SS+jx45GF1QjgEXQx4NJZV9ImqmO2NPz5FNsIHrsDjh2YsHnawpan7SNQ1o8NuhrbHZy9AZhIoCUiCeaW/C80g== - supports-color@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-2.0.0.tgz#535d045ce6b6363fa40117084629995e9df324c7" @@ -16351,7 +16109,12 @@ supports-color@^8, supports-color@^8.0.0, supports-color@^8.1.1: dependencies: has-flag "^4.0.0" -supports-hyperlinks@^2.2.0: +supports-color@^9.4.0: + version "9.4.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-9.4.0.tgz#17bfcf686288f531db3dea3215510621ccb55954" + integrity sha512-VL+lNrEoIXww1coLPOmiEmK/0sGigko5COxI09KzHc2VJXJsQ37UaQ+8quuxjDeA7+KnLGTWRyOXSLLR2Wb4jw== + +supports-hyperlinks@^2.2.0, supports-hyperlinks@^2.3.0: version "2.3.0" resolved 
"https://registry.yarnpkg.com/supports-hyperlinks/-/supports-hyperlinks-2.3.0.tgz#3943544347c1ff90b15effb03fc14ae45ec10624" integrity sha512-RpsAZlpWcDwOPQA22aCH4J0t7L8JmAvsCxfOSEwm7cQs3LshN36QaTkwd70DnBOXDWGssw2eUoc8CaRWT0XunA== @@ -16359,14 +16122,6 @@ supports-hyperlinks@^2.2.0: has-flag "^4.0.0" supports-color "^7.0.0" -supports-hyperlinks@^3.1.0: - version "3.2.0" - resolved "https://registry.yarnpkg.com/supports-hyperlinks/-/supports-hyperlinks-3.2.0.tgz#b8e485b179681dea496a1e7abdf8985bd3145461" - integrity sha512-zFObLMyZeEwzAoKCyu1B91U79K2t7ApXuQfo8OuxwXLDgcKxuwM+YvcbIhm6QWqz7mHUH1TVytR1PwVVjEuMig== - dependencies: - has-flag "^4.0.0" - supports-color "^7.0.0" - supports-preserve-symlinks-flag@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz#6eda4bd344a3c94aea376d4cc31bc77311039e09" @@ -16377,11 +16132,6 @@ symbol-observable@^1.0.2, symbol-observable@^1.0.4: resolved "https://registry.yarnpkg.com/symbol-observable/-/symbol-observable-1.2.0.tgz#c22688aed4eab3cdc2dfeacbb561660560a00804" integrity sha512-e900nM8RRtGhlV36KGEU9k65K3mPb1WV70OdjfxlG2EAuM1noi/E/BaW/uMhL7bPEssK8QV57vN3esixjUvcXQ== -tagged-tag@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/tagged-tag/-/tagged-tag-1.0.0.tgz#a0b5917c2864cba54841495abfa3f6b13edcf4d6" - integrity sha512-yEFYrVhod+hdNyx7g5Bnkkb0G6si8HJurOoOEgC8B/O0uXLHlaey/65KRv6cuWBNhBgHKAROVpc7QyYqE5gFng== - tar-fs@^2.0.0: version "2.1.4" resolved "https://registry.yarnpkg.com/tar-fs/-/tar-fs-2.1.4.tgz#800824dbf4ef06ded9afea4acafe71c67c76b930" @@ -16403,16 +16153,17 @@ tar-stream@^2.1.4, tar-stream@~2.2.0: inherits "^2.0.3" readable-stream "^3.1.1" -tar@6.2.1, tar@^6.0.2, tar@^6.1.11, tar@^6.1.2, tar@^6.2.1, tar@^7.4.3, tar@^7.5.1, tar@^7.5.2, tar@^7.5.8: - version "7.5.9" - resolved "https://registry.yarnpkg.com/tar/-/tar-7.5.9.tgz#817ac12a54bc4362c51340875b8985d7dc9724b8" - integrity 
sha512-BTLcK0xsDh2+PUe9F6c2TlRp4zOOBMTkoQHQIWSIzI0R7KG46uEwq4OPk2W7bZcprBMsuaeFsqwYr7pjh6CuHg== +tar@6.2.1, tar@^6.0.2, tar@^6.1.11, tar@^6.1.13, tar@^6.1.2, tar@^6.2.1: + version "6.2.1" + resolved "https://registry.yarnpkg.com/tar/-/tar-6.2.1.tgz#717549c541bc3c2af15751bea94b1dd068d4b03a" + integrity sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A== dependencies: - "@isaacs/fs-minipass" "^4.0.0" - chownr "^3.0.0" - minipass "^7.1.2" - minizlib "^3.1.0" - yallist "^5.0.0" + chownr "^2.0.0" + fs-minipass "^2.0.0" + minipass "^5.0.0" + minizlib "^2.1.1" + mkdirp "^1.0.3" + yallist "^4.0.0" tedious@18.6.1, tedious@^18.6.1: version "18.6.1" @@ -16464,25 +16215,16 @@ text-extensions@^1.0.0: resolved "https://registry.yarnpkg.com/text-extensions/-/text-extensions-1.9.0.tgz#1853e45fee39c945ce6f6c36b2d659b5aabc2a26" integrity sha512-wiBrwC1EhBelW12Zy26JeOUkQ5mRu+5o8rpsJk5+2t+Y5vE7e842qtZDQ2g1NpX/29HdyFeJ4nSIhI47ENSxlQ== +text-extensions@^2.0.0: + version "2.4.0" + resolved "https://registry.yarnpkg.com/text-extensions/-/text-extensions-2.4.0.tgz#a1cfcc50cf34da41bfd047cc744f804d1680ea34" + integrity sha512-te/NtwBwfiNRLf9Ijqx3T0nlqZiQ2XrrtBvu+cLL8ZRrGkO0NHTug8MYFKyoSrv/sHTaSKfilUkizV6XhxMJ3g== + text-table@^0.2.0, text-table@~0.2.0: version "0.2.0" resolved "https://registry.yarnpkg.com/text-table/-/text-table-0.2.0.tgz#7f5ee823ae805207c00af2df4a84ec3fcfa570b4" integrity sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw== -thenify-all@^1.0.0: - version "1.6.0" - resolved "https://registry.yarnpkg.com/thenify-all/-/thenify-all-1.6.0.tgz#1a1918d402d8fc3f98fbf234db0bcc8cc10e9726" - integrity sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA== - dependencies: - thenify ">= 3.1.0 < 4" - -"thenify@>= 3.1.0 < 4": - version "3.3.1" - resolved "https://registry.yarnpkg.com/thenify/-/thenify-3.3.1.tgz#8932e686a4066038a016dd9e2ca46add9838a95f" - 
integrity sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw== - dependencies: - any-promise "^1.0.0" - thread-stream@^3.0.0: version "3.1.0" resolved "https://registry.yarnpkg.com/thread-stream/-/thread-stream-3.1.0.tgz#4b2ef252a7c215064507d4ef70c05a5e2d34c4f1" @@ -16510,13 +16252,6 @@ through@2, through@2.3.8, "through@>=2.2.7 <3", through@^2.3.6: resolved "https://registry.yarnpkg.com/through/-/through-2.3.8.tgz#0dd4c9ffaabc357960b1b724115d7e0e86a2e1f5" integrity sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg== -time-span@^5.1.0: - version "5.1.0" - resolved "https://registry.yarnpkg.com/time-span/-/time-span-5.1.0.tgz#80c76cf5a0ca28e0842d3f10a4e99034ce94b90d" - integrity sha512-75voc/9G4rDIJleOo4jPvN4/YC4GRZrY8yy1uU4lwrB3XEQbWve8zXoO5No4eFrGcTAMYyoY67p8jRQdtA1HbA== - dependencies: - convert-hrtime "^5.0.0" - timers-ext@^0.1.7: version "0.1.7" resolved "https://registry.yarnpkg.com/timers-ext/-/timers-ext-0.1.7.tgz#6f57ad8578e07a3fb9f91d9387d65647555e25c6" @@ -16535,10 +16270,10 @@ tiny-lru@^8.0.1: resolved "https://registry.yarnpkg.com/tiny-lru/-/tiny-lru-8.0.2.tgz#812fccbe6e622ded552e3ff8a4c3b5ff34a85e4c" integrity sha512-ApGvZ6vVvTNdsmt676grvCkUCGwzG9IqXma5Z07xJgiC5L7akUMof5U8G2JTI9Rz/ovtVhJBlY6mNhEvtjzOIg== -tiny-relative-date@^2.0.2: - version "2.0.2" - resolved "https://registry.yarnpkg.com/tiny-relative-date/-/tiny-relative-date-2.0.2.tgz#0c35c2a3ef87b80f311314918505aa86c2d44bc9" - integrity sha512-rGxAbeL9z3J4pI2GtBEoFaavHdO4RKAU54hEuOef5kfx5aPqiQtbhYktMOTL5OA33db8BjsDcLXuNp+/v19PHw== +tiny-relative-date@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/tiny-relative-date/-/tiny-relative-date-1.3.0.tgz#fa08aad501ed730f31cc043181d995c39a935e07" + integrity sha512-MOQHpzllWxDCHHaDno30hhLfbouoYlOI8YlMNtvKe1zXbjEVhbcEovQxvZrPvtiYW630GQDoMMarCnjfyfHA+A== tinyglobby@0.2.12: version "0.2.12" @@ -16548,14 +16283,6 @@ tinyglobby@0.2.12: fdir "^6.4.3" 
picomatch "^4.0.2" -tinyglobby@^0.2.12, tinyglobby@^0.2.14: - version "0.2.15" - resolved "https://registry.yarnpkg.com/tinyglobby/-/tinyglobby-0.2.15.tgz#e228dd1e638cea993d2fdb4fcd2d4602a79951c2" - integrity sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ== - dependencies: - fdir "^6.5.0" - picomatch "^4.0.3" - tmp@^0.0.33: version "0.0.33" resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.0.33.tgz#6d34335889768d21b2bcda0aa277ced3b1bfadf9" @@ -16766,6 +16493,15 @@ tsutils@^3.21.0: dependencies: tslib "^1.8.1" +tuf-js@^1.1.7: + version "1.1.7" + resolved "https://registry.yarnpkg.com/tuf-js/-/tuf-js-1.1.7.tgz#21b7ae92a9373015be77dfe0cb282a80ec3bbe43" + integrity sha512-i3P9Kgw3ytjELUfpuKVDNBJvk4u5bXL6gskv572mcevPbSKCV3zt3djhmlEQ65yERjIbOSncy7U4cQJaB1CBCg== + dependencies: + "@tufjs/models" "1.0.4" + debug "^4.3.4" + make-fetch-happen "^11.1.1" + tuf-js@^2.2.1: version "2.2.1" resolved "https://registry.yarnpkg.com/tuf-js/-/tuf-js-2.2.1.tgz#fdd8794b644af1a75c7aaa2b197ddffeb2911b56" @@ -16775,15 +16511,6 @@ tuf-js@^2.2.1: debug "^4.3.4" make-fetch-happen "^13.0.1" -tuf-js@^4.1.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/tuf-js/-/tuf-js-4.1.0.tgz#ae4ef9afa456fcb4af103dc50a43bc031f066603" - integrity sha512-50QV99kCKH5P/Vs4E2Gzp7BopNV+KzTXqWeaxrfu5IQJBOULRsTIS9seSsOVT8ZnGXzCyx55nYWAi4qJzpZKEQ== - dependencies: - "@tufjs/models" "4.1.0" - debug "^4.4.3" - make-fetch-happen "^15.0.1" - tunnel-agent@^0.6.0: version "0.6.0" resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.6.0.tgz#27a5dea06b36b04a0a9966774b290868f0fc40fd" @@ -16798,11 +16525,6 @@ tunnel-ssh@^5.2.0: dependencies: ssh2 "^1.15.0" -tunnel@^0.0.6: - version "0.0.6" - resolved "https://registry.yarnpkg.com/tunnel/-/tunnel-0.0.6.tgz#72f1314b34a5b192db012324df2cc587ca47f92c" - integrity sha512-1h/Lnq9yajKY2PEbBadPXj3VxsDDu844OnaAo52UVmIzIvwwtBPIuNvkjuzBlTWpfJyUbG3ez0KSBibQkj4ojg== - tweetnacl@^0.14.3: version "0.14.5" resolved 
"https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-0.14.5.tgz#5ae68177f192d4456269d108afa93ff8743f4f64" @@ -16865,18 +16587,16 @@ type-fest@^2.12.2: resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-2.19.0.tgz#88068015bb33036a598b952e55e9311a60fd3a9b" integrity sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA== -type-fest@^4.39.1, type-fest@^4.6.0: +type-fest@^3.8.0: + version "3.13.1" + resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-3.13.1.tgz#bb744c1f0678bea7543a2d1ec24e83e68e8c8706" + integrity sha512-tLq3bSNx+xSpwvAJnzrK0Ep5CLNWjvFTOp71URMaAEWBfRb9nnJiBoUe0tF8bI4ZFO3omgBR6NvnbzVUT3Ly4g== + +type-fest@^4.2.0: version "4.41.0" resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-4.41.0.tgz#6ae1c8e5731273c2bf1f58ad39cbae2c91a46c58" integrity sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA== -type-fest@^5.2.0: - version "5.4.1" - resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-5.4.1.tgz#aa9eaadcdc0acb0b5bd52e54f966ee3e38e125d2" - integrity sha512-xygQcmneDyzsEuKZrFbRMne5HDqMs++aFzefrJTgEIKjQ3rekM+RPfFCVq2Gp1VIDqddoYeppCj4Pcb+RZW0GQ== - dependencies: - tagged-tag "^1.0.0" - type-is@^1.6.16, type-is@^1.6.18, type-is@~1.6.18: version "1.6.18" resolved "https://registry.yarnpkg.com/type-is/-/type-is-1.6.18.tgz#4e552cd05df09467dcbc4ef739de89f2cf37c131" @@ -17086,28 +16806,6 @@ undici-types@~6.21.0: resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-6.21.0.tgz#691d00af3909be93a7faa13be61b3a5b50ef12cb" integrity sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ== -undici@^5.28.5: - version "5.29.0" - resolved "https://registry.yarnpkg.com/undici/-/undici-5.29.0.tgz#419595449ae3f2cdcba3580a2e8903399bd1f5a3" - integrity sha512-raqeBD6NQK4SkWhQzeYKd1KmIG6dllBOTt55Rmkt4HtI9mwdWtJljnrXjAFUBLTSN67HWrOIZ3EPF4kjUw80Bg== - dependencies: - "@fastify/busboy" "^2.0.0" - -undici@^7.0.0: - 
version "7.18.2" - resolved "https://registry.yarnpkg.com/undici/-/undici-7.18.2.tgz#6cf724ef799a67d94fd55adf66b1e184176efcdf" - integrity sha512-y+8YjDFzWdQlSE9N5nzKMT3g4a5UBX1HKowfdXh0uvAnTaqqwqB92Jt4UXBAeKekDs5IaDKyJFR4X1gYVCgXcw== - -unicode-emoji-modifier-base@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/unicode-emoji-modifier-base/-/unicode-emoji-modifier-base-1.0.0.tgz#dbbd5b54ba30f287e2a8d5a249da6c0cef369459" - integrity sha512-yLSH4py7oFH3oG/9K+XWrz1pSi3dfUrWEnInbxMfArOfc1+33BlGPQtLsOYwvdMy11AwUBetYuaRxSPqgkq+8g== - -unicorn-magic@^0.1.0: - version "0.1.0" - resolved "https://registry.yarnpkg.com/unicorn-magic/-/unicorn-magic-0.1.0.tgz#1bb9a51c823aaf9d73a8bfcd3d1a23dde94b0ce4" - integrity sha512-lRfVq8fE8gz6QMBuDM6a+LO3IAzTi05H6gCVaUpir2E1Rwpo4ZUog45KpNXKC/Mn3Yb9UDuHumeFTo9iV/D9FQ== - unicorn-magic@^0.3.0: version "0.3.0" resolved "https://registry.yarnpkg.com/unicorn-magic/-/unicorn-magic-0.3.0.tgz#4efd45c85a69e0dd576d25532fbfa22aa5c8a104" @@ -17132,6 +16830,13 @@ unique-filename@^1.1.1: dependencies: unique-slug "^2.0.0" +unique-filename@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/unique-filename/-/unique-filename-2.0.1.tgz#e785f8675a9a7589e0ac77e0b5c34d2eaeac6da2" + integrity sha512-ODWHtkkdx3IAR+veKxFV+VBkUMcN+FaqzUUd7IZzt+0zhDZFPFxhlqwPF3YQvMHx1TD0tdgYl+kuPnJ8E6ql7A== + dependencies: + unique-slug "^3.0.0" + unique-filename@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/unique-filename/-/unique-filename-3.0.0.tgz#48ba7a5a16849f5080d26c760c86cf5cf05770ea" @@ -17139,13 +16844,6 @@ unique-filename@^3.0.0: dependencies: unique-slug "^4.0.0" -unique-filename@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/unique-filename/-/unique-filename-5.0.0.tgz#8b17bbde1a7ca322dd1a1d23fe17c2b798c43f8f" - integrity sha512-2RaJTAvAb4owyjllTfXzFClJ7WsGxlykkPvCr9pA//LD9goVq+m4PPAeBgNodGZ7nSrntT/auWpJ6Y5IFXcfjg== - dependencies: - unique-slug "^6.0.0" - unique-slug@^2.0.0: version "2.0.2" 
resolved "https://registry.yarnpkg.com/unique-slug/-/unique-slug-2.0.2.tgz#baabce91083fc64e945b0f3ad613e264f7cd4e6c" @@ -17153,6 +16851,13 @@ unique-slug@^2.0.0: dependencies: imurmurhash "^0.1.4" +unique-slug@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/unique-slug/-/unique-slug-3.0.0.tgz#6d347cf57c8a7a7a6044aabd0e2d74e4d76dc7c9" + integrity sha512-8EyMynh679x/0gqE9fT9oilG+qEt+ibFyqjuVTsZn1+CMxH+XLlpvr2UZx4nVcCwTpx81nICr2JQFkM+HPLq4w== + dependencies: + imurmurhash "^0.1.4" + unique-slug@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/unique-slug/-/unique-slug-4.0.0.tgz#6bae6bb16be91351badd24cdce741f892a6532e3" @@ -17160,13 +16865,6 @@ unique-slug@^4.0.0: dependencies: imurmurhash "^0.1.4" -unique-slug@^6.0.0: - version "6.0.0" - resolved "https://registry.yarnpkg.com/unique-slug/-/unique-slug-6.0.0.tgz#f46fd688a9bd972fd356c23d95812a3a4862ed88" - integrity sha512-4Lup7Ezn8W3d52/xBhZBVdx323ckxa7DEvd9kPQHppTkLoJXw6ltrBCyj5pnrxj0qKDxYMJ56CoxNuFCscdTiw== - dependencies: - imurmurhash "^0.1.4" - unique-string@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/unique-string/-/unique-string-3.0.0.tgz#84a1c377aff5fd7a8bc6b55d8244b2bd90d75b9a" @@ -17215,11 +16913,6 @@ universal-user-agent@^6.0.0: resolved "https://registry.yarnpkg.com/universal-user-agent/-/universal-user-agent-6.0.1.tgz#15f20f55da3c930c57bddbf1734c6654d5fd35aa" integrity sha512-yCzhz6FN2wU1NiiQRogkTQszlQSlpWaw8SvVegAc+bDxbzHgh1vX8uIe8OYyMH6DwH+sdTJsgMl36+mSMdRJIQ== -universal-user-agent@^7.0.0, universal-user-agent@^7.0.2: - version "7.0.3" - resolved "https://registry.yarnpkg.com/universal-user-agent/-/universal-user-agent-7.0.3.tgz#c05870a58125a2dc00431f2df815a77fe69736be" - integrity sha512-TmnEAEAsBJVZM/AADELsK76llnwcf9vMKuPz8JflO1frO8Lchitr0fNaN9d+Ap0BjKtqWqd/J17qeDnXh8CL2A== - universalify@^2.0.0: version "2.0.1" resolved "https://registry.yarnpkg.com/universalify/-/universalify-2.0.1.tgz#168efc2180964e6386d061e094df61afe239b18d" @@ -17344,11 +17037,6 
@@ validate-npm-package-name@^5.0.0: dependencies: builtins "^5.0.0" -validate-npm-package-name@^7.0.0: - version "7.0.2" - resolved "https://registry.yarnpkg.com/validate-npm-package-name/-/validate-npm-package-name-7.0.2.tgz#e57c3d721a4c8bbff454a246e7f7da811559ea0d" - integrity sha512-hVDIBwsRruT73PbK7uP5ebUt+ezEtCmzZz3F59BSr2F6OVFnJ/6h8liuvdLrQ88Xmnk6/+xGGuq+pG9WwTuy3A== - validator@^13.9.0: version "13.15.26" resolved "https://registry.yarnpkg.com/validator/-/validator-13.15.26.tgz#36c3deeab30e97806a658728a155c66fcaa5b944" @@ -17395,11 +17083,6 @@ walk-up-path@^3.0.1: resolved "https://registry.yarnpkg.com/walk-up-path/-/walk-up-path-3.0.1.tgz#c8d78d5375b4966c717eb17ada73dbd41490e886" integrity sha512-9YlCL/ynK3CTlrSRrDxZvUauLzAswPCrsaCgilqFevUYpeEW0/3ScEjaa3kbW/T0ghhkEr7mv+fpjqn1Y1YuTA== -walk-up-path@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/walk-up-path/-/walk-up-path-4.0.0.tgz#590666dcf8146e2d72318164f1f2ac6ef51d4198" - integrity sha512-3hu+tD8YzSLGuFYtPRb48vdhKMi0KQV5sn+uWr8+7dMEq/2G/dtLrdDinkLjqq5TIbIBjYJ4Ax/n3YiaW7QM8A== - walker@^1.0.8: version "1.0.8" resolved "https://registry.yarnpkg.com/walker/-/walker-1.0.8.tgz#bd498db477afe573dc04185f011d3ab8a8d7653f" @@ -17414,11 +17097,6 @@ wcwidth@^1.0.0, wcwidth@^1.0.1: dependencies: defaults "^1.0.3" -web-worker@1.2.0: - version "1.2.0" - resolved "https://registry.yarnpkg.com/web-worker/-/web-worker-1.2.0.tgz#5d85a04a7fbc1e7db58f66595d7a3ac7c9c180da" - integrity sha512-PgF341avzqyx60neE9DD+XS26MMNMoUQRz9NOZwW32nPQrF6p77f1htcnjBSEV8BGMKZ16choqUG4hyI0Hx7mA== - webidl-conversions@^3.0.0: version "3.0.1" resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-3.0.1.tgz#24534275e2a7bc6be7bc86611cc16ae0a5654871" @@ -17553,6 +17231,13 @@ which@^2.0.1, which@^2.0.2: dependencies: isexe "^2.0.0" +which@^3.0.0, which@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/which/-/which-3.0.1.tgz#89f1cd0c23f629a8105ffe69b8172791c87b4be1" + integrity 
sha512-XA1b62dzQzLfaEOSQFTCOd5KFf/1VSzZo7/7TUjnya6u0vGGKzU96UQBZTAThCb2j4/xjBAyii1OhRLJEivHvg== + dependencies: + isexe "^2.0.0" + which@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/which/-/which-4.0.0.tgz#cd60b5e74503a3fbcfbf6cd6b4138a8bae644c1a" @@ -17560,13 +17245,6 @@ which@^4.0.0: dependencies: isexe "^3.1.1" -which@^6.0.0: - version "6.0.0" - resolved "https://registry.yarnpkg.com/which/-/which-6.0.0.tgz#a3a721a14cdd9b991a722e493c177eeff82ff32a" - integrity sha512-f+gEpIKMR9faW/JgAgPK1D7mekkFoqbmiwvNzuhsHetni20QSgzg9Vhn0g2JSJkkfehQnqdUAx7/e15qS1lPxg== - dependencies: - isexe "^3.1.1" - wide-align@1.1.5, wide-align@^1.1.2, wide-align@^1.1.5: version "1.1.5" resolved "https://registry.yarnpkg.com/wide-align/-/wide-align-1.1.5.tgz#df1d4c206854369ecf3c9a4898f1b23fbd9d15d3" @@ -17593,39 +17271,39 @@ wordwrap@>=0.0.2, wordwrap@^1.0.0: resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-1.0.0.tgz#27584810891456a4171c8d0226441ade90cbcaeb" integrity sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q== -wrap-ansi@^6.0.1, wrap-ansi@^6.2.0: - version "6.2.0" - resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-6.2.0.tgz#e9393ba07102e6c91a3b221478f0257cd2856e53" - integrity sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA== +"wrap-ansi-cjs@npm:wrap-ansi@^7.0.0", wrap-ansi@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" + integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q== dependencies: ansi-styles "^4.0.0" string-width "^4.1.0" strip-ansi "^6.0.0" -wrap-ansi@^7.0.0: - version "7.0.0" - resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" - integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q== 
+wrap-ansi@^6.0.1, wrap-ansi@^6.2.0: + version "6.2.0" + resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-6.2.0.tgz#e9393ba07102e6c91a3b221478f0257cd2856e53" + integrity sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA== dependencies: ansi-styles "^4.0.0" string-width "^4.1.0" strip-ansi "^6.0.0" -wrap-ansi@^9.0.0: - version "9.0.2" - resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-9.0.2.tgz#956832dea9494306e6d209eb871643bb873d7c98" - integrity sha512-42AtmgqjV+X1VpdOfyTGOYRi0/zsoLqtXQckTmqTeybT+BDIbM/Guxo7x3pE2vtpr1ok6xRqM9OpBe+Jyoqyww== +wrap-ansi@^8.1.0: + version "8.1.0" + resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-8.1.0.tgz#56dc22368ee570face1b49819975d9b9a5ead214" + integrity sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ== dependencies: - ansi-styles "^6.2.1" - string-width "^7.0.0" - strip-ansi "^7.1.0" + ansi-styles "^6.1.0" + string-width "^5.0.1" + strip-ansi "^7.0.1" wrappy@1: version "1.0.2" resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f" integrity sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ== -write-file-atomic@5.0.1, write-file-atomic@^5.0.0: +write-file-atomic@5.0.1, write-file-atomic@^5.0.0, write-file-atomic@^5.0.1: version "5.0.1" resolved "https://registry.yarnpkg.com/write-file-atomic/-/write-file-atomic-5.0.1.tgz#68df4717c55c6fa4281a7860b4c2ba0a6d2b11e7" integrity sha512-+QU2zd6OTD8XWIJCbffaiQeH9U73qIqafo1x6V1snCWYGJf6cVE0cDR4D8xRzcEnfI21IFrUPzPGtcPf8AC+Rw== @@ -17650,14 +17328,6 @@ write-file-atomic@^4.0.2: imurmurhash "^0.1.4" signal-exit "^3.0.7" -write-file-atomic@^7.0.0: - version "7.0.0" - resolved "https://registry.yarnpkg.com/write-file-atomic/-/write-file-atomic-7.0.0.tgz#f89def4f223e9bf8b06cc6fdb12bda3a917505c7" - integrity 
sha512-YnlPC6JqnZl6aO4uRc+dx5PHguiR9S6WeoLtpxNT9wIG+BDya7ZNE1q7KOjVgaA73hKhKLpVPgJ5QA9THQ5BRg== - dependencies: - imurmurhash "^0.1.4" - signal-exit "^4.0.1" - write-json-file@^3.2.0: version "3.2.0" resolved "https://registry.yarnpkg.com/write-json-file/-/write-json-file-3.2.0.tgz#65bbdc9ecd8a1458e15952770ccbadfcff5fe62a" @@ -17714,11 +17384,6 @@ yallist@^4.0.0: resolved "https://registry.yarnpkg.com/yallist/-/yallist-4.0.0.tgz#9bb92790d9c0effec63be73519e11a35019a3a72" integrity sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A== -yallist@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/yallist/-/yallist-5.0.0.tgz#00e2de443639ed0d78fd87de0d27469fbcffb533" - integrity sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw== - yaml@^2.2.1, yaml@^2.8.1: version "2.8.2" resolved "https://registry.yarnpkg.com/yaml/-/yaml-2.8.2.tgz#5694f25eca0ce9c3e7a9d9e00ce0ddabbd9e35c5" @@ -17739,12 +17404,7 @@ yargs-parser@^20.2.2, yargs-parser@^20.2.3: resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-20.2.9.tgz#2eb7dc3b0289718fc295f362753845c41a0c94ee" integrity sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w== -yargs-parser@^22.0.0: - version "22.0.0" - resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-22.0.0.tgz#87b82094051b0567717346ecd00fd14804b357c8" - integrity sha512-rwu/ClNdSMpkSrUb+d6BRsSkLUq1fmfsY6TOpYzTwvwkg1/NRG85KBy3kq++A8LKQwX6lsu+aWad+2khvuXrqw== - -yargs@17.7.2, yargs@^17.0.0, yargs@^17.3.1, yargs@^17.6.2: +yargs@17.7.2, yargs@^17.0.0, yargs@^17.3.1, yargs@^17.5.1, yargs@^17.6.2: version "17.7.2" resolved "https://registry.yarnpkg.com/yargs/-/yargs-17.7.2.tgz#991df39aca675a192b816e1e0363f9d75d2aa269" integrity sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w== @@ -17757,7 +17417,7 @@ yargs@17.7.2, yargs@^17.0.0, yargs@^17.3.1, yargs@^17.6.2: 
y18n "^5.0.5" yargs-parser "^21.1.1" -yargs@^16.0.0, yargs@^16.2.0: +yargs@^16.2.0: version "16.2.0" resolved "https://registry.yarnpkg.com/yargs/-/yargs-16.2.0.tgz#1c82bf0f6b6a66eafce7ef30e376f49a12477f66" integrity sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw== @@ -17770,18 +17430,6 @@ yargs@^16.0.0, yargs@^16.2.0: y18n "^5.0.5" yargs-parser "^20.2.2" -yargs@^18.0.0: - version "18.0.0" - resolved "https://registry.yarnpkg.com/yargs/-/yargs-18.0.0.tgz#6c84259806273a746b09f579087b68a3c2d25bd1" - integrity sha512-4UEqdc2RYGHZc7Doyqkrqiln3p9X2DZVxaGbwhn2pi7MrRagKaOcIKe8L3OxYcbhXLgLFUS3zAYuQjKBQgmuNg== - dependencies: - cliui "^9.0.1" - escalade "^3.1.1" - get-caller-file "^2.0.5" - string-width "^7.2.0" - y18n "^5.0.5" - yargs-parser "^22.0.0" - ylru@^1.2.0: version "1.4.0" resolved "https://registry.yarnpkg.com/ylru/-/ylru-1.4.0.tgz#0cf0aa57e9c24f8a2cbde0cc1ca2c9592ac4e0f6" @@ -17797,16 +17445,16 @@ yocto-queue@^0.1.0: resolved "https://registry.yarnpkg.com/yocto-queue/-/yocto-queue-0.1.0.tgz#0294eb3dee05028d31ee1a5fa2c556a6aaf10a1b" integrity sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q== +yocto-queue@^1.0.0: + version "1.2.2" + resolved "https://registry.yarnpkg.com/yocto-queue/-/yocto-queue-1.2.2.tgz#3e09c95d3f1aa89a58c114c99223edf639152c00" + integrity sha512-4LCcse/U2MHZ63HAJVE+v71o7yOdIe4cZ70Wpf8D/IyjDKYQLV5GD46B+hSTjJsvV5PztjvHoU580EftxjDZFQ== + yoctocolors-cjs@^2.1.2: version "2.1.2" resolved "https://registry.yarnpkg.com/yoctocolors-cjs/-/yoctocolors-cjs-2.1.2.tgz#f4b905a840a37506813a7acaa28febe97767a242" integrity sha512-cYVsTjKl8b+FrnidjibDWskAv7UKOfcwaVZdp/it9n1s9fU3IkgDbhdIRKCW4JDsAlECJY0ytoVPT3sK6kideA== -yoctocolors@^2.1.1: - version "2.1.2" - resolved "https://registry.yarnpkg.com/yoctocolors/-/yoctocolors-2.1.2.tgz#d795f54d173494e7d8db93150cec0ed7f678c83a" - integrity 
sha512-CzhO+pFNo8ajLM2d2IW/R93ipy99LWjtwblvC1RsoSUMZgyLbYFr221TnSNT7GjGdYui6P459mw9JH/g/zW2ug== - zen-observable-ts@^0.8.21: version "0.8.21" resolved "https://registry.yarnpkg.com/zen-observable-ts/-/zen-observable-ts-0.8.21.tgz#85d0031fbbde1eba3cd07d3ba90da241215f421d" From 9399bb724065f956d284f4122d768ef280dfda65 Mon Sep 17 00:00:00 2001 From: scra Date: Wed, 25 Mar 2026 11:05:49 +0100 Subject: [PATCH 13/18] refactor(workflow-executor): move userConfirmed to RunStore pendingData (#1507) --- packages/workflow-executor/package.json | 1 + .../src/executors/base-step-executor.ts | 45 +++--- .../load-related-record-step-executor.ts | 19 ++- .../src/executors/mcp-task-step-executor.ts | 38 ++++-- .../trigger-record-action-step-executor.ts | 36 ++--- .../executors/update-record-step-executor.ts | 27 ++-- .../src/http/executor-http-server.ts | 46 +++++++ .../workflow-executor/src/types/execution.ts | 2 - .../src/types/step-execution-data.ts | 12 +- .../load-related-record-step-executor.test.ts | 84 ++++++++---- .../executors/mcp-task-step-executor.test.ts | 84 ++++++++---- ...rigger-record-action-step-executor.test.ts | 94 +++++-------- .../update-record-step-executor.test.ts | 126 ++++++++--------- .../test/http/executor-http-server.test.ts | 128 ++++++++++++++++++ 14 files changed, 492 insertions(+), 250 deletions(-) diff --git a/packages/workflow-executor/package.json b/packages/workflow-executor/package.json index 32ae26db19..5f94e3052f 100644 --- a/packages/workflow-executor/package.json +++ b/packages/workflow-executor/package.json @@ -26,6 +26,7 @@ "@forestadmin/agent-client": "1.4.13", "@forestadmin/ai-proxy": "1.6.1", "@forestadmin/forestadmin-client": "1.37.17", + "@koa/bodyparser": "^6.1.0", "@koa/router": "^13.1.0", "jsonwebtoken": "^9.0.3", "koa": "^3.0.1", diff --git a/packages/workflow-executor/src/executors/base-step-executor.ts b/packages/workflow-executor/src/executors/base-step-executor.ts index 4472ff6ba0..fb1ba2da2f 100644 --- 
a/packages/workflow-executor/src/executors/base-step-executor.ts +++ b/packages/workflow-executor/src/executors/base-step-executor.ts @@ -3,7 +3,7 @@ import type { ExecutionContext, IStepExecutor, StepExecutionResult } from '../ty import type { CollectionSchema, FieldSchema, RecordRef } from '../types/record'; import type { StepDefinition } from '../types/step-definition'; import type { StepExecutionData } from '../types/step-execution-data'; -import type { BaseStepStatus } from '../types/step-outcome'; +import type { StepStatus } from '../types/step-outcome'; import type { BaseMessage, StructuredToolInterface } from '@forestadmin/ai-proxy'; import { DynamicStructuredTool, HumanMessage, SystemMessage } from '@forestadmin/ai-proxy'; @@ -83,35 +83,46 @@ export default abstract class BaseStepExecutor( + type: string, + ): Promise { + const stepExecutions = await this.context.runStore.getStepExecutions(this.context.runId); + + return stepExecutions.find( + (e): e is TExec => (e as TExec).type === type && e.stepIndex === this.context.stepIndex, + ); + } + /** * Shared confirmation flow for executors that require user approval before acting. - * Handles the find → guard → skipped → delegate pattern. 
+ * Receives a pre-loaded execution (from findPendingExecution) and checks pendingData.userConfirmed: + * - undefined → PATCH not yet called → re-emit awaiting-input (safe no-op) + * - false → save execution as skipped and return success outcome + * - true → execute via resolveAndExecute */ protected async handleConfirmationFlow( - typeDiscriminator: string, + execution: TExec, resolveAndExecute: (execution: TExec) => Promise, ): Promise { - const stepExecutions = await this.context.runStore.getStepExecutions(this.context.runId); - const execution = stepExecutions.find( - (e): e is TExec => - (e as TExec).type === typeDiscriminator && e.stepIndex === this.context.stepIndex, - ); - - if (!execution) { - throw new StepStateError( - `No execution record found for step at index ${this.context.stepIndex}`, - ); - } - if (!execution.pendingData) { throw new StepStateError(`Step at index ${this.context.stepIndex} has no pending data`); } - if (!this.context.userConfirmed) { + const { userConfirmed } = execution.pendingData as { userConfirmed?: boolean }; + + if (userConfirmed === undefined) { + return this.buildOutcomeResult({ status: 'awaiting-input' }); + } + + if (!userConfirmed) { await this.context.runStore.saveStepExecution(this.context.runId, { ...execution, executionResult: { skipped: true }, diff --git a/packages/workflow-executor/src/executors/load-related-record-step-executor.ts b/packages/workflow-executor/src/executors/load-related-record-step-executor.ts index 998157bb6a..108655e27d 100644 --- a/packages/workflow-executor/src/executors/load-related-record-step-executor.ts +++ b/packages/workflow-executor/src/executors/load-related-record-step-executor.ts @@ -37,22 +37,21 @@ interface RelationTarget extends RelationRef { export default class LoadRelatedRecordStepExecutor extends RecordTaskStepExecutor { protected async doExecute(): Promise { - // Branch A -- Re-entry with user confirmation - if (this.context.userConfirmed !== undefined) { - return 
this.handleConfirmation(); + // Branch A -- Re-entry after pending execution found in RunStore + const pending = await this.findPendingExecution( + 'load-related-record', + ); + + if (pending) { + return this.handleConfirmationFlow(pending, async exec => + this.resolveFromSelection(exec), + ); } // Branches B & C -- First call return this.handleFirstCall(); } - private async handleConfirmation(): Promise { - return this.handleConfirmationFlow( - 'load-related-record', - async execution => this.resolveFromSelection(execution), - ); - } - private async handleFirstCall(): Promise { const { stepDefinition: step } = this.context; const records = await this.getAvailableRecordRefs(); diff --git a/packages/workflow-executor/src/executors/mcp-task-step-executor.ts b/packages/workflow-executor/src/executors/mcp-task-step-executor.ts index fe9c84e9c2..1adc59ed73 100644 --- a/packages/workflow-executor/src/executors/mcp-task-step-executor.ts +++ b/packages/workflow-executor/src/executors/mcp-task-step-executor.ts @@ -48,9 +48,11 @@ export default class McpTaskStepExecutor extends BaseStepExecutor { - if (this.context.userConfirmed !== undefined) { - // Branch A -- Re-entry with user confirmation - return this.handleConfirmationFlow('mcp-task', execution => + // Branch A -- Re-entry after pending execution found in RunStore + const pending = await this.findPendingExecution('mcp-task'); + + if (pending) { + return this.handleConfirmationFlow(pending, execution => this.executeToolAndPersist(execution.pendingData as McpToolCall, execution), ); } @@ -120,15 +122,10 @@ export default class McpTaskStepExecutor extends BaseStepExecutor { protected async doExecute(): Promise { - // Branch A -- Re-entry with user confirmation - if (this.context.userConfirmed !== undefined) { - return this.handleConfirmation(); + // Branch A -- Re-entry after pending execution found in RunStore + const pending = await this.findPendingExecution( + 'trigger-action', + ); + + if (pending) { + return 
this.handleConfirmationFlow( + pending, + async exec => { + const { selectedRecordRef, pendingData } = exec; + const target: ActionTarget = { + selectedRecordRef, + ...(pendingData as ActionRef), + }; + + return this.resolveAndExecute(target, exec); + }, + ); } // Branches B & C -- First call return this.handleFirstCall(); } - private async handleConfirmation(): Promise { - return this.handleConfirmationFlow( - 'trigger-action', - async execution => { - const { selectedRecordRef, pendingData } = execution; - const target: ActionTarget = { - selectedRecordRef, - ...(pendingData as ActionRef), - }; - - return this.resolveAndExecute(target, execution); - }, - ); - } - private async handleFirstCall(): Promise { const { stepDefinition: step } = this.context; const records = await this.getAvailableRecordRefs(); diff --git a/packages/workflow-executor/src/executors/update-record-step-executor.ts b/packages/workflow-executor/src/executors/update-record-step-executor.ts index 97c0cb1d41..f1263e54a4 100644 --- a/packages/workflow-executor/src/executors/update-record-step-executor.ts +++ b/packages/workflow-executor/src/executors/update-record-step-executor.ts @@ -24,28 +24,23 @@ interface UpdateTarget extends FieldRef { export default class UpdateRecordStepExecutor extends RecordTaskStepExecutor { protected async doExecute(): Promise { - // Branch A -- Re-entry with user confirmation - if (this.context.userConfirmed !== undefined) { - return this.handleConfirmation(); - } - - // Branches B & C -- First call - return this.handleFirstCall(); - } + // Branch A -- Re-entry after pending execution found in RunStore + const pending = await this.findPendingExecution('update-record'); - private async handleConfirmation(): Promise { - return this.handleConfirmationFlow( - 'update-record', - async execution => { - const { selectedRecordRef, pendingData } = execution; + if (pending) { + return this.handleConfirmationFlow(pending, async exec => { + const { selectedRecordRef, pendingData 
} = exec; const target: UpdateTarget = { selectedRecordRef, ...(pendingData as FieldRef & { value: string }), }; - return this.resolveAndUpdate(target, execution); - }, - ); + return this.resolveAndUpdate(target, exec); + }); + } + + // Branches B & C -- First call + return this.handleFirstCall(); } private async handleFirstCall(): Promise { diff --git a/packages/workflow-executor/src/http/executor-http-server.ts b/packages/workflow-executor/src/http/executor-http-server.ts index 4200ae0347..3bd6ca2153 100644 --- a/packages/workflow-executor/src/http/executor-http-server.ts +++ b/packages/workflow-executor/src/http/executor-http-server.ts @@ -4,6 +4,7 @@ import type { WorkflowPort } from '../ports/workflow-port'; import type Runner from '../runner'; import type { Server } from 'http'; +import bodyParser from '@koa/bodyparser'; import Router from '@koa/router'; import http from 'http'; import Koa from 'koa'; @@ -54,6 +55,8 @@ export default class ExecutorHttpServer { } }); + this.app.use(bodyParser()); + // JWT middleware — validates Bearer token using authSecret // tokenKey: 'rawToken' exposes the raw token string on ctx.state.rawToken for downstream use this.app.use( @@ -96,6 +99,10 @@ export default class ExecutorHttpServer { router.get('/runs/:runId', this.handleGetRun.bind(this)); router.post('/runs/:runId/trigger', this.handleTrigger.bind(this)); + router.patch( + '/runs/:runId/steps/:stepIndex/pending-data', + this.handlePatchPendingData.bind(this), + ); this.app.use(router.routes()); this.app.use(router.allowedMethods()); @@ -158,4 +165,43 @@ export default class ExecutorHttpServer { ctx.status = 200; ctx.body = { triggered: true }; } + + private async handlePatchPendingData(ctx: Koa.Context): Promise { + const { runId, stepIndex: stepIndexStr } = ctx.params; + const stepIndex = parseInt(stepIndexStr, 10); + + if (Number.isNaN(stepIndex)) { + ctx.status = 400; + ctx.body = { error: 'Invalid stepIndex' }; + + return; + } + + const body = ctx.request.body as 
Record; + const { userConfirmed } = body; + + if (typeof userConfirmed !== 'boolean') { + ctx.status = 400; + ctx.body = { error: 'userConfirmed must be a boolean' }; + + return; + } + + const stepExecutions = await this.options.runStore.getStepExecutions(runId); + const execution = stepExecutions.find(e => e.stepIndex === stepIndex); + + if (!execution || !('pendingData' in execution) || execution.pendingData === undefined) { + ctx.status = 404; + ctx.body = { error: 'Step execution not found or has no pending data' }; + + return; + } + + await this.options.runStore.saveStepExecution(runId, { + ...execution, + pendingData: { ...(execution.pendingData as object), userConfirmed }, + } as Parameters[1]); + + ctx.status = 204; + } } diff --git a/packages/workflow-executor/src/types/execution.ts b/packages/workflow-executor/src/types/execution.ts index 3ec08b3345..2da261f519 100644 --- a/packages/workflow-executor/src/types/execution.ts +++ b/packages/workflow-executor/src/types/execution.ts @@ -21,7 +21,6 @@ export interface PendingStepExecution { readonly baseRecordRef: RecordRef; readonly stepDefinition: StepDefinition; readonly previousSteps: ReadonlyArray; - readonly userConfirmed?: boolean; } export interface StepExecutionResult { @@ -43,6 +42,5 @@ export interface ExecutionContext readonly workflowPort: WorkflowPort; readonly runStore: RunStore; readonly previousSteps: ReadonlyArray>; - readonly userConfirmed?: boolean; readonly logger: Logger; } diff --git a/packages/workflow-executor/src/types/step-execution-data.ts b/packages/workflow-executor/src/types/step-execution-data.ts index edd5f8df02..6f58bd5491 100644 --- a/packages/workflow-executor/src/types/step-execution-data.ts +++ b/packages/workflow-executor/src/types/step-execution-data.ts @@ -50,7 +50,7 @@ export interface UpdateRecordStepExecutionData extends BaseStepExecutionData { /** User confirmed → values returned by updateRecord. User rejected → skipped. 
*/ executionResult?: { updatedValues: Record } | { skipped: true }; /** AI-selected field and value awaiting user confirmation. Used in the confirmation flow only. */ - pendingData?: FieldRef & { value: string }; + pendingData?: FieldRef & { value: string; userConfirmed?: boolean }; selectedRecordRef: RecordRef; } @@ -74,7 +74,7 @@ export interface TriggerRecordActionStepExecutionData extends BaseStepExecutionD executionParams?: ActionRef; executionResult?: { success: true; actionResult: unknown } | { skipped: true }; /** AI-selected action awaiting user confirmation. Used in the confirmation flow only. */ - pendingData?: ActionRef; + pendingData?: ActionRef & { userConfirmed?: boolean }; selectedRecordRef: RecordRef; } @@ -96,7 +96,7 @@ export interface McpTaskStepExecutionData extends BaseStepExecutionData { executionResult?: | { success: true; toolResult: unknown; formattedResponse?: string } | { skipped: true }; - pendingData?: McpToolCall; + pendingData?: McpToolCall & { userConfirmed?: boolean }; } // -- Generic AI Task (fallback for untyped steps) -- @@ -116,10 +116,12 @@ export interface LoadRelatedRecordPendingData extends RelationRef { /** AI-selected fields suggested for display on the frontend. undefined = not computed (no non-relation fields). */ suggestedFields?: string[]; /** - * The record id to load. Initially set by the AI; overwritten by the frontend via - * PATCH /runs/:runId/steps/:stepIndex/pending-data (not yet implemented). + * The record id to load. Initially set by the AI. Can be overridden by the frontend + * (future iteration — the current PATCH endpoint only accepts userConfirmed). */ selectedRecordId: Array; + /** Set by the frontend via PATCH /runs/:runId/steps/:stepIndex/pending-data. 
*/ + userConfirmed?: boolean; } export interface LoadRelatedRecordStepExecutionData extends BaseStepExecutionData { diff --git a/packages/workflow-executor/test/executors/load-related-record-step-executor.test.ts b/packages/workflow-executor/test/executors/load-related-record-step-executor.test.ts index d4c3aa0cda..6ac150c2e3 100644 --- a/packages/workflow-executor/test/executors/load-related-record-step-executor.test.ts +++ b/packages/workflow-executor/test/executors/load-related-record-step-executor.test.ts @@ -738,11 +738,20 @@ describe('LoadRelatedRecordStepExecutor', () => { describe('confirmation accepted (Branch A)', () => { it('uses selectedRecordId from pendingData, no getRelatedData call', async () => { const agentPort = makeMockAgentPort(); - const execution = makePendingExecution(); // selectedRecordId: [99] + const execution = makePendingExecution({ + pendingData: { + displayName: 'Order', + name: 'order', + relatedCollectionName: 'orders', + suggestedFields: ['status', 'amount'], + selectedRecordId: [99], + userConfirmed: true, + }, + }); const runStore = makeMockRunStore({ getStepExecutions: jest.fn().mockResolvedValue([execution]), }); - const context = makeContext({ agentPort, runStore, userConfirmed: true }); + const context = makeContext({ agentPort, runStore }); const executor = new LoadRelatedRecordStepExecutor(context); const result = await executor.execute(); @@ -776,12 +785,13 @@ describe('LoadRelatedRecordStepExecutor', () => { relatedCollectionName: 'orders', suggestedFields: ['status', 'amount'], selectedRecordId: [42], + userConfirmed: true, }, }); const runStore = makeMockRunStore({ getStepExecutions: jest.fn().mockResolvedValue([execution]), }); - const context = makeContext({ agentPort, runStore, userConfirmed: true }); + const context = makeContext({ agentPort, runStore }); const executor = new LoadRelatedRecordStepExecutor(context); const result = await executor.execute(); @@ -802,11 +812,20 @@ 
describe('LoadRelatedRecordStepExecutor', () => { describe('confirmation rejected (Branch A)', () => { it('skips the load when user rejects', async () => { const agentPort = makeMockAgentPort(); - const execution = makePendingExecution(); + const execution = makePendingExecution({ + pendingData: { + displayName: 'Order', + name: 'order', + relatedCollectionName: 'orders', + suggestedFields: ['status', 'amount'], + selectedRecordId: [99], + userConfirmed: false, + }, + }); const runStore = makeMockRunStore({ getStepExecutions: jest.fn().mockResolvedValue([execution]), }); - const context = makeContext({ agentPort, runStore, userConfirmed: false }); + const context = makeContext({ agentPort, runStore }); const executor = new LoadRelatedRecordStepExecutor(context); const result = await executor.execute(); @@ -823,26 +842,25 @@ describe('LoadRelatedRecordStepExecutor', () => { }); }); - describe('no pending data in confirmation flow (Branch A)', () => { - it('returns error outcome when no execution record is found', async () => { + describe('trigger before PATCH (Branch A)', () => { + it('re-emits awaiting-input when userConfirmed is not yet set in pendingData', async () => { + const agentPort = makeMockAgentPort(); + const execution = makePendingExecution(); // pendingData has no userConfirmed const runStore = makeMockRunStore({ - getStepExecutions: jest.fn().mockResolvedValue([]), + getStepExecutions: jest.fn().mockResolvedValue([execution]), }); - const context = makeContext({ runStore, userConfirmed: true }); + const context = makeContext({ agentPort, runStore }); const executor = new LoadRelatedRecordStepExecutor(context); - await expect(executor.execute()).resolves.toMatchObject({ - stepOutcome: { - type: 'record-task', - stepId: 'load-1', - stepIndex: 0, - status: 'error', - error: 'An unexpected error occurred while processing this step.', - }, - }); + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('awaiting-input'); + 
expect(agentPort.getRelatedData).not.toHaveBeenCalled(); expect(runStore.saveStepExecution).not.toHaveBeenCalled(); }); + }); + describe('no pending data in confirmation flow (Branch A)', () => { it('returns error outcome when execution exists but pendingData is absent', async () => { const runStore = makeMockRunStore({ getStepExecutions: jest.fn().mockResolvedValue([ @@ -853,7 +871,7 @@ describe('LoadRelatedRecordStepExecutor', () => { }, ]), }); - const context = makeContext({ runStore, userConfirmed: true }); + const context = makeContext({ runStore }); const executor = new LoadRelatedRecordStepExecutor(context); await expect(executor.execute()).resolves.toMatchObject({ @@ -981,12 +999,21 @@ describe('LoadRelatedRecordStepExecutor', () => { }); it('returns error outcome when saveStepExecution fails after load (Branch A confirmed)', async () => { - const execution = makePendingExecution(); + const execution = makePendingExecution({ + pendingData: { + displayName: 'Order', + name: 'order', + relatedCollectionName: 'orders', + suggestedFields: ['status', 'amount'], + selectedRecordId: [99], + userConfirmed: true, + }, + }); const runStore = makeMockRunStore({ getStepExecutions: jest.fn().mockResolvedValue([execution]), saveStepExecution: jest.fn().mockRejectedValue(new Error('Disk full')), }); - const context = makeContext({ runId: 'run-1', stepIndex: 0, runStore, userConfirmed: true }); + const context = makeContext({ runId: 'run-1', stepIndex: 0, runStore }); const executor = new LoadRelatedRecordStepExecutor(context); const result = await executor.execute(); @@ -1317,7 +1344,7 @@ describe('LoadRelatedRecordStepExecutor', () => { const runStore = makeMockRunStore({ getStepExecutions: jest.fn().mockRejectedValue(new Error('DB timeout')), }); - const context = makeContext({ runStore, userConfirmed: true }); + const context = makeContext({ runStore }); const executor = new LoadRelatedRecordStepExecutor(context); const result = await executor.execute(); @@ -1337,12 
+1364,21 @@ describe('LoadRelatedRecordStepExecutor', () => { }); it('returns error outcome when saveStepExecution fails on user reject (Branch A)', async () => { - const execution = makePendingExecution(); + const execution = makePendingExecution({ + pendingData: { + displayName: 'Order', + name: 'order', + relatedCollectionName: 'orders', + suggestedFields: ['status', 'amount'], + selectedRecordId: [99], + userConfirmed: false, + }, + }); const runStore = makeMockRunStore({ getStepExecutions: jest.fn().mockResolvedValue([execution]), saveStepExecution: jest.fn().mockRejectedValue(new Error('Disk full')), }); - const context = makeContext({ runStore, userConfirmed: false }); + const context = makeContext({ runStore }); const executor = new LoadRelatedRecordStepExecutor(context); const result = await executor.execute(); diff --git a/packages/workflow-executor/test/executors/mcp-task-step-executor.test.ts b/packages/workflow-executor/test/executors/mcp-task-step-executor.test.ts index 5a13735135..a7a3d6d51a 100644 --- a/packages/workflow-executor/test/executors/mcp-task-step-executor.test.ts +++ b/packages/workflow-executor/test/executors/mcp-task-step-executor.test.ts @@ -314,12 +314,16 @@ describe('McpTaskStepExecutor', () => { const execution: McpTaskStepExecutionData = { type: 'mcp-task', stepIndex: 0, - pendingData: { name: 'send_notification', input: { message: 'Hello' } }, + pendingData: { + name: 'send_notification', + input: { message: 'Hello' }, + userConfirmed: true, + }, }; const runStore = makeMockRunStore({ getStepExecutions: jest.fn().mockResolvedValue([execution]), }); - const context = makeContext({ runStore, userConfirmed: true }); + const context = makeContext({ runStore }); const executor = new McpTaskStepExecutor(context, [tool]); const result = await executor.execute(); @@ -332,7 +336,11 @@ describe('McpTaskStepExecutor', () => { type: 'mcp-task', executionParams: { name: 'send_notification', input: { message: 'Hello' } }, executionResult: { 
success: true, toolResult: 'email sent' }, - pendingData: { name: 'send_notification', input: { message: 'Hello' } }, + pendingData: { + name: 'send_notification', + input: { message: 'Hello' }, + userConfirmed: true, + }, }), ); }); @@ -349,12 +357,16 @@ describe('McpTaskStepExecutor', () => { const execution: McpTaskStepExecutionData = { type: 'mcp-task', stepIndex: 0, - pendingData: { name: 'send_notification', input: { message: 'Hello' } }, + pendingData: { + name: 'send_notification', + input: { message: 'Hello' }, + userConfirmed: false, + }, }; const runStore = makeMockRunStore({ getStepExecutions: jest.fn().mockResolvedValue([execution]), }); - const context = makeContext({ runStore, userConfirmed: false }); + const context = makeContext({ runStore }); const executor = new McpTaskStepExecutor(context, [tool]); const result = await executor.execute(); @@ -365,12 +377,43 @@ describe('McpTaskStepExecutor', () => { 'run-1', expect.objectContaining({ executionResult: { skipped: true }, - pendingData: { name: 'send_notification', input: { message: 'Hello' } }, + pendingData: { + name: 'send_notification', + input: { message: 'Hello' }, + userConfirmed: false, + }, }), ); }); }); + describe('trigger before PATCH (Branch A)', () => { + it('re-emits awaiting-input when userConfirmed is not yet set in pendingData', async () => { + const invokeFn = jest.fn(); + const tool = new MockRemoteTool({ + name: 'send_notification', + sourceId: 'mcp-server-1', + invoke: invokeFn, + }); + const execution: McpTaskStepExecutionData = { + type: 'mcp-task', + stepIndex: 0, + pendingData: { name: 'send_notification', input: { message: 'Hello' } }, + }; + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([execution]), + }); + const context = makeContext({ runStore }); + const executor = new McpTaskStepExecutor(context, [tool]); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('awaiting-input'); + 
expect(invokeFn).not.toHaveBeenCalled(); + expect(runStore.saveStepExecution).not.toHaveBeenCalled(); + }); + }); + describe('mcpServerId filter', () => { it('passes only tools from the specified server to the AI', async () => { const toolA = new MockRemoteTool({ name: 'tool_a', sourceId: 'server-A' }); @@ -432,13 +475,13 @@ describe('McpTaskStepExecutor', () => { const execution: McpTaskStepExecutionData = { type: 'mcp-task', stepIndex: 0, - pendingData: { name: 'deleted_tool', input: {} }, + pendingData: { name: 'deleted_tool', input: {}, userConfirmed: true }, }; const tool = new MockRemoteTool({ name: 'other_tool', sourceId: 'mcp-server-1' }); const runStore = makeMockRunStore({ getStepExecutions: jest.fn().mockResolvedValue([execution]), }); - const context = makeContext({ runStore, userConfirmed: true }); + const context = makeContext({ runStore }); const executor = new McpTaskStepExecutor(context, [tool]); const result = await executor.execute(); @@ -492,14 +535,18 @@ describe('McpTaskStepExecutor', () => { const execution: McpTaskStepExecutionData = { type: 'mcp-task', stepIndex: 0, - pendingData: { name: 'send_notification', input: { message: 'Hello' } }, + pendingData: { + name: 'send_notification', + input: { message: 'Hello' }, + userConfirmed: true, + }, }; const logger = { error: jest.fn() }; const runStore = makeMockRunStore({ getStepExecutions: jest.fn().mockResolvedValue([execution]), saveStepExecution: jest.fn().mockRejectedValue(new Error('Disk full')), }); - const context = makeContext({ runStore, userConfirmed: true, logger }); + const context = makeContext({ runStore, logger }); const executor = new McpTaskStepExecutor(context, [tool]); const result = await executor.execute(); @@ -535,21 +582,6 @@ describe('McpTaskStepExecutor', () => { }); describe('no pending data in confirmation flow (Branch A)', () => { - it('returns error when no execution record is found', async () => { - const runStore = makeMockRunStore({ - getStepExecutions: 
jest.fn().mockResolvedValue([]), - }); - const context = makeContext({ runStore, userConfirmed: true }); - const executor = new McpTaskStepExecutor(context, []); - - await expect(executor.execute()).resolves.toMatchObject({ - stepOutcome: { - status: 'error', - error: 'An unexpected error occurred while processing this step.', - }, - }); - }); - it('returns error when execution exists but pendingData is absent', async () => { const execution: McpTaskStepExecutionData = { type: 'mcp-task', @@ -558,7 +590,7 @@ describe('McpTaskStepExecutor', () => { const runStore = makeMockRunStore({ getStepExecutions: jest.fn().mockResolvedValue([execution]), }); - const context = makeContext({ runStore, userConfirmed: true }); + const context = makeContext({ runStore }); const executor = new McpTaskStepExecutor(context, []); await expect(executor.execute()).resolves.toMatchObject({ diff --git a/packages/workflow-executor/test/executors/trigger-record-action-step-executor.test.ts b/packages/workflow-executor/test/executors/trigger-record-action-step-executor.test.ts index c17fb4bfa3..fcf9b63a31 100644 --- a/packages/workflow-executor/test/executors/trigger-record-action-step-executor.test.ts +++ b/packages/workflow-executor/test/executors/trigger-record-action-step-executor.test.ts @@ -203,14 +203,14 @@ describe('TriggerRecordActionStepExecutor', () => { pendingData: { displayName: 'Send Welcome Email', name: 'send-welcome-email', + userConfirmed: true, }, selectedRecordRef: makeRecordRef(), }; const runStore = makeMockRunStore({ getStepExecutions: jest.fn().mockResolvedValue([execution]), }); - const userConfirmed = true; - const context = makeContext({ agentPort, runStore, userConfirmed }); + const context = makeContext({ agentPort, runStore }); const executor = new TriggerRecordActionStepExecutor(context); const result = await executor.execute(); @@ -230,10 +230,10 @@ describe('TriggerRecordActionStepExecutor', () => { name: 'send-welcome-email', }, executionResult: { success: 
true, actionResult: { message: 'Email sent' } }, - pendingData: { + pendingData: expect.objectContaining({ displayName: 'Send Welcome Email', name: 'send-welcome-email', - }, + }), }), ); }); @@ -248,14 +248,14 @@ describe('TriggerRecordActionStepExecutor', () => { pendingData: { displayName: 'Send Welcome Email', name: 'send-welcome-email', + userConfirmed: false, }, selectedRecordRef: makeRecordRef(), }; const runStore = makeMockRunStore({ getStepExecutions: jest.fn().mockResolvedValue([execution]), }); - const userConfirmed = false; - const context = makeContext({ agentPort, runStore, userConfirmed }); + const context = makeContext({ agentPort, runStore }); const executor = new TriggerRecordActionStepExecutor(context); const result = await executor.execute(); @@ -266,63 +266,39 @@ describe('TriggerRecordActionStepExecutor', () => { 'run-1', expect.objectContaining({ executionResult: { skipped: true }, - pendingData: { + pendingData: expect.objectContaining({ displayName: 'Send Welcome Email', name: 'send-welcome-email', - }, + }), }), ); }); }); - describe('no pending action in confirmation flow (Branch A)', () => { - it('returns error outcome when no pending action is found', async () => { + describe('trigger before PATCH (Branch A)', () => { + it('re-emits awaiting-input when userConfirmed is not yet set in pendingData', async () => { + const agentPort = makeMockAgentPort(); + const execution: TriggerRecordActionStepExecutionData = { + type: 'trigger-action', + stepIndex: 0, + pendingData: { displayName: 'Send Welcome Email', name: 'send-welcome-email' }, + selectedRecordRef: makeRecordRef(), + }; const runStore = makeMockRunStore({ - getStepExecutions: jest.fn().mockResolvedValue([]), + getStepExecutions: jest.fn().mockResolvedValue([execution]), }); - const userConfirmed = true; - const context = makeContext({ runStore, userConfirmed }); + const context = makeContext({ agentPort, runStore }); const executor = new TriggerRecordActionStepExecutor(context); - 
await expect(executor.execute()).resolves.toMatchObject({ - stepOutcome: { - type: 'record-task', - stepId: 'trigger-1', - stepIndex: 0, - status: 'error', - error: 'An unexpected error occurred while processing this step.', - }, - }); - expect(runStore.saveStepExecution).not.toHaveBeenCalled(); - }); - - it('returns error outcome when execution exists but stepIndex does not match', async () => { - const runStore = makeMockRunStore({ - getStepExecutions: jest.fn().mockResolvedValue([ - { - type: 'trigger-action', - stepIndex: 5, - pendingData: { displayName: 'Send Welcome Email' }, - selectedRecordRef: makeRecordRef(), - }, - ]), - }); - const userConfirmed = true; - const context = makeContext({ runStore, userConfirmed }); - const executor = new TriggerRecordActionStepExecutor(context); + const result = await executor.execute(); - await expect(executor.execute()).resolves.toMatchObject({ - stepOutcome: { - type: 'record-task', - stepId: 'trigger-1', - stepIndex: 0, - status: 'error', - error: 'An unexpected error occurred while processing this step.', - }, - }); + expect(result.stepOutcome.status).toBe('awaiting-input'); + expect(agentPort.executeAction).not.toHaveBeenCalled(); expect(runStore.saveStepExecution).not.toHaveBeenCalled(); }); + }); + describe('no pending action in confirmation flow (Branch A)', () => { it('returns error outcome when execution exists but pendingData is absent', async () => { const runStore = makeMockRunStore({ getStepExecutions: jest.fn().mockResolvedValue([ @@ -333,8 +309,7 @@ describe('TriggerRecordActionStepExecutor', () => { }, ]), }); - const userConfirmed = true; - const context = makeContext({ runStore, userConfirmed }); + const context = makeContext({ runStore }); const executor = new TriggerRecordActionStepExecutor(context); await expect(executor.execute()).resolves.toMatchObject({ @@ -446,14 +421,14 @@ describe('TriggerRecordActionStepExecutor', () => { pendingData: { displayName: 'Send Welcome Email', name: 
'send-welcome-email', + userConfirmed: true, }, selectedRecordRef: makeRecordRef(), }; const runStore = makeMockRunStore({ getStepExecutions: jest.fn().mockResolvedValue([execution]), }); - const userConfirmed = true; - const context = makeContext({ agentPort, runStore, userConfirmed }); + const context = makeContext({ agentPort, runStore }); const executor = new TriggerRecordActionStepExecutor(context); const result = await executor.execute(); @@ -497,14 +472,14 @@ describe('TriggerRecordActionStepExecutor', () => { pendingData: { displayName: 'Send Welcome Email', name: 'send-welcome-email', + userConfirmed: true, }, selectedRecordRef: makeRecordRef(), }; const runStore = makeMockRunStore({ getStepExecutions: jest.fn().mockResolvedValue([execution]), }); - const userConfirmed = true; - const context = makeContext({ agentPort, runStore, userConfirmed }); + const context = makeContext({ agentPort, runStore }); const executor = new TriggerRecordActionStepExecutor(context); const result = await executor.execute(); @@ -766,8 +741,7 @@ describe('TriggerRecordActionStepExecutor', () => { const runStore = makeMockRunStore({ getStepExecutions: jest.fn().mockRejectedValue(new Error('DB timeout')), }); - const userConfirmed = true; - const context = makeContext({ runStore, userConfirmed }); + const context = makeContext({ runStore }); const executor = new TriggerRecordActionStepExecutor(context); const result = await executor.execute(); @@ -781,6 +755,7 @@ describe('TriggerRecordActionStepExecutor', () => { pendingData: { displayName: 'Send Welcome Email', name: 'send-welcome-email', + userConfirmed: false, }, selectedRecordRef: makeRecordRef(), }; @@ -788,8 +763,7 @@ describe('TriggerRecordActionStepExecutor', () => { getStepExecutions: jest.fn().mockResolvedValue([execution]), saveStepExecution: jest.fn().mockRejectedValue(new Error('Disk full')), }); - const userConfirmed = false; - const context = makeContext({ runStore, userConfirmed }); + const context = makeContext({ 
runStore }); const executor = new TriggerRecordActionStepExecutor(context); const result = await executor.execute(); @@ -830,6 +804,7 @@ describe('TriggerRecordActionStepExecutor', () => { pendingData: { displayName: 'Send Welcome Email', name: 'send-welcome-email', + userConfirmed: true, }, selectedRecordRef: makeRecordRef(), }; @@ -837,8 +812,7 @@ describe('TriggerRecordActionStepExecutor', () => { getStepExecutions: jest.fn().mockResolvedValue([execution]), saveStepExecution: jest.fn().mockRejectedValue(new Error('Disk full')), }); - const userConfirmed = true; - const context = makeContext({ runStore, userConfirmed }); + const context = makeContext({ runStore }); const executor = new TriggerRecordActionStepExecutor(context); const result = await executor.execute(); diff --git a/packages/workflow-executor/test/executors/update-record-step-executor.test.ts b/packages/workflow-executor/test/executors/update-record-step-executor.test.ts index 3e0447c07e..c4da5ef38c 100644 --- a/packages/workflow-executor/test/executors/update-record-step-executor.test.ts +++ b/packages/workflow-executor/test/executors/update-record-step-executor.test.ts @@ -202,14 +202,18 @@ describe('UpdateRecordStepExecutor', () => { const execution: UpdateRecordStepExecutionData = { type: 'update-record', stepIndex: 0, - pendingData: { displayName: 'Status', name: 'status', value: 'active' }, + pendingData: { + displayName: 'Status', + name: 'status', + value: 'active', + userConfirmed: true, + }, selectedRecordRef: makeRecordRef(), }; const runStore = makeMockRunStore({ getStepExecutions: jest.fn().mockResolvedValue([execution]), }); - const userConfirmed = true; - const context = makeContext({ agentPort, runStore, userConfirmed }); + const context = makeContext({ agentPort, runStore }); const executor = new UpdateRecordStepExecutor(context); const result = await executor.execute(); @@ -226,7 +230,12 @@ describe('UpdateRecordStepExecutor', () => { type: 'update-record', executionParams: { 
displayName: 'Status', name: 'status', value: 'active' }, executionResult: { updatedValues }, - pendingData: { displayName: 'Status', name: 'status', value: 'active' }, + pendingData: expect.objectContaining({ + displayName: 'Status', + name: 'status', + value: 'active', + userConfirmed: true, + }), }), ); }); @@ -238,14 +247,18 @@ describe('UpdateRecordStepExecutor', () => { const execution: UpdateRecordStepExecutionData = { type: 'update-record', stepIndex: 0, - pendingData: { displayName: 'Status', name: 'status', value: 'active' }, + pendingData: { + displayName: 'Status', + name: 'status', + value: 'active', + userConfirmed: false, + }, selectedRecordRef: makeRecordRef(), }; const runStore = makeMockRunStore({ getStepExecutions: jest.fn().mockResolvedValue([execution]), }); - const userConfirmed = false; - const context = makeContext({ agentPort, runStore, userConfirmed }); + const context = makeContext({ agentPort, runStore }); const executor = new UpdateRecordStepExecutor(context); const result = await executor.execute(); @@ -256,60 +269,41 @@ describe('UpdateRecordStepExecutor', () => { 'run-1', expect.objectContaining({ executionResult: { skipped: true }, - pendingData: { displayName: 'Status', name: 'status', value: 'active' }, + pendingData: expect.objectContaining({ + displayName: 'Status', + name: 'status', + value: 'active', + userConfirmed: false, + }), }), ); }); }); - describe('no pending update in phase 2 (Branch A)', () => { - it('returns error outcome when no pending update is found', async () => { + describe('trigger before PATCH (Branch A)', () => { + it('re-emits awaiting-input when userConfirmed is not yet set in pendingData', async () => { + const agentPort = makeMockAgentPort(); + const execution: UpdateRecordStepExecutionData = { + type: 'update-record', + stepIndex: 0, + pendingData: { displayName: 'Status', name: 'status', value: 'active' }, + selectedRecordRef: makeRecordRef(), + }; const runStore = makeMockRunStore({ - 
getStepExecutions: jest.fn().mockResolvedValue([]), + getStepExecutions: jest.fn().mockResolvedValue([execution]), }); - const userConfirmed = true; - const context = makeContext({ runStore, userConfirmed }); + const context = makeContext({ agentPort, runStore }); const executor = new UpdateRecordStepExecutor(context); - await expect(executor.execute()).resolves.toMatchObject({ - stepOutcome: { - type: 'record-task', - stepId: 'update-1', - stepIndex: 0, - status: 'error', - error: 'An unexpected error occurred while processing this step.', - }, - }); - expect(runStore.saveStepExecution).not.toHaveBeenCalled(); - }); - - it('returns error outcome when execution exists but stepIndex does not match', async () => { - const runStore = makeMockRunStore({ - getStepExecutions: jest.fn().mockResolvedValue([ - { - type: 'update-record', - stepIndex: 5, - pendingData: { displayName: 'Status', name: 'status', value: 'active' }, - selectedRecordRef: makeRecordRef(), - }, - ]), - }); - const userConfirmed = true; - const context = makeContext({ runStore, userConfirmed }); - const executor = new UpdateRecordStepExecutor(context); + const result = await executor.execute(); - await expect(executor.execute()).resolves.toMatchObject({ - stepOutcome: { - type: 'record-task', - stepId: 'update-1', - stepIndex: 0, - status: 'error', - error: 'An unexpected error occurred while processing this step.', - }, - }); + expect(result.stepOutcome.status).toBe('awaiting-input'); + expect(agentPort.updateRecord).not.toHaveBeenCalled(); expect(runStore.saveStepExecution).not.toHaveBeenCalled(); }); + }); + describe('no pending update in phase 2 (Branch A)', () => { it('returns error outcome when execution exists but pendingData is absent', async () => { const runStore = makeMockRunStore({ getStepExecutions: jest.fn().mockResolvedValue([ @@ -320,8 +314,7 @@ describe('UpdateRecordStepExecutor', () => { }, ]), }); - const userConfirmed = true; - const context = makeContext({ runStore, userConfirmed 
}); + const context = makeContext({ runStore }); const executor = new UpdateRecordStepExecutor(context); await expect(executor.execute()).resolves.toMatchObject({ @@ -594,14 +587,18 @@ describe('UpdateRecordStepExecutor', () => { const execution: UpdateRecordStepExecutionData = { type: 'update-record', stepIndex: 0, - pendingData: { displayName: 'Status', name: 'status', value: 'active' }, + pendingData: { + displayName: 'Status', + name: 'status', + value: 'active', + userConfirmed: true, + }, selectedRecordRef: makeRecordRef(), }; const runStore = makeMockRunStore({ getStepExecutions: jest.fn().mockResolvedValue([execution]), }); - const userConfirmed = true; - const context = makeContext({ agentPort, runStore, userConfirmed }); + const context = makeContext({ agentPort, runStore }); const executor = new UpdateRecordStepExecutor(context); const result = await executor.execute(); @@ -642,14 +639,18 @@ describe('UpdateRecordStepExecutor', () => { const execution: UpdateRecordStepExecutionData = { type: 'update-record', stepIndex: 0, - pendingData: { displayName: 'Status', name: 'status', value: 'active' }, + pendingData: { + displayName: 'Status', + name: 'status', + value: 'active', + userConfirmed: true, + }, selectedRecordRef: makeRecordRef(), }; const runStore = makeMockRunStore({ getStepExecutions: jest.fn().mockResolvedValue([execution]), }); - const userConfirmed = true; - const context = makeContext({ agentPort, runStore, userConfirmed }); + const context = makeContext({ agentPort, runStore }); const executor = new UpdateRecordStepExecutor(context); const result = await executor.execute(); @@ -746,8 +747,7 @@ describe('UpdateRecordStepExecutor', () => { const runStore = makeMockRunStore({ getStepExecutions: jest.fn().mockRejectedValue(new Error('DB timeout')), }); - const userConfirmed = true; - const context = makeContext({ runStore, userConfirmed }); + const context = makeContext({ runStore }); const executor = new UpdateRecordStepExecutor(context); const 
result = await executor.execute(); @@ -758,15 +758,19 @@ describe('UpdateRecordStepExecutor', () => { const execution: UpdateRecordStepExecutionData = { type: 'update-record', stepIndex: 0, - pendingData: { displayName: 'Status', name: 'status', value: 'active' }, + pendingData: { + displayName: 'Status', + name: 'status', + value: 'active', + userConfirmed: false, + }, selectedRecordRef: makeRecordRef(), }; const runStore = makeMockRunStore({ getStepExecutions: jest.fn().mockResolvedValue([execution]), saveStepExecution: jest.fn().mockRejectedValue(new Error('Disk full')), }); - const userConfirmed = false; - const context = makeContext({ runStore, userConfirmed }); + const context = makeContext({ runStore }); const executor = new UpdateRecordStepExecutor(context); const result = await executor.execute(); diff --git a/packages/workflow-executor/test/http/executor-http-server.test.ts b/packages/workflow-executor/test/http/executor-http-server.test.ts index a910db9cb8..91baf9f892 100644 --- a/packages/workflow-executor/test/http/executor-http-server.test.ts +++ b/packages/workflow-executor/test/http/executor-http-server.test.ts @@ -352,6 +352,134 @@ describe('ExecutorHttpServer', () => { }); }); + describe('PATCH /runs/:runId/steps/:stepIndex/pending-data', () => { + it('returns 204 and merges userConfirmed:true into pendingData', async () => { + const existing = { + type: 'update-record' as const, + stepIndex: 2, + pendingData: { fieldName: 'status', value: 'active' }, + }; + const runStore = createMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([existing]), + saveStepExecution: jest.fn().mockResolvedValue(undefined), + }); + + const server = createServer({ runStore }); + const token = signToken({ id: 'user-1' }); + + const response = await request(server.callback) + .patch('/runs/run-1/steps/2/pending-data') + .set('Authorization', `Bearer ${token}`) + .send({ userConfirmed: true }); + + expect(response.status).toBe(204); + 
expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ + pendingData: { fieldName: 'status', value: 'active', userConfirmed: true }, + }), + ); + }); + + it('returns 204 and merges userConfirmed:false into pendingData', async () => { + const existing = { + type: 'trigger-action' as const, + stepIndex: 0, + pendingData: { name: 'send_email', displayName: 'Send Email' }, + }; + const runStore = createMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([existing]), + saveStepExecution: jest.fn().mockResolvedValue(undefined), + }); + + const server = createServer({ runStore }); + const token = signToken({ id: 'user-1' }); + + const response = await request(server.callback) + .patch('/runs/run-1/steps/0/pending-data') + .set('Authorization', `Bearer ${token}`) + .send({ userConfirmed: false }); + + expect(response.status).toBe(204); + expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ + pendingData: { name: 'send_email', displayName: 'Send Email', userConfirmed: false }, + }), + ); + }); + + it('returns 404 when step execution does not exist', async () => { + const runStore = createMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([]), + }); + + const server = createServer({ runStore }); + const token = signToken({ id: 'user-1' }); + + const response = await request(server.callback) + .patch('/runs/run-1/steps/0/pending-data') + .set('Authorization', `Bearer ${token}`) + .send({ userConfirmed: true }); + + expect(response.status).toBe(404); + expect(response.body).toEqual({ error: 'Step execution not found or has no pending data' }); + }); + + it('returns 404 when step execution has no pendingData', async () => { + const existing = { type: 'condition' as const, stepIndex: 1 }; + const runStore = createMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([existing]), + }); + + const server = createServer({ runStore }); + const token = signToken({ id: 
'user-1' }); + + const response = await request(server.callback) + .patch('/runs/run-1/steps/1/pending-data') + .set('Authorization', `Bearer ${token}`) + .send({ userConfirmed: true }); + + expect(response.status).toBe(404); + expect(response.body).toEqual({ error: 'Step execution not found or has no pending data' }); + }); + + it('returns 400 when stepIndex is not a valid integer', async () => { + const server = createServer(); + const token = signToken({ id: 'user-1' }); + + const response = await request(server.callback) + .patch('/runs/run-1/steps/abc/pending-data') + .set('Authorization', `Bearer ${token}`) + .send({ userConfirmed: true }); + + expect(response.status).toBe(400); + expect(response.body).toEqual({ error: 'Invalid stepIndex' }); + }); + + it('returns 400 when userConfirmed is not a boolean', async () => { + const existing = { + type: 'update-record' as const, + stepIndex: 0, + pendingData: { fieldName: 'status', value: 'active' }, + }; + const runStore = createMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([existing]), + }); + + const server = createServer({ runStore }); + const token = signToken({ id: 'user-1' }); + + const response = await request(server.callback) + .patch('/runs/run-1/steps/0/pending-data') + .set('Authorization', `Bearer ${token}`) + .send({ userConfirmed: 'yes' }); + + expect(response.status).toBe(400); + expect(response.body).toEqual({ error: 'userConfirmed must be a boolean' }); + }); + }); + describe('start / stop', () => { it('should start and stop the server', async () => { const server = createServer(); From 50583e84efd421b8b2bfab1b2a5174fc7fdfbe4a Mon Sep 17 00:00:00 2001 From: Matthieu Date: Wed, 25 Mar 2026 12:09:42 +0100 Subject: [PATCH 14/18] feat(workflow-executor): add RunStore implementations (InMemoryStore + DatabaseStore) (#1506) --- packages/workflow-executor/package.json | 4 + packages/workflow-executor/src/index.ts | 4 + .../workflow-executor/src/ports/run-store.ts | 3 + 
packages/workflow-executor/src/runner.ts | 7 +- .../src/stores/build-run-store.ts | 29 + .../src/stores/database-store.ts | 126 + .../src/stores/in-memory-store.ts | 33 + .../test/executors/base-step-executor.test.ts | 2 + .../executors/condition-step-executor.test.ts | 2 + .../load-related-record-step-executor.test.ts | 26 +- .../executors/mcp-task-step-executor.test.ts | 46 +- .../read-record-step-executor.test.ts | 2 + ...rigger-record-action-step-executor.test.ts | 52 +- .../update-record-step-executor.test.ts | 50 +- .../test/http/executor-http-server.test.ts | 2 + .../workflow-executor/test/runner.test.ts | 26 +- .../test/stores/database-store.test.ts | 111 + .../test/stores/in-memory-store.test.ts | 81 + yarn.lock | 2998 ++++++++++------- 19 files changed, 2289 insertions(+), 1315 deletions(-) create mode 100644 packages/workflow-executor/src/stores/build-run-store.ts create mode 100644 packages/workflow-executor/src/stores/database-store.ts create mode 100644 packages/workflow-executor/src/stores/in-memory-store.ts create mode 100644 packages/workflow-executor/test/stores/database-store.test.ts create mode 100644 packages/workflow-executor/test/stores/in-memory-store.test.ts diff --git a/packages/workflow-executor/package.json b/packages/workflow-executor/package.json index 5f94e3052f..29a994a272 100644 --- a/packages/workflow-executor/package.json +++ b/packages/workflow-executor/package.json @@ -31,12 +31,16 @@ "jsonwebtoken": "^9.0.3", "koa": "^3.0.1", "koa-jwt": "^4.0.4", + "sequelize": "^6.37.8", + "umzug": "^3.8.2", "zod": "4.3.6" }, "devDependencies": { "@types/jsonwebtoken": "^9.0.10", "@types/koa": "^2.13.5", "@types/koa__router": "^12.0.4", + "@types/sequelize": "^6.12.0", + "sqlite3": "^6.0.1", "supertest": "^7.1.3" } } diff --git a/packages/workflow-executor/src/index.ts b/packages/workflow-executor/src/index.ts index 7077ae07da..e359dc4ac9 100644 --- a/packages/workflow-executor/src/index.ts +++ b/packages/workflow-executor/src/index.ts @@ 
-102,3 +102,7 @@ export type { ExecutorHttpServerOptions } from './http/executor-http-server'; export { default as Runner } from './runner'; export type { RunnerConfig } from './runner'; export { default as validateSecrets } from './validate-secrets'; +export { default as InMemoryStore } from './stores/in-memory-store'; +export { default as DatabaseStore } from './stores/database-store'; +export type { DatabaseStoreOptions } from './stores/database-store'; +export { buildDatabaseRunStore, buildInMemoryRunStore } from './stores/build-run-store'; diff --git a/packages/workflow-executor/src/ports/run-store.ts b/packages/workflow-executor/src/ports/run-store.ts index 426e826a9f..68f6078a39 100644 --- a/packages/workflow-executor/src/ports/run-store.ts +++ b/packages/workflow-executor/src/ports/run-store.ts @@ -1,8 +1,11 @@ /** @draft Types derived from the workflow-executor spec -- subject to change. */ +import type { Logger } from './logger-port'; import type { StepExecutionData } from '../types/step-execution-data'; export interface RunStore { + init(logger?: Logger): Promise; + close(logger?: Logger): Promise; getStepExecutions(runId: string): Promise; saveStepExecution(runId: string, stepExecution: StepExecutionData): Promise; } diff --git a/packages/workflow-executor/src/runner.ts b/packages/workflow-executor/src/runner.ts index 4dd16528bd..ccb423ff50 100644 --- a/packages/workflow-executor/src/runner.ts +++ b/packages/workflow-executor/src/runner.ts @@ -59,6 +59,8 @@ export default class Runner { this.isRunning = true; try { + await this.config.runStore.init(this.logger); + if (this.config.httpPort !== undefined && !this.httpServer) { const server = new ExecutorHttpServer({ port: this.config.httpPort, @@ -92,7 +94,10 @@ export default class Runner { this.httpServer = null; } - await this.config.aiClient.closeConnections(); + await Promise.allSettled([ + this.config.aiClient.closeConnections(), + this.config.runStore.close(this.logger), + ]); // TODO: graceful 
drain of in-flight steps (out of scope PRD-223) } diff --git a/packages/workflow-executor/src/stores/build-run-store.ts b/packages/workflow-executor/src/stores/build-run-store.ts new file mode 100644 index 0000000000..a081692ba1 --- /dev/null +++ b/packages/workflow-executor/src/stores/build-run-store.ts @@ -0,0 +1,29 @@ +import type { RunStore } from '../ports/run-store'; +import type { Options } from 'sequelize'; + +import { Sequelize } from 'sequelize'; + +import DatabaseStore from './database-store'; +import InMemoryStore from './in-memory-store'; + +export async function buildDatabaseRunStore(options: Options): Promise { + const sequelize = new Sequelize({ logging: false, ...options }); + + const store = new DatabaseStore({ sequelize }); + + try { + await store.init(); + } catch (error) { + await sequelize.close(); + throw error; + } + + return store; +} + +export async function buildInMemoryRunStore(): Promise { + const store = new InMemoryStore(); + await store.init(); + + return store; +} diff --git a/packages/workflow-executor/src/stores/database-store.ts b/packages/workflow-executor/src/stores/database-store.ts new file mode 100644 index 0000000000..1a28015b69 --- /dev/null +++ b/packages/workflow-executor/src/stores/database-store.ts @@ -0,0 +1,126 @@ +import type { Logger } from '../ports/logger-port'; +import type { RunStore } from '../ports/run-store'; +import type { StepExecutionData } from '../types/step-execution-data'; +import type { QueryInterface, Sequelize } from 'sequelize'; + +import { DataTypes } from 'sequelize'; +import { SequelizeStorage, Umzug } from 'umzug'; + +const TABLE_NAME = 'workflow_step_executions'; + +export interface DatabaseStoreOptions { + sequelize: Sequelize; +} + +export default class DatabaseStore implements RunStore { + private readonly sequelize: Sequelize; + + constructor(options: DatabaseStoreOptions) { + this.sequelize = options.sequelize; + } + + async init(logger?: Logger): Promise { + const umzug = new Umzug({ + 
migrations: [ + { + name: '001_create_workflow_step_executions', + up: async ({ context }: { context: QueryInterface }) => { + await context.createTable(TABLE_NAME, { + id: { + type: DataTypes.INTEGER, + primaryKey: true, + autoIncrement: true, + }, + runId: { + type: DataTypes.STRING(255), + allowNull: false, + field: 'run_id', + }, + stepIndex: { + type: DataTypes.INTEGER, + allowNull: false, + field: 'step_index', + }, + data: { + type: DataTypes.JSON, + allowNull: false, + }, + createdAt: { + type: DataTypes.DATE, + allowNull: false, + defaultValue: DataTypes.NOW, + field: 'created_at', + }, + updatedAt: { + type: DataTypes.DATE, + allowNull: false, + defaultValue: DataTypes.NOW, + field: 'updated_at', + }, + }); + + await context.addIndex(TABLE_NAME, ['run_id'], { name: 'idx_run_id' }); + await context.addIndex(TABLE_NAME, ['run_id', 'step_index'], { + unique: true, + name: 'idx_run_id_step_index', + }); + }, + down: async ({ context }: { context: QueryInterface }) => { + await context.dropTable(TABLE_NAME); + }, + }, + ], + context: this.sequelize.getQueryInterface(), + storage: new SequelizeStorage({ sequelize: this.sequelize }), + logger: undefined, + }); + + try { + await umzug.up(); + } catch (error) { + logger?.error('Database migration failed', { + error: error instanceof Error ? error.message : String(error), + }); + throw error; + } + } + + async getStepExecutions(runId: string): Promise { + const [rows] = await this.sequelize.query( + `SELECT data FROM ${TABLE_NAME} WHERE run_id = :runId ORDER BY step_index ASC`, + { replacements: { runId } }, + ); + + return (rows as Array<{ data: string | StepExecutionData }>).map(row => + typeof row.data === 'string' ? 
JSON.parse(row.data) : row.data, + ); + } + + async saveStepExecution(runId: string, stepExecution: StepExecutionData): Promise { + await this.sequelize.transaction(async transaction => { + const now = new Date(); + const data = JSON.stringify(stepExecution); + const replacements = { runId, stepIndex: stepExecution.stepIndex, data, now }; + + // Delete + insert in transaction: dialect-agnostic upsert (avoids ON CONFLICT / ON DUPLICATE) + await this.sequelize.query( + `DELETE FROM ${TABLE_NAME} WHERE run_id = :runId AND step_index = :stepIndex`, + { replacements, transaction }, + ); + await this.sequelize.query( + `INSERT INTO ${TABLE_NAME} (run_id, step_index, data, created_at, updated_at) VALUES (:runId, :stepIndex, :data, :now, :now)`, + { replacements, transaction }, + ); + }); + } + + async close(logger?: Logger): Promise { + try { + await this.sequelize.close(); + } catch (error) { + logger?.error('Failed to close database connection', { + error: error instanceof Error ? error.message : String(error), + }); + } + } +} diff --git a/packages/workflow-executor/src/stores/in-memory-store.ts b/packages/workflow-executor/src/stores/in-memory-store.ts new file mode 100644 index 0000000000..8d8aab5714 --- /dev/null +++ b/packages/workflow-executor/src/stores/in-memory-store.ts @@ -0,0 +1,33 @@ +import type { RunStore } from '../ports/run-store'; +import type { StepExecutionData } from '../types/step-execution-data'; + +export default class InMemoryStore implements RunStore { + private readonly data = new Map>(); + + async init(): Promise { + // No-op: in-memory store requires no initialization + } + + async close(): Promise { + // No-op: nothing to clean up + } + + async getStepExecutions(runId: string): Promise { + const runData = this.data.get(runId); + + if (!runData) return []; + + return [...runData.values()].sort((a, b) => a.stepIndex - b.stepIndex); + } + + async saveStepExecution(runId: string, stepExecution: StepExecutionData): Promise { + let runData = 
this.data.get(runId); + + if (!runData) { + runData = new Map(); + this.data.set(runId, runData); + } + + runData.set(stepExecution.stepIndex, stepExecution); + } +} diff --git a/packages/workflow-executor/test/executors/base-step-executor.test.ts b/packages/workflow-executor/test/executors/base-step-executor.test.ts index 8c3f5c2543..caabca5fed 100644 --- a/packages/workflow-executor/test/executors/base-step-executor.test.ts +++ b/packages/workflow-executor/test/executors/base-step-executor.test.ts @@ -78,6 +78,8 @@ function makeHistoryEntry( function makeMockRunStore(stepExecutions: StepExecutionData[] = []): RunStore { return { + init: jest.fn().mockResolvedValue(undefined), + close: jest.fn().mockResolvedValue(undefined), getStepExecutions: jest.fn().mockResolvedValue(stepExecutions), saveStepExecution: jest.fn().mockResolvedValue(undefined), }; diff --git a/packages/workflow-executor/test/executors/condition-step-executor.test.ts b/packages/workflow-executor/test/executors/condition-step-executor.test.ts index 696b200ab4..22520661ed 100644 --- a/packages/workflow-executor/test/executors/condition-step-executor.test.ts +++ b/packages/workflow-executor/test/executors/condition-step-executor.test.ts @@ -18,6 +18,8 @@ function makeStep(overrides: Partial = {}): ConditionSt function makeMockRunStore(overrides: Partial = {}): RunStore { return { + init: jest.fn().mockResolvedValue(undefined), + close: jest.fn().mockResolvedValue(undefined), getStepExecutions: jest.fn().mockResolvedValue([]), saveStepExecution: jest.fn().mockResolvedValue(undefined), ...overrides, diff --git a/packages/workflow-executor/test/executors/load-related-record-step-executor.test.ts b/packages/workflow-executor/test/executors/load-related-record-step-executor.test.ts index 6ac150c2e3..5e168b42dd 100644 --- a/packages/workflow-executor/test/executors/load-related-record-step-executor.test.ts +++ b/packages/workflow-executor/test/executors/load-related-record-step-executor.test.ts @@ -67,6 
+67,8 @@ function makeCollectionSchema(overrides: Partial = {}): Collec function makeMockRunStore(overrides: Partial = {}): RunStore { return { + init: jest.fn().mockResolvedValue(undefined), + close: jest.fn().mockResolvedValue(undefined), getStepExecutions: jest.fn().mockResolvedValue([]), saveStepExecution: jest.fn().mockResolvedValue(undefined), ...overrides, @@ -743,8 +745,8 @@ describe('LoadRelatedRecordStepExecutor', () => { displayName: 'Order', name: 'order', relatedCollectionName: 'orders', - suggestedFields: ['status', 'amount'], selectedRecordId: [99], + suggestedFields: ['status', 'amount'], userConfirmed: true, }, }); @@ -817,8 +819,8 @@ describe('LoadRelatedRecordStepExecutor', () => { displayName: 'Order', name: 'order', relatedCollectionName: 'orders', - suggestedFields: ['status', 'amount'], selectedRecordId: [99], + suggestedFields: ['status', 'amount'], userConfirmed: false, }, }); @@ -842,25 +844,21 @@ describe('LoadRelatedRecordStepExecutor', () => { }); }); - describe('trigger before PATCH (Branch A)', () => { - it('re-emits awaiting-input when userConfirmed is not yet set in pendingData', async () => { - const agentPort = makeMockAgentPort(); - const execution = makePendingExecution(); // pendingData has no userConfirmed + describe('no pending data in confirmation flow (Branch A)', () => { + it('falls through to first-call path when no execution record is found', async () => { const runStore = makeMockRunStore({ - getStepExecutions: jest.fn().mockResolvedValue([execution]), + init: jest.fn().mockResolvedValue(undefined), + close: jest.fn().mockResolvedValue(undefined), + getStepExecutions: jest.fn().mockResolvedValue([]), }); - const context = makeContext({ agentPort, runStore }); + const context = makeContext({ runStore }); const executor = new LoadRelatedRecordStepExecutor(context); const result = await executor.execute(); expect(result.stepOutcome.status).toBe('awaiting-input'); - expect(agentPort.getRelatedData).not.toHaveBeenCalled(); - 
expect(runStore.saveStepExecution).not.toHaveBeenCalled(); }); - }); - describe('no pending data in confirmation flow (Branch A)', () => { it('returns error outcome when execution exists but pendingData is absent', async () => { const runStore = makeMockRunStore({ getStepExecutions: jest.fn().mockResolvedValue([ @@ -1004,8 +1002,8 @@ describe('LoadRelatedRecordStepExecutor', () => { displayName: 'Order', name: 'order', relatedCollectionName: 'orders', - suggestedFields: ['status', 'amount'], selectedRecordId: [99], + suggestedFields: ['status', 'amount'], userConfirmed: true, }, }); @@ -1369,8 +1367,8 @@ describe('LoadRelatedRecordStepExecutor', () => { displayName: 'Order', name: 'order', relatedCollectionName: 'orders', - suggestedFields: ['status', 'amount'], selectedRecordId: [99], + suggestedFields: ['status', 'amount'], userConfirmed: false, }, }); diff --git a/packages/workflow-executor/test/executors/mcp-task-step-executor.test.ts b/packages/workflow-executor/test/executors/mcp-task-step-executor.test.ts index a7a3d6d51a..81b74c5749 100644 --- a/packages/workflow-executor/test/executors/mcp-task-step-executor.test.ts +++ b/packages/workflow-executor/test/executors/mcp-task-step-executor.test.ts @@ -40,6 +40,8 @@ function makeStep(overrides: Partial = {}): McpTaskStepDe function makeMockRunStore(overrides: Partial = {}): RunStore { return { + init: jest.fn().mockResolvedValue(undefined), + close: jest.fn().mockResolvedValue(undefined), getStepExecutions: jest.fn().mockResolvedValue([]), saveStepExecution: jest.fn().mockResolvedValue(undefined), ...overrides, @@ -387,33 +389,6 @@ describe('McpTaskStepExecutor', () => { }); }); - describe('trigger before PATCH (Branch A)', () => { - it('re-emits awaiting-input when userConfirmed is not yet set in pendingData', async () => { - const invokeFn = jest.fn(); - const tool = new MockRemoteTool({ - name: 'send_notification', - sourceId: 'mcp-server-1', - invoke: invokeFn, - }); - const execution: 
McpTaskStepExecutionData = { - type: 'mcp-task', - stepIndex: 0, - pendingData: { name: 'send_notification', input: { message: 'Hello' } }, - }; - const runStore = makeMockRunStore({ - getStepExecutions: jest.fn().mockResolvedValue([execution]), - }); - const context = makeContext({ runStore }); - const executor = new McpTaskStepExecutor(context, [tool]); - - const result = await executor.execute(); - - expect(result.stepOutcome.status).toBe('awaiting-input'); - expect(invokeFn).not.toHaveBeenCalled(); - expect(runStore.saveStepExecution).not.toHaveBeenCalled(); - }); - }); - describe('mcpServerId filter', () => { it('passes only tools from the specified server to the AI', async () => { const toolA = new MockRemoteTool({ name: 'tool_a', sourceId: 'server-A' }); @@ -582,6 +557,23 @@ describe('McpTaskStepExecutor', () => { }); describe('no pending data in confirmation flow (Branch A)', () => { + it('falls through to first-call path when no execution record is found', async () => { + const runStore = makeMockRunStore({ + init: jest.fn().mockResolvedValue(undefined), + close: jest.fn().mockResolvedValue(undefined), + getStepExecutions: jest.fn().mockResolvedValue([]), + }); + const context = makeContext({ runStore }); + const executor = new McpTaskStepExecutor(context, []); + + await expect(executor.execute()).resolves.toMatchObject({ + stepOutcome: { + status: 'error', + error: 'No tools are available to execute this step.', + }, + }); + }); + it('returns error when execution exists but pendingData is absent', async () => { const execution: McpTaskStepExecutionData = { type: 'mcp-task', diff --git a/packages/workflow-executor/test/executors/read-record-step-executor.test.ts b/packages/workflow-executor/test/executors/read-record-step-executor.test.ts index f565b7525f..7acf003c1a 100644 --- a/packages/workflow-executor/test/executors/read-record-step-executor.test.ts +++ b/packages/workflow-executor/test/executors/read-record-step-executor.test.ts @@ -60,6 +60,8 @@ 
function makeCollectionSchema(overrides: Partial = {}): Collec function makeMockRunStore(overrides: Partial = {}): RunStore { return { + init: jest.fn().mockResolvedValue(undefined), + close: jest.fn().mockResolvedValue(undefined), getStepExecutions: jest.fn().mockResolvedValue([]), saveStepExecution: jest.fn().mockResolvedValue(undefined), ...overrides, diff --git a/packages/workflow-executor/test/executors/trigger-record-action-step-executor.test.ts b/packages/workflow-executor/test/executors/trigger-record-action-step-executor.test.ts index fcf9b63a31..6adc26d1ed 100644 --- a/packages/workflow-executor/test/executors/trigger-record-action-step-executor.test.ts +++ b/packages/workflow-executor/test/executors/trigger-record-action-step-executor.test.ts @@ -55,6 +55,8 @@ function makeCollectionSchema(overrides: Partial = {}): Collec function makeMockRunStore(overrides: Partial = {}): RunStore { return { + init: jest.fn().mockResolvedValue(undefined), + close: jest.fn().mockResolvedValue(undefined), getStepExecutions: jest.fn().mockResolvedValue([]), saveStepExecution: jest.fn().mockResolvedValue(undefined), ...overrides, @@ -230,10 +232,11 @@ describe('TriggerRecordActionStepExecutor', () => { name: 'send-welcome-email', }, executionResult: { success: true, actionResult: { message: 'Email sent' } }, - pendingData: expect.objectContaining({ + pendingData: { displayName: 'Send Welcome Email', name: 'send-welcome-email', - }), + userConfirmed: true, + }, }), ); }); @@ -266,39 +269,50 @@ describe('TriggerRecordActionStepExecutor', () => { 'run-1', expect.objectContaining({ executionResult: { skipped: true }, - pendingData: expect.objectContaining({ + pendingData: { displayName: 'Send Welcome Email', name: 'send-welcome-email', - }), + userConfirmed: false, + }, }), ); }); }); - describe('trigger before PATCH (Branch A)', () => { - it('re-emits awaiting-input when userConfirmed is not yet set in pendingData', async () => { - const agentPort = makeMockAgentPort(); - 
const execution: TriggerRecordActionStepExecutionData = { - type: 'trigger-action', - stepIndex: 0, - pendingData: { displayName: 'Send Welcome Email', name: 'send-welcome-email' }, - selectedRecordRef: makeRecordRef(), - }; + describe('no pending action in confirmation flow (Branch A)', () => { + it('falls through to first-call path when no pending action is found', async () => { const runStore = makeMockRunStore({ - getStepExecutions: jest.fn().mockResolvedValue([execution]), + init: jest.fn().mockResolvedValue(undefined), + close: jest.fn().mockResolvedValue(undefined), + getStepExecutions: jest.fn().mockResolvedValue([]), }); - const context = makeContext({ agentPort, runStore }); + const context = makeContext({ runStore }); + const executor = new TriggerRecordActionStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('awaiting-input'); + }); + + it('falls through to first-call path when execution exists but stepIndex does not match', async () => { + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([ + { + type: 'trigger-action', + stepIndex: 5, + pendingData: { displayName: 'Send Welcome Email' }, + selectedRecordRef: makeRecordRef(), + }, + ]), + }); + const context = makeContext({ runStore }); const executor = new TriggerRecordActionStepExecutor(context); const result = await executor.execute(); expect(result.stepOutcome.status).toBe('awaiting-input'); - expect(agentPort.executeAction).not.toHaveBeenCalled(); - expect(runStore.saveStepExecution).not.toHaveBeenCalled(); }); - }); - describe('no pending action in confirmation flow (Branch A)', () => { it('returns error outcome when execution exists but pendingData is absent', async () => { const runStore = makeMockRunStore({ getStepExecutions: jest.fn().mockResolvedValue([ diff --git a/packages/workflow-executor/test/executors/update-record-step-executor.test.ts 
b/packages/workflow-executor/test/executors/update-record-step-executor.test.ts index c4da5ef38c..29ff8a4691 100644 --- a/packages/workflow-executor/test/executors/update-record-step-executor.test.ts +++ b/packages/workflow-executor/test/executors/update-record-step-executor.test.ts @@ -60,6 +60,8 @@ function makeCollectionSchema(overrides: Partial = {}): Collec function makeMockRunStore(overrides: Partial = {}): RunStore { return { + init: jest.fn().mockResolvedValue(undefined), + close: jest.fn().mockResolvedValue(undefined), getStepExecutions: jest.fn().mockResolvedValue([]), saveStepExecution: jest.fn().mockResolvedValue(undefined), ...overrides, @@ -230,12 +232,12 @@ describe('UpdateRecordStepExecutor', () => { type: 'update-record', executionParams: { displayName: 'Status', name: 'status', value: 'active' }, executionResult: { updatedValues }, - pendingData: expect.objectContaining({ + pendingData: { displayName: 'Status', name: 'status', value: 'active', userConfirmed: true, - }), + }, }), ); }); @@ -269,41 +271,51 @@ describe('UpdateRecordStepExecutor', () => { 'run-1', expect.objectContaining({ executionResult: { skipped: true }, - pendingData: expect.objectContaining({ + pendingData: { displayName: 'Status', name: 'status', value: 'active', userConfirmed: false, - }), + }, }), ); }); }); - describe('trigger before PATCH (Branch A)', () => { - it('re-emits awaiting-input when userConfirmed is not yet set in pendingData', async () => { - const agentPort = makeMockAgentPort(); - const execution: UpdateRecordStepExecutionData = { - type: 'update-record', - stepIndex: 0, - pendingData: { displayName: 'Status', name: 'status', value: 'active' }, - selectedRecordRef: makeRecordRef(), - }; + describe('no pending update in phase 2 (Branch A)', () => { + it('falls through to first-call path when no pending update is found', async () => { const runStore = makeMockRunStore({ - getStepExecutions: jest.fn().mockResolvedValue([execution]), + init: 
jest.fn().mockResolvedValue(undefined), + close: jest.fn().mockResolvedValue(undefined), + getStepExecutions: jest.fn().mockResolvedValue([]), }); - const context = makeContext({ agentPort, runStore }); + const context = makeContext({ runStore }); + const executor = new UpdateRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('awaiting-input'); + }); + + it('falls through to first-call path when execution exists but stepIndex does not match', async () => { + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([ + { + type: 'update-record', + stepIndex: 5, + pendingData: { displayName: 'Status', name: 'status', value: 'active' }, + selectedRecordRef: makeRecordRef(), + }, + ]), + }); + const context = makeContext({ runStore }); const executor = new UpdateRecordStepExecutor(context); const result = await executor.execute(); expect(result.stepOutcome.status).toBe('awaiting-input'); - expect(agentPort.updateRecord).not.toHaveBeenCalled(); - expect(runStore.saveStepExecution).not.toHaveBeenCalled(); }); - }); - describe('no pending update in phase 2 (Branch A)', () => { it('returns error outcome when execution exists but pendingData is absent', async () => { const runStore = makeMockRunStore({ getStepExecutions: jest.fn().mockResolvedValue([ diff --git a/packages/workflow-executor/test/http/executor-http-server.test.ts b/packages/workflow-executor/test/http/executor-http-server.test.ts index 91baf9f892..dca12e7c26 100644 --- a/packages/workflow-executor/test/http/executor-http-server.test.ts +++ b/packages/workflow-executor/test/http/executor-http-server.test.ts @@ -16,6 +16,8 @@ function signToken(payload: object, secret = AUTH_SECRET, options?: jsonwebtoken function createMockRunStore(overrides: Partial = {}): RunStore { return { + init: jest.fn().mockResolvedValue(undefined), + close: jest.fn().mockResolvedValue(undefined), getStepExecutions: 
jest.fn().mockResolvedValue([]), saveStepExecution: jest.fn().mockResolvedValue(undefined), ...overrides, diff --git a/packages/workflow-executor/test/runner.test.ts b/packages/workflow-executor/test/runner.test.ts index 57b1aa8117..92c595768b 100644 --- a/packages/workflow-executor/test/runner.test.ts +++ b/packages/workflow-executor/test/runner.test.ts @@ -75,7 +75,12 @@ function createRunnerConfig( return { agentPort: {} as AgentPort, workflowPort: createMockWorkflowPort(), - runStore: {} as RunStore, + runStore: { + init: jest.fn().mockResolvedValue(undefined), + close: jest.fn().mockResolvedValue(undefined), + getStepExecutions: jest.fn().mockResolvedValue([]), + saveStepExecution: jest.fn().mockResolvedValue(undefined), + } as unknown as RunStore, pollingIntervalMs: POLLING_INTERVAL_MS, aiClient: createMockAiClient() as unknown as AiClient, logger: createMockLogger(), @@ -189,6 +194,15 @@ describe('start', () => { expect(MockedExecutorHttpServer).toHaveBeenCalledTimes(1); }); + it('should call runStore.init() on start', async () => { + const config = createRunnerConfig(); + runner = new Runner(config); + + await runner.start(); + + expect(config.runStore.init).toHaveBeenCalledTimes(1); + }); + it('should throw ConfigurationError when envSecret is invalid', async () => { runner = new Runner(createRunnerConfig({ envSecret: 'bad' })); @@ -214,6 +228,16 @@ describe('stop', () => { expect(MockedExecutorHttpServer.prototype.stop).toHaveBeenCalled(); }); + it('should call runStore.close() on stop', async () => { + const config = createRunnerConfig(); + runner = new Runner(config); + + await runner.start(); + await runner.stop(); + + expect(config.runStore.close).toHaveBeenCalledTimes(1); + }); + it('should handle stop when no HTTP server is running', async () => { runner = new Runner(createRunnerConfig()); diff --git a/packages/workflow-executor/test/stores/database-store.test.ts b/packages/workflow-executor/test/stores/database-store.test.ts new file mode 100644 
index 0000000000..21050e9125 --- /dev/null +++ b/packages/workflow-executor/test/stores/database-store.test.ts @@ -0,0 +1,111 @@ +import type { StepExecutionData } from '../../src/types/step-execution-data'; + +import { Sequelize } from 'sequelize'; + +import DatabaseStore from '../../src/stores/database-store'; + +function makeStepExecution(overrides: Partial = {}): StepExecutionData { + return { + type: 'condition', + stepIndex: 0, + executionParams: { answer: 'yes' }, + ...overrides, + } as StepExecutionData; +} + +describe('DatabaseStore (SQLite)', () => { + let sequelize: Sequelize; + let store: DatabaseStore; + + beforeEach(async () => { + sequelize = new Sequelize({ dialect: 'sqlite', storage: ':memory:', logging: false }); + store = new DatabaseStore({ sequelize }); + await store.init(); + }); + + afterEach(async () => { + await store.close(); + }); + + it('returns empty array for unknown runId', async () => { + const result = await store.getStepExecutions('unknown'); + expect(result).toEqual([]); + }); + + it('saves and retrieves a step execution', async () => { + const step = makeStepExecution({ stepIndex: 0 }); + await store.saveStepExecution('run-1', step); + + const result = await store.getStepExecutions('run-1'); + expect(result).toEqual([step]); + }); + + it('saves multiple steps for the same run', async () => { + const step0 = makeStepExecution({ stepIndex: 0 }); + const step1 = makeStepExecution({ stepIndex: 1, type: 'read-record' } as never); + + await store.saveStepExecution('run-1', step0); + await store.saveStepExecution('run-1', step1); + + const result = await store.getStepExecutions('run-1'); + expect(result).toHaveLength(2); + expect(result[0]).toEqual(step0); + expect(result[1]).toEqual(step1); + }); + + it('returns steps ordered by stepIndex', async () => { + await store.saveStepExecution('run-1', makeStepExecution({ stepIndex: 2 })); + await store.saveStepExecution('run-1', makeStepExecution({ stepIndex: 0 })); + await 
store.saveStepExecution('run-1', makeStepExecution({ stepIndex: 1 })); + + const result = await store.getStepExecutions('run-1'); + expect(result.map(s => s.stepIndex)).toEqual([0, 1, 2]); + }); + + it('overwrites step execution with the same stepIndex (upsert)', async () => { + const original = makeStepExecution({ stepIndex: 0 }); + const updated = makeStepExecution({ + stepIndex: 0, + executionParams: { answer: 'no' }, + } as never); + + await store.saveStepExecution('run-1', original); + await store.saveStepExecution('run-1', updated); + + const result = await store.getStepExecutions('run-1'); + expect(result).toHaveLength(1); + expect(result[0]).toEqual(updated); + }); + + it('isolates data between different runIds', async () => { + const step1 = makeStepExecution({ stepIndex: 0 }); + const step2 = makeStepExecution({ stepIndex: 0, type: 'read-record' } as never); + + await store.saveStepExecution('run-1', step1); + await store.saveStepExecution('run-2', step2); + + expect(await store.getStepExecutions('run-1')).toEqual([step1]); + expect(await store.getStepExecutions('run-2')).toEqual([step2]); + }); + + it('preserves complex nested JSON data', async () => { + const step: StepExecutionData = { + type: 'update-record', + stepIndex: 0, + executionParams: { displayName: 'Status', name: 'status', value: 'active' }, + executionResult: { updatedValues: { status: 'active', nested: { deep: true } } }, + pendingData: { displayName: 'Status', name: 'status', value: 'active' }, + selectedRecordRef: { collectionName: 'users', recordId: ['42'], stepIndex: 0 }, + }; + + await store.saveStepExecution('run-1', step); + + const result = await store.getStepExecutions('run-1'); + expect(result[0]).toEqual(step); + }); + + it('runs init idempotently', async () => { + // Running init a second time should not fail + await expect(store.init()).resolves.toBeUndefined(); + }); +}); diff --git a/packages/workflow-executor/test/stores/in-memory-store.test.ts 
b/packages/workflow-executor/test/stores/in-memory-store.test.ts new file mode 100644 index 0000000000..61f96f22ab --- /dev/null +++ b/packages/workflow-executor/test/stores/in-memory-store.test.ts @@ -0,0 +1,81 @@ +import type { StepExecutionData } from '../../src/types/step-execution-data'; + +import InMemoryStore from '../../src/stores/in-memory-store'; + +function makeStepExecution(overrides: Partial = {}): StepExecutionData { + return { + type: 'condition', + stepIndex: 0, + executionParams: { answer: 'yes' }, + ...overrides, + } as StepExecutionData; +} + +describe('InMemoryStore', () => { + let store: InMemoryStore; + + beforeEach(() => { + store = new InMemoryStore(); + }); + + it('returns empty array for unknown runId', async () => { + const result = await store.getStepExecutions('unknown'); + expect(result).toEqual([]); + }); + + it('saves and retrieves a step execution', async () => { + const step = makeStepExecution({ stepIndex: 0 }); + await store.saveStepExecution('run-1', step); + + const result = await store.getStepExecutions('run-1'); + expect(result).toEqual([step]); + }); + + it('saves multiple steps for the same run', async () => { + const step0 = makeStepExecution({ stepIndex: 0 }); + const step1 = makeStepExecution({ stepIndex: 1, type: 'read-record' } as never); + + await store.saveStepExecution('run-1', step0); + await store.saveStepExecution('run-1', step1); + + const result = await store.getStepExecutions('run-1'); + expect(result).toHaveLength(2); + expect(result).toContainEqual(step0); + expect(result).toContainEqual(step1); + }); + + it('returns steps ordered by stepIndex even when inserted out of order', async () => { + await store.saveStepExecution('run-1', makeStepExecution({ stepIndex: 2 })); + await store.saveStepExecution('run-1', makeStepExecution({ stepIndex: 0 })); + await store.saveStepExecution('run-1', makeStepExecution({ stepIndex: 1 })); + + const result = await store.getStepExecutions('run-1'); + expect(result.map(s => 
s.stepIndex)).toEqual([0, 1, 2]); + }); + + it('overwrites step execution with the same stepIndex (upsert)', async () => { + const original = makeStepExecution({ stepIndex: 0 }); + const updated = makeStepExecution({ + stepIndex: 0, + executionParams: { answer: 'no' }, + } as never); + + await store.saveStepExecution('run-1', original); + await store.saveStepExecution('run-1', updated); + + const result = await store.getStepExecutions('run-1'); + expect(result).toHaveLength(1); + expect(result[0]).toEqual(updated); + }); + + it('isolates data between different runIds', async () => { + const step1 = makeStepExecution({ stepIndex: 0 }); + const step2 = makeStepExecution({ stepIndex: 0, type: 'read-record' } as never); + + await store.saveStepExecution('run-1', step1); + await store.saveStepExecution('run-2', step2); + + expect(await store.getStepExecutions('run-1')).toEqual([step1]); + expect(await store.getStepExecutions('run-2')).toEqual([step2]); + }); +}); diff --git a/yarn.lock b/yarn.lock index 114e6c834d..68971371dc 100644 --- a/yarn.lock +++ b/yarn.lock @@ -7,6 +7,34 @@ resolved "https://registry.yarnpkg.com/@aashutoshrathi/word-wrap/-/word-wrap-1.2.6.tgz#bd9154aec9983f77b3a034ecaa015c2e4201f6cf" integrity sha512-1Yjs2SvM8TflER/OD3cOjhWWOZb58A2t7wpE2S9XfBYTiIl+XFhQG2bjy4Pu1I+EAlCNUzRDYDdFwFYUKvXcIA== +"@actions/core@^3.0.0": + version "3.0.0" + resolved "https://registry.yarnpkg.com/@actions/core/-/core-3.0.0.tgz#89cb07c119e9b46a649ad5f355e77de9b3108cf8" + integrity sha512-zYt6cz+ivnTmiT/ksRVriMBOiuoUpDCJJlZ5KPl2/FRdvwU3f7MPh9qftvbkXJThragzUZieit2nyHUyw53Seg== + dependencies: + "@actions/exec" "^3.0.0" + "@actions/http-client" "^4.0.0" + +"@actions/exec@^3.0.0": + version "3.0.0" + resolved "https://registry.yarnpkg.com/@actions/exec/-/exec-3.0.0.tgz#8c3464d20f0aa4068707757021d7e3c01a7ee203" + integrity sha512-6xH/puSoNBXb72VPlZVm7vQ+svQpFyA96qdDBvhB8eNZOE8LtPf9L4oAsfzK/crCL8YZ+19fKYVnM63Sl+Xzlw== + dependencies: + "@actions/io" "^3.0.2" + 
+"@actions/http-client@^4.0.0": + version "4.0.0" + resolved "https://registry.yarnpkg.com/@actions/http-client/-/http-client-4.0.0.tgz#f9754133c22802466482bf96321d42f2dba1fc82" + integrity sha512-QuwPsgVMsD6qaPD57GLZi9sqzAZCtiJT8kVBCDpLtxhL5MydQ4gS+DrejtZZPdIYyB1e95uCK9Luyds7ybHI3g== + dependencies: + tunnel "^0.0.6" + undici "^6.23.0" + +"@actions/io@^3.0.2": + version "3.0.2" + resolved "https://registry.yarnpkg.com/@actions/io/-/io-3.0.2.tgz#6f89b27a159d109836d983efa283997c23b92284" + integrity sha512-nRBchcMM+QK1pdjO7/idu86rbJI5YHUKCvKs0KxnSYbVe3F51UfGxuZX4Qy/fWlp6l7gWFwIkrOzN+oUK03kfw== + "@ampproject/remapping@^2.2.0": version "2.2.1" resolved "https://registry.yarnpkg.com/@ampproject/remapping/-/remapping-2.2.1.tgz#99e8e11851128b8702cd57c33684f1d0f260b630" @@ -660,13 +688,13 @@ "@smithy/types" "^4.12.1" tslib "^2.6.2" -"@aws-sdk/xml-builder@^3.972.6": - version "3.972.9" - resolved "https://registry.yarnpkg.com/@aws-sdk/xml-builder/-/xml-builder-3.972.9.tgz#38a43a0a860c4c73100d727e5b28c43339597b50" - integrity sha512-ItnlMgSqkPrUfJs7EsvU/01zw5UeIb2tNPhD09LBLHbg+g+HDiKibSLwpkuz/ZIlz4F2IMn+5XgE4AK/pfPuog== +"@aws-sdk/xml-builder@>=3.972.9", "@aws-sdk/xml-builder@^3.972.6": + version "3.972.15" + resolved "https://registry.yarnpkg.com/@aws-sdk/xml-builder/-/xml-builder-3.972.15.tgz#7cbc823f8eb11fa8c02d81a744892e41b1762619" + integrity sha512-PxMRlCFNiQnke9YR29vjFQwz4jq+6Q04rOVFeTDR2K7Qpv9h9FOWOxG+zJjageimYbWqE3bTuLjmryWHAWbvaA== dependencies: - "@smithy/types" "^4.13.0" - fast-xml-parser "5.4.1" + "@smithy/types" "^4.13.1" + fast-xml-parser "5.5.8" tslib "^2.6.2" "@aws/lambda-invoke-store@^0.2.2": @@ -891,7 +919,7 @@ "@babel/highlight" "^7.22.13" chalk "^2.4.2" -"@babel/code-frame@^7.21.4": +"@babel/code-frame@^7.26.2": version "7.29.0" resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.29.0.tgz#7cd7a59f15b3cc0dcd803038f7792712a7d0b15c" integrity 
sha512-9NhCeYjq9+3uxgdtp20LSiJXJvN0FeCtNGpJxuMFZ1Kv3cWUNb6DOhJwUvcVCzKGR66cw4njwM6hrJLqgOwbcw== @@ -1721,7 +1749,12 @@ object-hash "^3.0.0" uuid "^9.0.0" -"@gar/promisify@^1.0.1", "@gar/promisify@^1.1.3": +"@gar/promise-retry@^1.0.0", "@gar/promise-retry@^1.0.2": + version "1.0.3" + resolved "https://registry.yarnpkg.com/@gar/promise-retry/-/promise-retry-1.0.3.tgz#65e726428e794bc4453948e0a41e6de4215ce8b0" + integrity sha512-GmzA9ckNokPypTg10pgpeHNQe7ph+iIKKmhKu3Ob9ANkswreCx7R3cKmY781K8QK3AqVL3xVh9A42JvIAbkkSA== + +"@gar/promisify@^1.0.1": version "1.1.3" resolved "https://registry.yarnpkg.com/@gar/promisify/-/promisify-1.1.3.tgz#555193ab2e3bb3b6adc3d551c9c030d9e860daf6" integrity sha512-k2Ty1JcVojjJFwrg/ThKi2ujJ7XNLYaFGNB/bWT9wGR+oSMJHMa5w+CUq6p/pVrKeNNgA7pCqEcjSnHVoqJQFw== @@ -1915,17 +1948,12 @@ resolved "https://registry.yarnpkg.com/@inquirer/type/-/type-3.0.5.tgz#fe00207e57d5f040e5b18e809c8e7abc3a2ade3a" integrity sha512-ZJpeIYYueOz/i/ONzrfof8g89kNdO2hjGuvULROo3O8rlB2CRtSseE5KeirnyE4t/thAn/EwvS/vuQeJCn+NZg== -"@isaacs/cliui@^8.0.2": - version "8.0.2" - resolved "https://registry.yarnpkg.com/@isaacs/cliui/-/cliui-8.0.2.tgz#b37667b7bc181c168782259bab42474fbf52b550" - integrity sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA== +"@isaacs/fs-minipass@^4.0.0": + version "4.0.1" + resolved "https://registry.yarnpkg.com/@isaacs/fs-minipass/-/fs-minipass-4.0.1.tgz#2d59ae3ab4b38fb4270bfa23d30f8e2e86c7fe32" + integrity sha512-wgm9Ehl2jpeqP3zw/7mo3kRHFp5MEDhqAdwy1fTGkHAwnkGOVsgpvQhL8B5n1qlb01jV3n/bI0ZfZp5lWA1k4w== dependencies: - string-width "^5.1.2" - string-width-cjs "npm:string-width@^4.2.0" - strip-ansi "^7.0.1" - strip-ansi-cjs "npm:strip-ansi@^6.0.1" - wrap-ansi "^8.1.0" - wrap-ansi-cjs "npm:wrap-ansi@^7.0.0" + minipass "^7.0.4" "@isaacs/string-locale-compare@^1.1.0": version "1.1.0" @@ -2580,6 +2608,17 @@ lru-cache "^10.0.1" socks-proxy-agent "^8.0.3" +"@npmcli/agent@^4.0.0": + version "4.0.0" + resolved 
"https://registry.yarnpkg.com/@npmcli/agent/-/agent-4.0.0.tgz#2bb2b1c0a170940511554a7986ae2a8be9fedcce" + integrity sha512-kAQTcEN9E8ERLVg5AsGwLNoFb+oEG6engbqAU2P43gD4JEIkNGMHdVQ096FsOAAYpZPB0RSt0zgInKIAS1l5QA== + dependencies: + agent-base "^7.1.0" + http-proxy-agent "^7.0.0" + https-proxy-agent "^7.0.1" + lru-cache "^11.2.1" + socks-proxy-agent "^8.0.3" + "@npmcli/arborist@7.5.4": version "7.5.4" resolved "https://registry.yarnpkg.com/@npmcli/arborist/-/arborist-7.5.4.tgz#3dd9e531d6464ef6715e964c188e0880c471ac9b" @@ -2621,65 +2660,59 @@ treeverse "^3.0.0" walk-up-path "^3.0.1" -"@npmcli/arborist@^6.5.0": - version "6.5.1" - resolved "https://registry.yarnpkg.com/@npmcli/arborist/-/arborist-6.5.1.tgz#b378a2e162e9b868d06f8f2c7e87e828de7e63ba" - integrity sha512-cdV8pGurLK0CifZRilMJbm2CZ3H4Snk8PAqOngj5qmgFLjEllMLvScSZ3XKfd+CK8fo/hrPHO9zazy9OYdvmUg== +"@npmcli/arborist@^9.4.2": + version "9.4.2" + resolved "https://registry.yarnpkg.com/@npmcli/arborist/-/arborist-9.4.2.tgz#3fe6b905c671e7082a13ff247c6cf5f2006a4202" + integrity sha512-omJgPyzt11cEGrxzgrECoOyxAunmPMgBFTcAB/FbaB+9iOYhGmRdsQqySV8o0LWQ/l2kTeASUIMR4xJufVwmtw== dependencies: + "@gar/promise-retry" "^1.0.0" "@isaacs/string-locale-compare" "^1.1.0" - "@npmcli/fs" "^3.1.0" - "@npmcli/installed-package-contents" "^2.0.2" - "@npmcli/map-workspaces" "^3.0.2" - "@npmcli/metavuln-calculator" "^5.0.0" - "@npmcli/name-from-folder" "^2.0.0" - "@npmcli/node-gyp" "^3.0.0" - "@npmcli/package-json" "^4.0.0" - "@npmcli/query" "^3.1.0" - "@npmcli/run-script" "^6.0.0" - bin-links "^4.0.1" - cacache "^17.0.4" - common-ancestor-path "^1.0.1" - hosted-git-info "^6.1.1" - json-parse-even-better-errors "^3.0.0" + "@npmcli/fs" "^5.0.0" + "@npmcli/installed-package-contents" "^4.0.0" + "@npmcli/map-workspaces" "^5.0.0" + "@npmcli/metavuln-calculator" "^9.0.2" + "@npmcli/name-from-folder" "^4.0.0" + "@npmcli/node-gyp" "^5.0.0" + "@npmcli/package-json" "^7.0.0" + "@npmcli/query" "^5.0.0" + "@npmcli/redact" "^4.0.0" + 
"@npmcli/run-script" "^10.0.0" + bin-links "^6.0.0" + cacache "^20.0.1" + common-ancestor-path "^2.0.0" + hosted-git-info "^9.0.0" json-stringify-nice "^1.1.4" - minimatch "^9.0.0" - nopt "^7.0.0" - npm-install-checks "^6.2.0" - npm-package-arg "^10.1.0" - npm-pick-manifest "^8.0.1" - npm-registry-fetch "^14.0.3" - npmlog "^7.0.1" - pacote "^15.0.8" - parse-conflict-json "^3.0.0" - proc-log "^3.0.0" + lru-cache "^11.2.1" + minimatch "^10.0.3" + nopt "^9.0.0" + npm-install-checks "^8.0.0" + npm-package-arg "^13.0.0" + npm-pick-manifest "^11.0.1" + npm-registry-fetch "^19.0.0" + pacote "^21.0.2" + parse-conflict-json "^5.0.1" + proc-log "^6.0.0" + proggy "^4.0.0" promise-all-reject-late "^1.0.0" - promise-call-limit "^1.0.2" - read-package-json-fast "^3.0.2" + promise-call-limit "^3.0.1" semver "^7.3.7" - ssri "^10.0.1" + ssri "^13.0.0" treeverse "^3.0.0" - walk-up-path "^3.0.1" + walk-up-path "^4.0.0" -"@npmcli/config@^6.4.0": - version "6.4.1" - resolved "https://registry.yarnpkg.com/@npmcli/config/-/config-6.4.1.tgz#006409c739635db008e78bf58c92421cc147911d" - integrity sha512-uSz+elSGzjCMANWa5IlbGczLYPkNI/LeR+cHrgaTqTrTSh9RHhOFA4daD2eRUz6lMtOW+Fnsb+qv7V2Zz8ML0g== +"@npmcli/config@^10.8.0": + version "10.8.0" + resolved "https://registry.yarnpkg.com/@npmcli/config/-/config-10.8.0.tgz#41f982ecb96ca136e515faaf6feae3f303fbbd54" + integrity sha512-YkhoXZQU7zxyGi3V7J0zdK2pghzF9YXHiRdpRX8QNhsefk/zAJZJjRsbbw1hD67hlMp2gSygUGgW4y7FlrUThw== dependencies: - "@npmcli/map-workspaces" "^3.0.2" + "@npmcli/map-workspaces" "^5.0.0" + "@npmcli/package-json" "^7.0.0" ci-info "^4.0.0" - ini "^4.1.0" - nopt "^7.0.0" - proc-log "^3.0.0" - read-package-json-fast "^3.0.2" + ini "^6.0.0" + nopt "^9.0.0" + proc-log "^6.0.0" semver "^7.3.5" - walk-up-path "^3.0.1" - -"@npmcli/disparity-colors@^3.0.0": - version "3.0.1" - resolved "https://registry.yarnpkg.com/@npmcli/disparity-colors/-/disparity-colors-3.0.1.tgz#042d5ef548200c81e3ee3a84c994744573fe79fd" - integrity 
sha512-cOypTz/9IAhaPgOktbDNPeccTU88y8I1ZURbPeC0ooziK1h6dRJs2iGz1eKP1muaeVbow8GqQ0DaxLG8Bpmblw== - dependencies: - ansi-styles "^4.3.0" + walk-up-path "^4.0.0" "@npmcli/fs@^1.0.0": version "1.1.1" @@ -2689,14 +2722,6 @@ "@gar/promisify" "^1.0.1" semver "^7.3.5" -"@npmcli/fs@^2.1.0": - version "2.1.2" - resolved "https://registry.yarnpkg.com/@npmcli/fs/-/fs-2.1.2.tgz#a9e2541a4a2fec2e69c29b35e6060973da79b865" - integrity sha512-yOJKRvohFOaLqipNtwYB9WugyZKhC/DZC4VYPmpaCzDBrA8YpK3qHZ8/HGscMnE4GqbkLNuVcCnxkeQEdGt6LQ== - dependencies: - "@gar/promisify" "^1.1.3" - semver "^7.3.5" - "@npmcli/fs@^3.1.0": version "3.1.0" resolved "https://registry.yarnpkg.com/@npmcli/fs/-/fs-3.1.0.tgz#233d43a25a91d68c3a863ba0da6a3f00924a173e" @@ -2711,19 +2736,12 @@ dependencies: semver "^7.3.5" -"@npmcli/git@^4.0.0", "@npmcli/git@^4.0.1", "@npmcli/git@^4.1.0": - version "4.1.0" - resolved "https://registry.yarnpkg.com/@npmcli/git/-/git-4.1.0.tgz#ab0ad3fd82bc4d8c1351b6c62f0fa56e8fe6afa6" - integrity sha512-9hwoB3gStVfa0N31ymBmrX+GuDGdVA/QWShZVqE0HK2Af+7QGGrCTbZia/SW0ImUTjTne7SP91qxDmtXvDHRPQ== +"@npmcli/fs@^5.0.0": + version "5.0.0" + resolved "https://registry.yarnpkg.com/@npmcli/fs/-/fs-5.0.0.tgz#674619771907342b3d1ac197aaf1deeb657e3539" + integrity sha512-7OsC1gNORBEawOa5+j2pXN9vsicaIOH5cPXxoR6fJOmH6/EXpJB2CajXOu1fPRFun2m1lktEFX11+P89hqO/og== dependencies: - "@npmcli/promise-spawn" "^6.0.0" - lru-cache "^7.4.4" - npm-pick-manifest "^8.0.0" - proc-log "^3.0.0" - promise-inflight "^1.0.1" - promise-retry "^2.0.1" semver "^7.3.5" - which "^3.0.0" "@npmcli/git@^5.0.0": version "5.0.8" @@ -2740,6 +2758,20 @@ semver "^7.3.5" which "^4.0.0" +"@npmcli/git@^7.0.0": + version "7.0.2" + resolved "https://registry.yarnpkg.com/@npmcli/git/-/git-7.0.2.tgz#680c3271fe51401c07ee41076be678851e600ff0" + integrity sha512-oeolHDjExNAJAnlYP2qzNjMX/Xi9bmu78C9dIGr4xjobrSKbuMYCph8lTzn4vnW3NjIqVmw/f8BCfouqyJXlRg== + dependencies: + "@gar/promise-retry" "^1.0.0" + "@npmcli/promise-spawn" "^9.0.0" + ini "^6.0.0" + 
lru-cache "^11.2.1" + npm-pick-manifest "^11.0.1" + proc-log "^6.0.0" + semver "^7.3.5" + which "^6.0.0" + "@npmcli/installed-package-contents@^2.0.1": version "2.0.2" resolved "https://registry.yarnpkg.com/@npmcli/installed-package-contents/-/installed-package-contents-2.0.2.tgz#bfd817eccd9e8df200919e73f57f9e3d9e4f9e33" @@ -2748,7 +2780,7 @@ npm-bundled "^3.0.0" npm-normalize-package-bin "^3.0.0" -"@npmcli/installed-package-contents@^2.0.2", "@npmcli/installed-package-contents@^2.1.0": +"@npmcli/installed-package-contents@^2.1.0": version "2.1.0" resolved "https://registry.yarnpkg.com/@npmcli/installed-package-contents/-/installed-package-contents-2.1.0.tgz#63048e5f6e40947a3a88dcbcb4fd9b76fdd37c17" integrity sha512-c8UuGLeZpm69BryRykLuKRyKFZYJsZSCT4aVY5ds4omyZqJ172ApzgfKJ5eV/r3HgLdUYgFVe54KSFVjKoe27w== @@ -2756,7 +2788,15 @@ npm-bundled "^3.0.0" npm-normalize-package-bin "^3.0.0" -"@npmcli/map-workspaces@^3.0.2", "@npmcli/map-workspaces@^3.0.4": +"@npmcli/installed-package-contents@^4.0.0": + version "4.0.0" + resolved "https://registry.yarnpkg.com/@npmcli/installed-package-contents/-/installed-package-contents-4.0.0.tgz#18e5070704cfe0278f9ae48038558b6efd438426" + integrity sha512-yNyAdkBxB72gtZ4GrwXCM0ZUedo9nIbOMKfGjt6Cu6DXf0p8y1PViZAKDC8q8kv/fufx0WTjRBdSlyrvnP7hmA== + dependencies: + npm-bundled "^5.0.0" + npm-normalize-package-bin "^5.0.0" + +"@npmcli/map-workspaces@^3.0.2": version "3.0.6" resolved "https://registry.yarnpkg.com/@npmcli/map-workspaces/-/map-workspaces-3.0.6.tgz#27dc06c20c35ef01e45a08909cab9cb3da08cea6" integrity sha512-tkYs0OYnzQm6iIRdfy+LcLBjcKuQCeE5YLb8KnrIlutJfheNaPvPpgoFEyEFgbjzl5PLZ3IA/BWAwRU0eHuQDA== @@ -2766,15 +2806,15 @@ minimatch "^9.0.0" read-package-json-fast "^3.0.0" -"@npmcli/metavuln-calculator@^5.0.0": - version "5.0.1" - resolved "https://registry.yarnpkg.com/@npmcli/metavuln-calculator/-/metavuln-calculator-5.0.1.tgz#426b3e524c2008bcc82dbc2ef390aefedd643d76" - integrity 
sha512-qb8Q9wIIlEPj3WeA1Lba91R4ZboPL0uspzV0F9uwP+9AYMVB2zOoa7Pbk12g6D2NHAinSbHh6QYmGuRyHZ874Q== +"@npmcli/map-workspaces@^5.0.0", "@npmcli/map-workspaces@^5.0.3": + version "5.0.3" + resolved "https://registry.yarnpkg.com/@npmcli/map-workspaces/-/map-workspaces-5.0.3.tgz#5b887ec0b535a2ba64d1d338867326a2b9c041d1" + integrity sha512-o2grssXo1e774E5OtEwwrgoszYRh0lqkJH+Pb9r78UcqdGJRDRfhpM8DvZPjzNLLNYeD/rNbjOKM3Ss5UABROw== dependencies: - cacache "^17.0.0" - json-parse-even-better-errors "^3.0.0" - pacote "^15.0.0" - semver "^7.3.5" + "@npmcli/name-from-folder" "^4.0.0" + "@npmcli/package-json" "^7.0.0" + glob "^13.0.0" + minimatch "^10.0.3" "@npmcli/metavuln-calculator@^7.1.1": version "7.1.1" @@ -2787,6 +2827,17 @@ proc-log "^4.1.0" semver "^7.3.5" +"@npmcli/metavuln-calculator@^9.0.2", "@npmcli/metavuln-calculator@^9.0.3": + version "9.0.3" + resolved "https://registry.yarnpkg.com/@npmcli/metavuln-calculator/-/metavuln-calculator-9.0.3.tgz#57b330f3fb8ca34db2782ad5349ea4384bed9c96" + integrity sha512-94GLSYhLXF2t2LAC7pDwLaM4uCARzxShyAQKsirmlNcpidH89VA4/+K1LbJmRMgz5gy65E/QBBWQdUvGLe2Frg== + dependencies: + cacache "^20.0.0" + json-parse-even-better-errors "^5.0.0" + pacote "^21.0.0" + proc-log "^6.0.0" + semver "^7.3.5" + "@npmcli/move-file@^1.0.1": version "1.1.2" resolved "https://registry.yarnpkg.com/@npmcli/move-file/-/move-file-1.1.2.tgz#1a82c3e372f7cae9253eb66d72543d6b8685c674" @@ -2795,24 +2846,26 @@ mkdirp "^1.0.4" rimraf "^3.0.2" -"@npmcli/move-file@^2.0.0": - version "2.0.1" - resolved "https://registry.yarnpkg.com/@npmcli/move-file/-/move-file-2.0.1.tgz#26f6bdc379d87f75e55739bab89db525b06100e4" - integrity sha512-mJd2Z5TjYWq/ttPLLGqArdtnC74J6bOzg4rMDnN+p1xTacZ2yPRCk2y0oSWQtygLR9YVQXgOcONrwtnk3JupxQ== - dependencies: - mkdirp "^1.0.4" - rimraf "^3.0.2" - "@npmcli/name-from-folder@^2.0.0": version "2.0.0" resolved "https://registry.yarnpkg.com/@npmcli/name-from-folder/-/name-from-folder-2.0.0.tgz#c44d3a7c6d5c184bb6036f4d5995eee298945815" integrity 
sha512-pwK+BfEBZJbKdNYpHHRTNBwBoqrN/iIMO0AiGvYsp3Hoaq0WbgGSWQR6SCldZovoDpY3yje5lkFUe6gsDgJ2vg== +"@npmcli/name-from-folder@^4.0.0": + version "4.0.0" + resolved "https://registry.yarnpkg.com/@npmcli/name-from-folder/-/name-from-folder-4.0.0.tgz#b4d516ae4fab5ed4e8e8032abff3488703fc24a3" + integrity sha512-qfrhVlOSqmKM8i6rkNdZzABj8MKEITGFAY+4teqBziksCQAOLutiAxM1wY2BKEd8KjUSpWmWCYxvXr0y4VTlPg== + "@npmcli/node-gyp@^3.0.0": version "3.0.0" resolved "https://registry.yarnpkg.com/@npmcli/node-gyp/-/node-gyp-3.0.0.tgz#101b2d0490ef1aa20ed460e4c0813f0db560545a" integrity sha512-gp8pRXC2oOxu0DUE1/M3bYtb1b3/DbJ5aM113+XJBgfXdussRAsX0YOrOhdd8WvnAR6auDBvJomGAkLKA5ydxA== +"@npmcli/node-gyp@^5.0.0": + version "5.0.0" + resolved "https://registry.yarnpkg.com/@npmcli/node-gyp/-/node-gyp-5.0.0.tgz#35475a58b5d791764a7252231197a14deefe8e47" + integrity sha512-uuG5HZFXLfyFKqg8QypsmgLQW7smiRjVc45bqD/ofZZcR/uxEjgQU8qDPv0s9TEeMUiAAU/GC5bR6++UdTirIQ== + "@npmcli/package-json@5.2.0": version "5.2.0" resolved "https://registry.yarnpkg.com/@npmcli/package-json/-/package-json-5.2.0.tgz#a1429d3111c10044c7efbfb0fce9f2c501f4cfad" @@ -2826,19 +2879,6 @@ proc-log "^4.0.0" semver "^7.5.3" -"@npmcli/package-json@^4.0.0", "@npmcli/package-json@^4.0.1": - version "4.0.1" - resolved "https://registry.yarnpkg.com/@npmcli/package-json/-/package-json-4.0.1.tgz#1a07bf0e086b640500791f6bf245ff43cc27fa37" - integrity sha512-lRCEGdHZomFsURroh522YvA/2cVb9oPIJrjHanCJZkiasz1BzcnLr3tBJhlV7S86MBJBuAQ33is2D60YitZL2Q== - dependencies: - "@npmcli/git" "^4.1.0" - glob "^10.2.2" - hosted-git-info "^6.1.1" - json-parse-even-better-errors "^3.0.0" - normalize-package-data "^5.0.0" - proc-log "^3.0.0" - semver "^7.5.3" - "@npmcli/package-json@^5.0.0", "@npmcli/package-json@^5.1.0": version "5.2.1" resolved "https://registry.yarnpkg.com/@npmcli/package-json/-/package-json-5.2.1.tgz#df69477b1023b81ff8503f2b9db4db4faea567ed" @@ -2852,12 +2892,18 @@ proc-log "^4.0.0" semver "^7.5.3" -"@npmcli/promise-spawn@^6.0.0", 
"@npmcli/promise-spawn@^6.0.1", "@npmcli/promise-spawn@^6.0.2": - version "6.0.2" - resolved "https://registry.yarnpkg.com/@npmcli/promise-spawn/-/promise-spawn-6.0.2.tgz#c8bc4fa2bd0f01cb979d8798ba038f314cfa70f2" - integrity sha512-gGq0NJkIGSwdbUt4yhdF8ZrmkGKVz9vAdVzpOfnom+V8PLSmSOVhZwbNvZZS1EYcJN5hzzKBxmmVVAInM6HQLg== - dependencies: - which "^3.0.0" +"@npmcli/package-json@^7.0.0", "@npmcli/package-json@^7.0.5": + version "7.0.5" + resolved "https://registry.yarnpkg.com/@npmcli/package-json/-/package-json-7.0.5.tgz#e29481dfc586d1625a6553799e6bec52ae0487a5" + integrity sha512-iVuTlG3ORq2iaVa1IWUxAO/jIp77tUKBhoMjuzYW2kL4MLN1bi/ofqkZ7D7OOwh8coAx1/S2ge0rMdGv8sLSOQ== + dependencies: + "@npmcli/git" "^7.0.0" + glob "^13.0.0" + hosted-git-info "^9.0.0" + json-parse-even-better-errors "^5.0.0" + proc-log "^6.0.0" + semver "^7.5.3" + spdx-expression-parse "^4.0.0" "@npmcli/promise-spawn@^7.0.0": version "7.0.2" @@ -2866,6 +2912,13 @@ dependencies: which "^4.0.0" +"@npmcli/promise-spawn@^9.0.0", "@npmcli/promise-spawn@^9.0.1": + version "9.0.1" + resolved "https://registry.yarnpkg.com/@npmcli/promise-spawn/-/promise-spawn-9.0.1.tgz#20e80cbdd2f24ad263a15de3ebbb1673cb82005b" + integrity sha512-OLUaoqBuyxeTqUvjA3FZFiXUfYC1alp3Sa99gW3EUDz3tZ3CbXDdcZ7qWKBzicrJleIgucoWamWH1saAmH/l2Q== + dependencies: + which "^6.0.0" + "@npmcli/query@^3.1.0": version "3.1.0" resolved "https://registry.yarnpkg.com/@npmcli/query/-/query-3.1.0.tgz#bc202c59e122a06cf8acab91c795edda2cdad42c" @@ -2873,11 +2926,23 @@ dependencies: postcss-selector-parser "^6.0.10" +"@npmcli/query@^5.0.0": + version "5.0.0" + resolved "https://registry.yarnpkg.com/@npmcli/query/-/query-5.0.0.tgz#c8cb9ec42c2ef149077282e948dc068ecc79ee11" + integrity sha512-8TZWfTQOsODpLqo9SVhVjHovmKXNpevHU0gO9e+y4V4fRIOneiXy0u0sMP9LmS71XivrEWfZWg50ReH4WRT4aQ== + dependencies: + postcss-selector-parser "^7.0.0" + "@npmcli/redact@^2.0.0": version "2.0.1" resolved 
"https://registry.yarnpkg.com/@npmcli/redact/-/redact-2.0.1.tgz#95432fd566e63b35c04494621767a4312c316762" integrity sha512-YgsR5jCQZhVmTJvjduTOIHph0L73pK8xwMVaDY0PatySqVM9AZj93jpoXYSJqfHFxFkN9dmqTw6OiqExsS3LPw== +"@npmcli/redact@^4.0.0": + version "4.0.0" + resolved "https://registry.yarnpkg.com/@npmcli/redact/-/redact-4.0.0.tgz#c91121e02b7559a997614a2c1057cd7fc67608c4" + integrity sha512-gOBg5YHMfZy+TfHArfVogwgfBeQnKbbGo3pSUyK/gSI0AVu+pEiDVcKlQb0D8Mg1LNRZILZ6XG8I5dJ4KuAd9Q== + "@npmcli/run-script@8.1.0", "@npmcli/run-script@^8.0.0", "@npmcli/run-script@^8.1.0": version "8.1.0" resolved "https://registry.yarnpkg.com/@npmcli/run-script/-/run-script-8.1.0.tgz#a563e5e29b1ca4e648a6b1bbbfe7220b4bfe39fc" @@ -2890,16 +2955,16 @@ proc-log "^4.0.0" which "^4.0.0" -"@npmcli/run-script@^6.0.0", "@npmcli/run-script@^6.0.2": - version "6.0.2" - resolved "https://registry.yarnpkg.com/@npmcli/run-script/-/run-script-6.0.2.tgz#a25452d45ee7f7fb8c16dfaf9624423c0c0eb885" - integrity sha512-NCcr1uQo1k5U+SYlnIrbAh3cxy+OQT1VtqiAbxdymSlptbzBb62AjH2xXgjNCoP073hoa1CfCAcwoZ8k96C4nA== +"@npmcli/run-script@^10.0.0", "@npmcli/run-script@^10.0.4": + version "10.0.4" + resolved "https://registry.yarnpkg.com/@npmcli/run-script/-/run-script-10.0.4.tgz#99cddae483ce3dbf1a10f5683a4e6aaa02345ac0" + integrity sha512-mGUWr1uMnf0le2TwfOZY4SFxZGXGfm4Jtay/nwAa2FLNAKXUoUwaGwBMNH36UHPtinWfTSJ3nqFQr0091CxVGg== dependencies: - "@npmcli/node-gyp" "^3.0.0" - "@npmcli/promise-spawn" "^6.0.0" - node-gyp "^9.0.0" - read-package-json-fast "^3.0.0" - which "^3.0.0" + "@npmcli/node-gyp" "^5.0.0" + "@npmcli/package-json" "^7.0.0" + "@npmcli/promise-spawn" "^9.0.0" + node-gyp "^12.1.0" + proc-log "^6.0.0" "@nuxtjs/opencollective@0.3.2": version "0.3.2" @@ -3065,18 +3130,10 @@ resolved "https://registry.yarnpkg.com/@octokit/auth-token/-/auth-token-4.0.0.tgz#40d203ea827b9f17f42a29c6afb93b7745ef80c7" integrity sha512-tY/msAuJo6ARbK6SPIxZrPBms3xPbfwBrulZe0Wtr/DIY9lje2HeV1uoebShn6mx7SjCHif6EjMvoREj+gZ+SA== 
-"@octokit/core@^5.0.0": - version "5.2.2" - resolved "https://registry.yarnpkg.com/@octokit/core/-/core-5.2.2.tgz#252805732de9b4e8e4f658d34b80c4c9b2534761" - integrity sha512-/g2d4sW9nUDJOMz3mabVQvOGhVa4e/BN/Um7yca9Bb2XTzPPnfTWHWQg+IsEYO7M3Vx+EXvaM/I2pJWIMun1bg== - dependencies: - "@octokit/auth-token" "^4.0.0" - "@octokit/graphql" "^7.1.0" - "@octokit/request" "^8.4.1" - "@octokit/request-error" "^5.1.1" - "@octokit/types" "^13.0.0" - before-after-hook "^2.2.0" - universal-user-agent "^6.0.0" +"@octokit/auth-token@^6.0.0": + version "6.0.0" + resolved "https://registry.yarnpkg.com/@octokit/auth-token/-/auth-token-6.0.0.tgz#b02e9c08a2d8937df09a2a981f226ad219174c53" + integrity sha512-P4YJBPdPSpWTQ1NU4XYdvHvXJJDxM6YwpS0FZHRgP7YFkdVxsWcpWGy/NVqlAA7PcPCnMacXlRm1y2PFZRWL/w== "@octokit/core@^5.0.2": version "5.2.1" @@ -3091,6 +3148,27 @@ before-after-hook "^2.2.0" universal-user-agent "^6.0.0" +"@octokit/core@^7.0.0": + version "7.0.6" + resolved "https://registry.yarnpkg.com/@octokit/core/-/core-7.0.6.tgz#0d58704391c6b681dec1117240ea4d2a98ac3916" + integrity sha512-DhGl4xMVFGVIyMwswXeyzdL4uXD5OGILGX5N8Y+f6W7LhC1Ze2poSNrkF/fedpVDHEEZ+PHFW0vL14I+mm8K3Q== + dependencies: + "@octokit/auth-token" "^6.0.0" + "@octokit/graphql" "^9.0.3" + "@octokit/request" "^10.0.6" + "@octokit/request-error" "^7.0.2" + "@octokit/types" "^16.0.0" + before-after-hook "^4.0.0" + universal-user-agent "^7.0.0" + +"@octokit/endpoint@^11.0.3": + version "11.0.3" + resolved "https://registry.yarnpkg.com/@octokit/endpoint/-/endpoint-11.0.3.tgz#acf5f7feddde4e12185d5312ee38ff77235d8205" + integrity sha512-FWFlNxghg4HrXkD3ifYbS/IdL/mDHjh9QcsNyhQjN8dplUoZbejsdpmuqdA76nxj2xoWPs7p8uX2SNr9rYu0Ag== + dependencies: + "@octokit/types" "^16.0.0" + universal-user-agent "^7.0.2" + "@octokit/endpoint@^9.0.6": version "9.0.6" resolved "https://registry.yarnpkg.com/@octokit/endpoint/-/endpoint-9.0.6.tgz#114d912108fe692d8b139cfe7fc0846dfd11b6c0" @@ -3108,16 +3186,25 @@ "@octokit/types" "^13.0.0" 
universal-user-agent "^6.0.0" -"@octokit/openapi-types@^20.0.0": - version "20.0.0" - resolved "https://registry.yarnpkg.com/@octokit/openapi-types/-/openapi-types-20.0.0.tgz#9ec2daa0090eeb865ee147636e0c00f73790c6e5" - integrity sha512-EtqRBEjp1dL/15V7WiX5LJMIxxkdiGJnabzYx5Apx4FkQIFgAfKumXeYAqqJCj1s+BMX4cPFIFC4OLCR6stlnA== +"@octokit/graphql@^9.0.3": + version "9.0.3" + resolved "https://registry.yarnpkg.com/@octokit/graphql/-/graphql-9.0.3.tgz#5b8341c225909e924b466705c13477face869456" + integrity sha512-grAEuupr/C1rALFnXTv6ZQhFuL1D8G5y8CN04RgrO4FIPMrtm+mcZzFG7dcBm+nq+1ppNixu+Jd78aeJOYxlGA== + dependencies: + "@octokit/request" "^10.0.6" + "@octokit/types" "^16.0.0" + universal-user-agent "^7.0.0" "@octokit/openapi-types@^24.2.0": version "24.2.0" resolved "https://registry.yarnpkg.com/@octokit/openapi-types/-/openapi-types-24.2.0.tgz#3d55c32eac0d38da1a7083a9c3b0cca77924f7d3" integrity sha512-9sIH3nSUttelJSXUrmGzl7QUBFul0/mB8HRYl3fOlgHbIWG+WnYDXU3v/2zMtAvuzZ/ed00Ei6on975FhBfzrg== +"@octokit/openapi-types@^27.0.0": + version "27.0.0" + resolved "https://registry.yarnpkg.com/@octokit/openapi-types/-/openapi-types-27.0.0.tgz#374ea53781965fd02a9d36cacb97e152cefff12d" + integrity sha512-whrdktVs1h6gtR+09+QsNk2+FO+49j6ga1c55YZudfEG+oKJVvJLQi3zkOm5JjiUXAagWK2tI2kTGKJ2Ys7MGA== + "@octokit/plugin-enterprise-rest@6.0.1": version "6.0.1" resolved "https://registry.yarnpkg.com/@octokit/plugin-enterprise-rest/-/plugin-enterprise-rest-6.0.1.tgz#e07896739618dab8da7d4077c658003775f95437" @@ -3130,12 +3217,12 @@ dependencies: "@octokit/types" "^13.7.0" -"@octokit/plugin-paginate-rest@^9.0.0": - version "9.2.2" - resolved "https://registry.yarnpkg.com/@octokit/plugin-paginate-rest/-/plugin-paginate-rest-9.2.2.tgz#c516bc498736bcdaa9095b9a1d10d9d0501ae831" - integrity sha512-u3KYkGF7GcZnSD/3UP0S7K5XUFT2FkOQdcfXZGZQPGv3lm4F2Xbf71lvjldr8c1H3nNbF+33cLEkWYbokGWqiQ== +"@octokit/plugin-paginate-rest@^14.0.0": + version "14.0.0" + resolved 
"https://registry.yarnpkg.com/@octokit/plugin-paginate-rest/-/plugin-paginate-rest-14.0.0.tgz#44dc9fff2dacb148d4c5c788b573ddc044503026" + integrity sha512-fNVRE7ufJiAA3XUrha2omTA39M6IXIc6GIZLvlbsm8QOQCYvpq/LkMNGyFlB1d8hTDzsAXa3OKtybdMAYsV/fw== dependencies: - "@octokit/types" "^12.6.0" + "@octokit/types" "^16.0.0" "@octokit/plugin-request-log@^4.0.0": version "4.0.1" @@ -3149,24 +3236,24 @@ dependencies: "@octokit/types" "^13.8.0" -"@octokit/plugin-retry@^6.0.0": - version "6.1.0" - resolved "https://registry.yarnpkg.com/@octokit/plugin-retry/-/plugin-retry-6.1.0.tgz#cf5b92223246327ca9c7e17262b93ffde028ab0a" - integrity sha512-WrO3bvq4E1Xh1r2mT9w6SDFg01gFmP81nIG77+p/MqW1JeXXgL++6umim3t6x0Zj5pZm3rXAN+0HEjmmdhIRig== +"@octokit/plugin-retry@^8.0.0": + version "8.1.0" + resolved "https://registry.yarnpkg.com/@octokit/plugin-retry/-/plugin-retry-8.1.0.tgz#e25c2fb5e0a09cfe674ef9df75d7ca4fafa16c11" + integrity sha512-O1FZgXeiGb2sowEr/hYTr6YunGdSAFWnr2fyW39Ah85H8O33ELASQxcvOFF5LE6Tjekcyu2ms4qAzJVhSaJxTw== dependencies: - "@octokit/request-error" "^5.0.0" - "@octokit/types" "^13.0.0" + "@octokit/request-error" "^7.0.2" + "@octokit/types" "^16.0.0" bottleneck "^2.15.3" -"@octokit/plugin-throttling@^8.0.0": - version "8.2.0" - resolved "https://registry.yarnpkg.com/@octokit/plugin-throttling/-/plugin-throttling-8.2.0.tgz#9ec3ea2e37b92fac63f06911d0c8141b46dc4941" - integrity sha512-nOpWtLayKFpgqmgD0y3GqXafMFuKcA4tRPZIfu7BArd2lEZeb1988nhWhwx4aZWmjDmUfdgVf7W+Tt4AmvRmMQ== +"@octokit/plugin-throttling@^11.0.0": + version "11.0.3" + resolved "https://registry.yarnpkg.com/@octokit/plugin-throttling/-/plugin-throttling-11.0.3.tgz#584b1a9ca73a5daafeeb7dd5cc13a1bd29a6a60d" + integrity sha512-34eE0RkFCKycLl2D2kq7W+LovheM/ex3AwZCYN8udpi6bxsyjZidb2McXs69hZhLmJlDqTSP8cH+jSRpiaijBg== dependencies: - "@octokit/types" "^12.2.0" + "@octokit/types" "^16.0.0" bottleneck "^2.15.3" -"@octokit/request-error@^5.0.0", "@octokit/request-error@^5.1.1": +"@octokit/request-error@^5.1.1": version "5.1.1" 
resolved "https://registry.yarnpkg.com/@octokit/request-error/-/request-error-5.1.1.tgz#b9218f9c1166e68bb4d0c89b638edc62c9334805" integrity sha512-v9iyEQJH6ZntoENr9/yXxjuezh4My67CBSu9r6Ve/05Iu5gNgnisNWOsoJHTP6k0Rr0+HQIpnH+kyammu90q/g== @@ -3175,6 +3262,25 @@ deprecation "^2.0.0" once "^1.4.0" +"@octokit/request-error@^7.0.2": + version "7.1.0" + resolved "https://registry.yarnpkg.com/@octokit/request-error/-/request-error-7.1.0.tgz#440fa3cae310466889778f5a222b47a580743638" + integrity sha512-KMQIfq5sOPpkQYajXHwnhjCC0slzCNScLHs9JafXc4RAJI+9f+jNDlBNaIMTvazOPLgb4BnlhGJOTbnN0wIjPw== + dependencies: + "@octokit/types" "^16.0.0" + +"@octokit/request@^10.0.6": + version "10.0.8" + resolved "https://registry.yarnpkg.com/@octokit/request/-/request-10.0.8.tgz#6609a5a38ad6f8ee203d9eb8ac9361d906a4414e" + integrity sha512-SJZNwY9pur9Agf7l87ywFi14W+Hd9Jg6Ifivsd33+/bGUQIjNujdFiXII2/qSlN2ybqUHfp5xpekMEjIBTjlSw== + dependencies: + "@octokit/endpoint" "^11.0.3" + "@octokit/request-error" "^7.0.2" + "@octokit/types" "^16.0.0" + fast-content-type-parse "^3.0.0" + json-with-bigint "^3.5.3" + universal-user-agent "^7.0.2" + "@octokit/request@^8.4.1": version "8.4.1" resolved "https://registry.yarnpkg.com/@octokit/request/-/request-8.4.1.tgz#715a015ccf993087977ea4365c44791fc4572486" @@ -3195,13 +3301,6 @@ "@octokit/plugin-request-log" "^4.0.0" "@octokit/plugin-rest-endpoint-methods" "13.3.2-cjs.1" -"@octokit/types@^12.2.0", "@octokit/types@^12.6.0": - version "12.6.0" - resolved "https://registry.yarnpkg.com/@octokit/types/-/types-12.6.0.tgz#8100fb9eeedfe083aae66473bd97b15b62aedcb2" - integrity sha512-1rhSOfRa6H9w4YwK0yrf5faDaDTb+yLyBUKOCV4xtCDB5VmIPqd/v9yr9o6SAzOAlRxMiRiCic6JVM1/kunVkw== - dependencies: - "@octokit/openapi-types" "^20.0.0" - "@octokit/types@^13.0.0", "@octokit/types@^13.1.0", "@octokit/types@^13.7.0", "@octokit/types@^13.8.0": version "13.10.0" resolved "https://registry.yarnpkg.com/@octokit/types/-/types-13.10.0.tgz#3e7c6b19c0236c270656e4ea666148c2b51fd1a3" @@ -3209,6 
+3308,13 @@ dependencies: "@octokit/openapi-types" "^24.2.0" +"@octokit/types@^16.0.0": + version "16.0.0" + resolved "https://registry.yarnpkg.com/@octokit/types/-/types-16.0.0.tgz#fbd7fa590c2ef22af881b1d79758bfaa234dbb7c" + integrity sha512-sKq+9r1Mm4efXW1FCk7hFSeJo4QKreL/tTbR0rz/qx/r1Oa2VV83LTA/H/MuCOX7uCIJmQVRKBcbmWoySjAnSg== + dependencies: + "@octokit/openapi-types" "^27.0.0" + "@paralleldrive/cuid2@2.2.2", "@paralleldrive/cuid2@^2.2.2": version "2.2.2" resolved "https://registry.yarnpkg.com/@paralleldrive/cuid2/-/cuid2-2.2.2.tgz#7f91364d53b89e2c9cb9e02e8dd0f129e834455f" @@ -3216,11 +3322,6 @@ dependencies: "@noble/hashes" "^1.1.5" -"@pkgjs/parseargs@^0.11.0": - version "0.11.0" - resolved "https://registry.yarnpkg.com/@pkgjs/parseargs/-/parseargs-0.11.0.tgz#a77ea742fab25775145434eb1d2328cf5013ac33" - integrity sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg== - "@pnpm/config.env-replace@^1.1.0": version "1.1.0" resolved "https://registry.yarnpkg.com/@pnpm/config.env-replace/-/config.env-replace-1.1.0.tgz#ab29da53df41e8948a00f2433f085f54de8b3a4c" @@ -3270,6 +3371,43 @@ resolved "https://registry.yarnpkg.com/@rtsao/scc/-/scc-1.1.0.tgz#927dd2fae9bc3361403ac2c7a00c32ddce9ad7e8" integrity sha512-zt6OdqaDoOnJ1ZYsCYGt9YmWzDXl4vQdKTyJev62gFhRGKdx7mcT54V9KIjg+d2wi9EXsPvAPKe7i7WjfVWB8g== +"@rushstack/node-core-library@5.13.0": + version "5.13.0" + resolved "https://registry.yarnpkg.com/@rushstack/node-core-library/-/node-core-library-5.13.0.tgz#f79d6868b74be102eee75b93c37be45fb9b47ead" + integrity sha512-IGVhy+JgUacAdCGXKUrRhwHMTzqhWwZUI+qEPcdzsb80heOw0QPbhhoVsoiMF7Klp8eYsp7hzpScMXmOa3Uhfg== + dependencies: + ajv "~8.13.0" + ajv-draft-04 "~1.0.0" + ajv-formats "~3.0.1" + fs-extra "~11.3.0" + import-lazy "~4.0.0" + jju "~1.4.0" + resolve "~1.22.1" + semver "~7.5.4" + +"@rushstack/terminal@0.15.2": + version "0.15.2" + resolved 
"https://registry.yarnpkg.com/@rushstack/terminal/-/terminal-0.15.2.tgz#8fa030409603a22db606ecb18709050e46517add" + integrity sha512-7Hmc0ysK5077R/IkLS9hYu0QuNafm+TbZbtYVzCMbeOdMjaRboLKrhryjwZSRJGJzu+TV1ON7qZHeqf58XfLpA== + dependencies: + "@rushstack/node-core-library" "5.13.0" + supports-color "~8.1.1" + +"@rushstack/ts-command-line@^4.12.2": + version "4.23.7" + resolved "https://registry.yarnpkg.com/@rushstack/ts-command-line/-/ts-command-line-4.23.7.tgz#9c6f05a00f776c7b8ea3321e2b5a03acc5e9efa8" + integrity sha512-Gr9cB7DGe6uz5vq2wdr89WbVDKz0UeuFEn5H2CfWDe7JvjFFaiV15gi6mqDBTbHhHCWS7w8mF1h3BnIfUndqdA== + dependencies: + "@rushstack/terminal" "0.15.2" + "@types/argparse" "1.0.38" + argparse "~1.0.9" + string-argv "~0.3.1" + +"@sec-ant/readable-stream@^0.4.1": + version "0.4.1" + resolved "https://registry.yarnpkg.com/@sec-ant/readable-stream/-/readable-stream-0.4.1.tgz#60de891bb126abfdc5410fdc6166aca065f10a0c" + integrity sha512-831qok9r2t8AlxLko40y2ebgSDhenenCatLVeW/uBtnHPyhHOvG0C7TvfgecV+wHzIm5KUICgzmVpWS+IMEAeg== + "@semantic-release/changelog@^6.0.3": version "6.0.3" resolved "https://registry.yarnpkg.com/@semantic-release/changelog/-/changelog-6.0.3.tgz#6195630ecbeccad174461de727d5f975abc23eeb" @@ -3280,16 +3418,17 @@ fs-extra "^11.0.0" lodash "^4.17.4" -"@semantic-release/commit-analyzer@^10.0.0": - version "10.0.4" - resolved "https://registry.yarnpkg.com/@semantic-release/commit-analyzer/-/commit-analyzer-10.0.4.tgz#e2770f341b75d8f19fe6b5b833e8c2e0de2b84de" - integrity sha512-pFGn99fn8w4/MHE0otb2A/l5kxgOuxaaauIh4u30ncoTJuqWj4hXTgEJ03REqjS+w1R2vPftSsO26WC61yOcpw== +"@semantic-release/commit-analyzer@^13.0.1": + version "13.0.1" + resolved "https://registry.yarnpkg.com/@semantic-release/commit-analyzer/-/commit-analyzer-13.0.1.tgz#d84b599c3fef623ccc01f0cc2025eb56a57d8feb" + integrity sha512-wdnBPHKkr9HhNhXOhZD5a2LNl91+hs8CC2vsAVYxtZH3y0dV3wKn+uZSN61rdJQZ8EGxzWB3inWocBHV9+u/CQ== dependencies: - conventional-changelog-angular "^6.0.0" - 
conventional-commits-filter "^3.0.0" - conventional-commits-parser "^5.0.0" + conventional-changelog-angular "^8.0.0" + conventional-changelog-writer "^8.0.0" + conventional-commits-filter "^5.0.0" + conventional-commits-parser "^6.0.0" debug "^4.0.0" - import-from "^4.0.0" + import-from-esm "^2.0.0" lodash-es "^4.17.21" micromatch "^4.0.2" @@ -3322,62 +3461,65 @@ micromatch "^4.0.0" p-reduce "^2.0.0" -"@semantic-release/github@^9.0.0": - version "9.2.6" - resolved "https://registry.yarnpkg.com/@semantic-release/github/-/github-9.2.6.tgz#0b0b00ab3ab0486cd3aecb4ae2f9f9cf2edd8eae" - integrity sha512-shi+Lrf6exeNZF+sBhK+P011LSbhmIAoUEgEY6SsxF8irJ+J2stwI5jkyDQ+4gzYyDImzV6LCKdYB9FXnQRWKA== +"@semantic-release/github@^12.0.0": + version "12.0.6" + resolved "https://registry.yarnpkg.com/@semantic-release/github/-/github-12.0.6.tgz#c60c556e7087938be988d0be3de6d70e8cbaced8" + integrity sha512-aYYFkwHW3c6YtHwQF0t0+lAjlU+87NFOZuH2CvWFD0Ylivc7MwhZMiHOJ0FMpIgPpCVib/VUAcOwvrW0KnxQtA== dependencies: - "@octokit/core" "^5.0.0" - "@octokit/plugin-paginate-rest" "^9.0.0" - "@octokit/plugin-retry" "^6.0.0" - "@octokit/plugin-throttling" "^8.0.0" + "@octokit/core" "^7.0.0" + "@octokit/plugin-paginate-rest" "^14.0.0" + "@octokit/plugin-retry" "^8.0.0" + "@octokit/plugin-throttling" "^11.0.0" "@semantic-release/error" "^4.0.0" aggregate-error "^5.0.0" debug "^4.3.4" dir-glob "^3.0.1" - globby "^14.0.0" http-proxy-agent "^7.0.0" https-proxy-agent "^7.0.0" - issue-parser "^6.0.0" + issue-parser "^7.0.0" lodash-es "^4.17.21" mime "^4.0.0" p-filter "^4.0.0" + tinyglobby "^0.2.14" + undici "^7.0.0" url-join "^5.0.0" -"@semantic-release/npm@^10.0.2": - version "10.0.6" - resolved "https://registry.yarnpkg.com/@semantic-release/npm/-/npm-10.0.6.tgz#1c47a77e79464586fa1c67f148567ef2b9fda315" - integrity sha512-DyqHrGE8aUyapA277BB+4kV0C4iMHh3sHzUWdf0jTgp5NNJxVUz76W1f57FB64Ue03him3CBXxFqQD2xGabxow== +"@semantic-release/npm@^13.1.1": + version "13.1.5" + resolved 
"https://registry.yarnpkg.com/@semantic-release/npm/-/npm-13.1.5.tgz#99178d57ca8f68fb4ea2aa2d388052ec3f397498" + integrity sha512-Hq5UxzoatN3LHiq2rTsWS54nCdqJHlsssGERCo8WlvdfFA9LoN0vO+OuKVSjtNapIc/S8C2LBj206wKLHg62mg== dependencies: + "@actions/core" "^3.0.0" "@semantic-release/error" "^4.0.0" aggregate-error "^5.0.0" - execa "^8.0.0" + env-ci "^11.2.0" + execa "^9.0.0" fs-extra "^11.0.0" lodash-es "^4.17.21" nerf-dart "^1.0.0" - normalize-url "^8.0.0" - npm "^9.5.0" + normalize-url "^9.0.0" + npm "^11.6.2" rc "^1.2.8" - read-pkg "^8.0.0" + read-pkg "^10.0.0" registry-auth-token "^5.0.0" semver "^7.1.2" tempy "^3.0.0" -"@semantic-release/release-notes-generator@^11.0.0": - version "11.0.7" - resolved "https://registry.yarnpkg.com/@semantic-release/release-notes-generator/-/release-notes-generator-11.0.7.tgz#2193b8aa6b8b40297b6cbc5156bc9a7e5cdb9bbd" - integrity sha512-T09QB9ImmNx7Q6hY6YnnEbw/rEJ6a+22LBxfZq+pSAXg/OL/k0siwEm5cK4k1f9dE2Z2mPIjJKKohzUm0jbxcQ== +"@semantic-release/release-notes-generator@^14.1.0": + version "14.1.0" + resolved "https://registry.yarnpkg.com/@semantic-release/release-notes-generator/-/release-notes-generator-14.1.0.tgz#ac47bd214b48130e71578d9acefb1b1272854070" + integrity sha512-CcyDRk7xq+ON/20YNR+1I/jP7BYKICr1uKd1HHpROSnnTdGqOTburi4jcRiTYz0cpfhxSloQO3cGhnoot7IEkA== dependencies: - conventional-changelog-angular "^6.0.0" - conventional-changelog-writer "^6.0.0" - conventional-commits-filter "^4.0.0" - conventional-commits-parser "^5.0.0" + conventional-changelog-angular "^8.0.0" + conventional-changelog-writer "^8.0.0" + conventional-commits-filter "^5.0.0" + conventional-commits-parser "^6.0.0" debug "^4.0.0" get-stream "^7.0.0" - import-from "^4.0.0" + import-from-esm "^2.0.0" into-stream "^7.0.0" lodash-es "^4.17.21" - read-pkg-up "^10.0.0" + read-package-up "^11.0.0" "@semrel-extra/topo@^1.14.0": version "1.14.1" @@ -3448,13 +3590,6 @@ resolved 
"https://registry.yarnpkg.com/@sideway/pinpoint/-/pinpoint-2.0.0.tgz#cff8ffadc372ad29fd3f78277aeb29e632cc70df" integrity sha512-RNiOoTPkptFtSVzQevY/yWtZwf/RxyVnPy/OcA9HBM3MlGDnBEYL5B41H0MTn0Uec8Hi+2qUtTfG2WWZBmMejQ== -"@sigstore/bundle@^1.1.0": - version "1.1.0" - resolved "https://registry.yarnpkg.com/@sigstore/bundle/-/bundle-1.1.0.tgz#17f8d813b09348b16eeed66a8cf1c3d6bd3d04f1" - integrity sha512-PFutXEy0SmQxYI4texPw3dd2KewuNqv7OuK1ZFtY2fM754yhvG2KdgwIhRnoEE2uHdtdGNQ8s0lb94dW9sELog== - dependencies: - "@sigstore/protobuf-specs" "^0.2.0" - "@sigstore/bundle@^2.3.2": version "2.3.2" resolved "https://registry.yarnpkg.com/@sigstore/bundle/-/bundle-2.3.2.tgz#ad4dbb95d665405fd4a7a02c8a073dbd01e4e95e" @@ -3462,29 +3597,32 @@ dependencies: "@sigstore/protobuf-specs" "^0.3.2" +"@sigstore/bundle@^4.0.0": + version "4.0.0" + resolved "https://registry.yarnpkg.com/@sigstore/bundle/-/bundle-4.0.0.tgz#854eda43eb6a59352037e49000177c8904572f83" + integrity sha512-NwCl5Y0V6Di0NexvkTqdoVfmjTaQwoLM236r89KEojGmq/jMls8S+zb7yOwAPdXvbwfKDlP+lmXgAL4vKSQT+A== + dependencies: + "@sigstore/protobuf-specs" "^0.5.0" + "@sigstore/core@^1.0.0", "@sigstore/core@^1.1.0": version "1.1.0" resolved "https://registry.yarnpkg.com/@sigstore/core/-/core-1.1.0.tgz#5583d8f7ffe599fa0a89f2bf289301a5af262380" integrity sha512-JzBqdVIyqm2FRQCulY6nbQzMpJJpSiJ8XXWMhtOX9eKgaXXpfNOF53lzQEjIydlStnd/eFtuC1dW4VYdD93oRg== -"@sigstore/protobuf-specs@^0.2.0": - version "0.2.1" - resolved "https://registry.yarnpkg.com/@sigstore/protobuf-specs/-/protobuf-specs-0.2.1.tgz#be9ef4f3c38052c43bd399d3f792c97ff9e2277b" - integrity sha512-XTWVxnWJu+c1oCshMLwnKvz8ZQJJDVOlciMfgpJBQbThVjKTCG8dwyhgLngBD2KN0ap9F/gOV8rFDEx8uh7R2A== +"@sigstore/core@^3.1.0", "@sigstore/core@^3.2.0": + version "3.2.0" + resolved "https://registry.yarnpkg.com/@sigstore/core/-/core-3.2.0.tgz#beaea6ea4d7d4caadadb7453168e35636b78830e" + integrity sha512-kxHrDQ9YgfrWUSXU0cjsQGv8JykOFZQ9ErNKbFPWzk3Hgpwu8x2hHrQ9IdA8yl+j9RTLTC3sAF3Tdq1IQCP4oA== 
"@sigstore/protobuf-specs@^0.3.2": version "0.3.3" resolved "https://registry.yarnpkg.com/@sigstore/protobuf-specs/-/protobuf-specs-0.3.3.tgz#7dd46d68b76c322873a2ef7581ed955af6f4dcde" integrity sha512-RpacQhBlwpBWd7KEJsRKcBQalbV28fvkxwTOJIqhIuDysMMaJW47V4OqW30iJB9uRpqOSxxEAQFdr8tTattReQ== -"@sigstore/sign@^1.0.0": - version "1.0.0" - resolved "https://registry.yarnpkg.com/@sigstore/sign/-/sign-1.0.0.tgz#6b08ebc2f6c92aa5acb07a49784cb6738796f7b4" - integrity sha512-INxFVNQteLtcfGmcoldzV6Je0sbbfh9I16DM4yJPw3j5+TFP8X6uIiA18mvpEa9yyeycAKgPmOA3X9hVdVTPUA== - dependencies: - "@sigstore/bundle" "^1.1.0" - "@sigstore/protobuf-specs" "^0.2.0" - make-fetch-happen "^11.0.1" +"@sigstore/protobuf-specs@^0.5.0": + version "0.5.0" + resolved "https://registry.yarnpkg.com/@sigstore/protobuf-specs/-/protobuf-specs-0.5.0.tgz#e5f029edcb3a4329853a09b603011e61043eb005" + integrity sha512-MM8XIwUjN2bwvCg1QvrMtbBmpcSHrkhFSCu1D11NyPvDQ25HEc4oG5/OcQfd/Tlf/OxmKWERDj0zGE23jQaMwA== "@sigstore/sign@^2.3.2": version "2.3.2" @@ -3498,13 +3636,17 @@ proc-log "^4.2.0" promise-retry "^2.0.1" -"@sigstore/tuf@^1.0.3": - version "1.0.3" - resolved "https://registry.yarnpkg.com/@sigstore/tuf/-/tuf-1.0.3.tgz#2a65986772ede996485728f027b0514c0b70b160" - integrity sha512-2bRovzs0nJZFlCN3rXirE4gwxCn97JNjMmwpecqlbgV9WcxX7WRuIrgzx/X7Ib7MYRbyUTpBYE0s2x6AmZXnlg== +"@sigstore/sign@^4.1.0": + version "4.1.1" + resolved "https://registry.yarnpkg.com/@sigstore/sign/-/sign-4.1.1.tgz#34765fe4a190d693340c0771a3d150a397bcfc55" + integrity sha512-Hf4xglukg0XXQ2RiD5vSoLjdPe8OBUPA8XeVjUObheuDcWdYWrnH/BNmxZCzkAy68MzmNCxXLeurJvs6hcP2OQ== dependencies: - "@sigstore/protobuf-specs" "^0.2.0" - tuf-js "^1.1.7" + "@gar/promise-retry" "^1.0.2" + "@sigstore/bundle" "^4.0.0" + "@sigstore/core" "^3.2.0" + "@sigstore/protobuf-specs" "^0.5.0" + make-fetch-happen "^15.0.4" + proc-log "^6.1.0" "@sigstore/tuf@^2.3.4": version "2.3.4" @@ -3514,6 +3656,14 @@ "@sigstore/protobuf-specs" "^0.3.2" tuf-js "^2.2.1" +"@sigstore/tuf@^4.0.1", 
"@sigstore/tuf@^4.0.2": + version "4.0.2" + resolved "https://registry.yarnpkg.com/@sigstore/tuf/-/tuf-4.0.2.tgz#7d2fa2abcd5afa5baf752671d14a1c6ed0ed3196" + integrity sha512-TCAzTy0xzdP79EnxSjq9KQ3eaR7+FmudLC6eRKknVKZbV7ZNlGLClAAQb/HMNJ5n2OBNk2GT1tEmU0xuPr+SLQ== + dependencies: + "@sigstore/protobuf-specs" "^0.5.0" + tuf-js "^4.1.0" + "@sigstore/verify@^1.2.1": version "1.2.1" resolved "https://registry.yarnpkg.com/@sigstore/verify/-/verify-1.2.1.tgz#c7e60241b432890dcb8bd8322427f6062ef819e1" @@ -3523,15 +3673,34 @@ "@sigstore/core" "^1.1.0" "@sigstore/protobuf-specs" "^0.3.2" +"@sigstore/verify@^3.1.0": + version "3.1.0" + resolved "https://registry.yarnpkg.com/@sigstore/verify/-/verify-3.1.0.tgz#4046d4186421db779501fe87fa5acaa5d4d21b08" + integrity sha512-mNe0Iigql08YupSOGv197YdHpPPr+EzDZmfCgMc7RPNaZTw5aLN01nBl6CHJOh3BGtnMIj83EeN4butBchc8Ag== + dependencies: + "@sigstore/bundle" "^4.0.0" + "@sigstore/core" "^3.1.0" + "@sigstore/protobuf-specs" "^0.5.0" + +"@simple-libs/stream-utils@^1.2.0": + version "1.2.0" + resolved "https://registry.yarnpkg.com/@simple-libs/stream-utils/-/stream-utils-1.2.0.tgz#5af724b826f1ab4d7f2826d31d3efccec124102b" + integrity sha512-KxXvfapcixpz6rVEB6HPjOUZT22yN6v0vI0urQSk1L8MlEWPDFCZkhw2xmkyoTGYeFw7tWTZd7e3lVzRZRN/EA== + "@sinclair/typebox@^0.27.8": version "0.27.8" resolved "https://registry.yarnpkg.com/@sinclair/typebox/-/typebox-0.27.8.tgz#6667fac16c436b5434a387a34dedb013198f6e6e" integrity sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA== -"@sindresorhus/merge-streams@^2.1.0": - version "2.3.0" - resolved "https://registry.yarnpkg.com/@sindresorhus/merge-streams/-/merge-streams-2.3.0.tgz#719df7fb41766bc143369eaa0dd56d8dc87c9958" - integrity sha512-LtoMMhxAlorcGhmFYI+LhPgbPZCkgP6ra1YL604EeF6U98pLlQ3iWIGMdWSC+vWmPBWBNgmDBAhnAobLROJmwg== +"@sindresorhus/is@^4.6.0": + version "4.6.0" + resolved 
"https://registry.yarnpkg.com/@sindresorhus/is/-/is-4.6.0.tgz#3c7c9c46e678feefe7a2e5bb609d3dbd665ffb3f" + integrity sha512-t09vSN3MdfsyCHoFcTRCH/iUtG7OJ0CsjzB8cjAmKc/va/kIgeDI/TxsigdncE/4be734m0cvIYwNaV4i2XqAw== + +"@sindresorhus/merge-streams@^4.0.0": + version "4.0.0" + resolved "https://registry.yarnpkg.com/@sindresorhus/merge-streams/-/merge-streams-4.0.0.tgz#abb11d99aeb6d27f1b563c38147a72d50058e339" + integrity sha512-tlqY9xq5ukxTUZBmoOp+m61cqwQD5pHJtFY3Mn8CA8ps6yghLH/Hw8UPdqg4OLmFW3IFlcXnQNmo/dh8HzXYIQ== "@sinonjs/commons@^3.0.0": version "3.0.0" @@ -3883,6 +4052,13 @@ dependencies: tslib "^2.6.2" +"@smithy/types@^4.13.1": + version "4.13.1" + resolved "https://registry.yarnpkg.com/@smithy/types/-/types-4.13.1.tgz#8aaf15bb0f42b4e7c93c87018a3678a06d74691d" + integrity sha512-787F3yzE2UiJIQ+wYW1CVg2odHjmaWLGksnKQHUrK/lYZSEcy1msuLVvxaR/sI2/aDe9U+TBuLsXnr3vod1g0g== + dependencies: + tslib "^2.6.2" + "@smithy/url-parser@^4.2.10", "@smithy/url-parser@^4.2.9": version "4.2.10" resolved "https://registry.yarnpkg.com/@smithy/url-parser/-/url-parser-4.2.10.tgz#9c123e4acd5074cc2f4626fc629762d9dbefca18" @@ -4082,24 +4258,11 @@ resolved "https://registry.yarnpkg.com/@tsconfig/node16/-/node16-1.0.4.tgz#0b92dcc0cc1c81f6f306a381f28e31b1a56536e9" integrity sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA== -"@tufjs/canonical-json@1.0.0": - version "1.0.0" - resolved "https://registry.yarnpkg.com/@tufjs/canonical-json/-/canonical-json-1.0.0.tgz#eade9fd1f537993bc1f0949f3aea276ecc4fab31" - integrity sha512-QTnf++uxunWvG2z3UFNzAoQPHxnSXOwtaI3iJ+AohhV+5vONuArPjJE7aPXPVXfXJsqrVbZBu9b81AJoSd09IQ== - "@tufjs/canonical-json@2.0.0": version "2.0.0" resolved "https://registry.yarnpkg.com/@tufjs/canonical-json/-/canonical-json-2.0.0.tgz#a52f61a3d7374833fca945b2549bc30a2dd40d0a" integrity sha512-yVtV8zsdo8qFHe+/3kw81dSLyF7D576A5cCFCi4X7B39tWT7SekaEFUnvnWJHz+9qO7qJTah1JbrDjWKqFtdWA== -"@tufjs/models@1.0.4": - version "1.0.4" - resolved 
"https://registry.yarnpkg.com/@tufjs/models/-/models-1.0.4.tgz#5a689630f6b9dbda338d4b208019336562f176ef" - integrity sha512-qaGV9ltJP0EO25YfFUPhxRVK0evXFIAGicsVXuRim4Ed9cjPxYhNnNJ49SFmbeLgtxpslIkX317IgpfcHPVj/A== - dependencies: - "@tufjs/canonical-json" "1.0.0" - minimatch "^9.0.0" - "@tufjs/models@2.0.1": version "2.0.1" resolved "https://registry.yarnpkg.com/@tufjs/models/-/models-2.0.1.tgz#e429714e753b6c2469af3212e7f320a6973c2812" @@ -4108,6 +4271,14 @@ "@tufjs/canonical-json" "2.0.0" minimatch "^9.0.4" +"@tufjs/models@4.1.0": + version "4.1.0" + resolved "https://registry.yarnpkg.com/@tufjs/models/-/models-4.1.0.tgz#494b39cf5e2f6855d80031246dd236d8086069b3" + integrity sha512-Y8cK9aggNRsqJVaKUlEYs4s7CvQ1b1ta2DVPyAimb0I2qhzjNk+A+mxvll/klL0RlfuIUei8BF7YWiua4kQqww== + dependencies: + "@tufjs/canonical-json" "2.0.0" + minimatch "^10.1.1" + "@tybys/wasm-util@^0.9.0": version "0.9.0" resolved "https://registry.yarnpkg.com/@tybys/wasm-util/-/wasm-util-0.9.0.tgz#3e75eb00604c8d6db470bf18c37b7d984a0e3355" @@ -4129,6 +4300,11 @@ dependencies: "@types/node" "*" +"@types/argparse@1.0.38": + version "1.0.38" + resolved "https://registry.yarnpkg.com/@types/argparse/-/argparse-1.0.38.tgz#a81fd8606d481f873a3800c6ebae4f1d768a56a9" + integrity sha512-ebDJ9b0e702Yr7pWgB0jzm+CX4Srzz8RcXtLJDJB+BSccqMa36uyH/zUsSYao5+BD1ytv3k3rPYCq4mAE1hsXA== + "@types/babel__core@^7.1.14": version "7.20.4" resolved "https://registry.yarnpkg.com/@types/babel__core/-/babel__core-7.20.4.tgz#26a87347e6c6f753b3668398e34496d6d9ac6ac0" @@ -4505,7 +4681,7 @@ dependencies: undici-types "~6.21.0" -"@types/normalize-package-data@^2.4.0", "@types/normalize-package-data@^2.4.1": +"@types/normalize-package-data@^2.4.0", "@types/normalize-package-data@^2.4.3", "@types/normalize-package-data@^2.4.4": version "2.4.4" resolved "https://registry.yarnpkg.com/@types/normalize-package-data/-/normalize-package-data-2.4.4.tgz#56e2cc26c397c038fab0e3a917a12d5c5909e901" integrity 
sha512-37i+OaWTh9qeK4LSHPsyRC7NahnGotNuZvjLSgcPzblpHB3rrCJxAOgI5gCdKm7coonsaX1Of0ILiTcnZjbfxA== @@ -4551,6 +4727,13 @@ "@types/mime" "^1" "@types/node" "*" +"@types/sequelize@^6.12.0": + version "6.12.0" + resolved "https://registry.yarnpkg.com/@types/sequelize/-/sequelize-6.12.0.tgz#91b3ab13830b90dbac3632c8e2adc071c7c5b527" + integrity sha512-rJRUf3AkP356HicHSjM2I7+d0FiLvOPZdDxFY0bhmKiZnGOuZZ1cUcDlPyyin9b3pYFvxZfJUTC3nGXLoi6DWA== + dependencies: + sequelize "*" + "@types/serve-static@*": version "1.15.5" resolved "https://registry.yarnpkg.com/@types/serve-static/-/serve-static-1.15.5.tgz#15e67500ec40789a1e8c9defc2d32a896f05b033" @@ -4787,7 +4970,7 @@ JSONStream@^1.3.5: jsonparse "^1.2.0" through ">=2.2.7 <3" -abbrev@1, abbrev@^1.0.0: +abbrev@1: version "1.1.1" resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.1.1.tgz#f8f2c887ad10bf67f634f005b6987fed3179aac8" integrity sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q== @@ -4797,6 +4980,11 @@ abbrev@^2.0.0: resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-2.0.0.tgz#cf59829b8b4f03f89dda2771cb7f3653828c89bf" integrity sha512-6/mh1E2u2YgEsCHdY0Yx5oW+61gZU+1vXaoiHHrpKeuRNNgFvS+/jrwHiQhB5apAf5oB7UB7E19ol2R2LKH8hQ== +abbrev@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-4.0.0.tgz#ec933f0e27b6cd60e89b5c6b2a304af42209bb05" + integrity sha512-a1wflyaL0tHtJSmLSOVybYhy22vRih4eduhhrkcjgrWGnRfrZtovJ2FRjxuTtkkj47O/baf0R86QU5OuYpz8fA== + abort-controller@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/abort-controller/-/abort-controller-3.0.0.tgz#eaf54d53b62bae4138e809ca225c8439a6efb392" @@ -4876,13 +5064,6 @@ agentkeepalive@^4.1.3: dependencies: humanize-ms "^1.2.1" -agentkeepalive@^4.2.1: - version "4.6.0" - resolved "https://registry.yarnpkg.com/agentkeepalive/-/agentkeepalive-4.6.0.tgz#35f73e94b3f40bf65f105219c623ad19c136ea6a" - integrity 
sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ== - dependencies: - humanize-ms "^1.2.1" - aggregate-error@^3.0.0: version "3.1.0" resolved "https://registry.yarnpkg.com/aggregate-error/-/aggregate-error-3.1.0.tgz#92670ff50f5359bdb7a3e0d40d0ec30c5737687a" @@ -4899,6 +5080,11 @@ aggregate-error@^5.0.0: clean-stack "^5.2.0" indent-string "^5.0.0" +ajv-draft-04@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/ajv-draft-04/-/ajv-draft-04-1.0.0.tgz#3b64761b268ba0b9e668f0b41ba53fce0ad77fc8" + integrity sha512-mv00Te6nmYbRp5DCwclxtt7yV/joXJPGS7nM+97GdxvuttCOfgI3K4U25zboyeX0O+myI8ERluxQe5wljMmVIw== + ajv-formats@^2.1.1: version "2.1.1" resolved "https://registry.yarnpkg.com/ajv-formats/-/ajv-formats-2.1.1.tgz#6e669400659eb74973bbf2e33327180a0996b520" @@ -4906,7 +5092,7 @@ ajv-formats@^2.1.1: dependencies: ajv "^8.0.0" -ajv-formats@^3.0.1: +ajv-formats@^3.0.1, ajv-formats@~3.0.1: version "3.0.1" resolved "https://registry.yarnpkg.com/ajv-formats/-/ajv-formats-3.0.1.tgz#3d5dc762bca17679c3c2ea7e90ad6b7532309578" integrity sha512-8iUql50EUR+uUcdRQ3HDqa6EVyo3docL8g5WJ3FNcWmu62IbkGUue/pEyLBW8VGKKucTPgqeks4fIU1DA4yowQ== @@ -4933,6 +5119,16 @@ ajv@^8.0.0, ajv@^8.1.0, ajv@^8.10.0, ajv@^8.11.0, ajv@^8.17.1: json-schema-traverse "^1.0.0" require-from-string "^2.0.2" +ajv@~8.13.0: + version "8.13.0" + resolved "https://registry.yarnpkg.com/ajv/-/ajv-8.13.0.tgz#a3939eaec9fb80d217ddf0c3376948c023f28c91" + integrity sha512-PRA911Blj99jR5RMeTunVbNXMF6Lp4vZXnk5GQjcnUWUTsrXtekg/pnmFFI2u/I36Y/2bITGS30GZCXei6uNkA== + dependencies: + fast-deep-equal "^3.1.3" + json-schema-traverse "^1.0.0" + require-from-string "^2.0.2" + uri-js "^4.4.1" + ansi-colors@^4.1.1: version "4.1.3" resolved "https://registry.yarnpkg.com/ansi-colors/-/ansi-colors-4.1.3.tgz#37611340eb2243e70cc604cad35d63270d48781b" @@ -4950,10 +5146,12 @@ ansi-escapes@^4.2.1, ansi-escapes@^4.3.2: dependencies: type-fest "^0.21.3" -ansi-escapes@^6.2.0: - version "6.2.1" - 
resolved "https://registry.yarnpkg.com/ansi-escapes/-/ansi-escapes-6.2.1.tgz#76c54ce9b081dad39acec4b5d53377913825fb0f" - integrity sha512-4nJ3yixlEthEJ9Rk4vPcdBRkZvQZlYyu8j4/Mqz5sgIkddmEnH2Yj2ZrnP9S3tQOvSNRUIgVNF/1yPpRAGNRig== +ansi-escapes@^7.0.0: + version "7.3.0" + resolved "https://registry.yarnpkg.com/ansi-escapes/-/ansi-escapes-7.3.0.tgz#5395bb74b2150a4a1d6e3c2565f4aeca78d28627" + integrity sha512-BvU8nYgGQBxcmMuEeUEmNTvrMVjJNSH7RgW24vXexN4Ven6qCvy4TntnvlnwnMLTVlcRQQdbRY8NKnaIoeWDNg== + dependencies: + environment "^1.0.0" ansi-regex@^2.0.0: version "2.1.1" @@ -4975,7 +5173,7 @@ ansi-regex@^5.0.1: resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-5.0.1.tgz#082cb2c89c9fe8659a311a53bd6a4dc5301db304" integrity sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ== -ansi-regex@^6.2.2: +ansi-regex@^6.1.0, ansi-regex@^6.2.2: version "6.2.2" resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-6.2.2.tgz#60216eea464d864597ce2832000738a0589650c1" integrity sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg== @@ -5004,7 +5202,7 @@ ansi-styles@^5.0.0: resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-5.2.0.tgz#07449690ad45777d1924ac2abb2fc8895dba836b" integrity sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA== -ansi-styles@^6.1.0: +ansi-styles@^6.2.1: version "6.2.3" resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-6.2.3.tgz#c044d5dcc521a076413472597a1acb1f103c4041" integrity sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg== @@ -5024,6 +5222,11 @@ antlr4@^4.13.1-patch-1: resolved "https://registry.yarnpkg.com/antlr4/-/antlr4-4.13.1-patch-1.tgz#946176f863f890964a050c4f18c47fd6f7e57602" integrity sha512-OjFLWWLzDMV9rdFhpvroCWR4ooktNg9/nvVYSA5z28wuVpU36QUNuioR1XLnQtcjVlf8npjyz593PxnU/f/Cow== +any-promise@^1.0.0: + version "1.3.0" + resolved 
"https://registry.yarnpkg.com/any-promise/-/any-promise-1.3.0.tgz#abc6afeedcea52e809cdc0376aed3ce39635d17f" + integrity sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A== + anymatch@^3.0.3, anymatch@~3.1.2: version "3.1.3" resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-3.1.3.tgz#790c58b19ba1720a84205b57c618d5ad8524973e" @@ -5134,17 +5337,12 @@ are-we-there-yet@^3.0.0: delegates "^1.0.0" readable-stream "^3.6.0" -are-we-there-yet@^4.0.0: - version "4.0.2" - resolved "https://registry.yarnpkg.com/are-we-there-yet/-/are-we-there-yet-4.0.2.tgz#aed25dd0eae514660d49ac2b2366b175c614785a" - integrity sha512-ncSWAawFhKMJDTdoAeOV+jyW1VCMj5QIAwULIBV0SSR7B/RLPPEQiknKcg/RIIZlUQrxELpsxMiTUoAQ4sIUyg== - arg@^4.1.0: version "4.1.3" resolved "https://registry.yarnpkg.com/arg/-/arg-4.1.3.tgz#269fc7ad5b8e42cb63c896d5666017261c144089" integrity sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA== -argparse@^1.0.7: +argparse@^1.0.7, argparse@~1.0.9: version "1.0.10" resolved "https://registry.yarnpkg.com/argparse/-/argparse-1.0.10.tgz#bcd6791ea5ae09725e17e5ad988134cd40b3d911" integrity sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg== @@ -5453,6 +5651,11 @@ balanced-match@^1.0.0: resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.2.tgz#e83e3a7e3f300b34cb9d87f615fa0cbf357690ee" integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw== +balanced-match@^4.0.2: + version "4.0.4" + resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-4.0.4.tgz#bfb10662feed8196a2c62e7c68e17720c274179a" + integrity sha512-BLrgEcRTwX2o6gGxGOCNyMvGSp35YofuYzw9h1IMTRmKqttAZZVU67bdb9Pr2vUHA8+j3i2tJfjO6C6+4myGTA== + base64-js@^1.3.1, base64-js@^1.5.1: version "1.5.1" resolved "https://registry.yarnpkg.com/base64-js/-/base64-js-1.5.1.tgz#1b1b440160a5bf7ad40b650f095963481903930a" @@ 
-5475,7 +5678,12 @@ before-after-hook@^2.2.0: resolved "https://registry.yarnpkg.com/before-after-hook/-/before-after-hook-2.2.3.tgz#c51e809c81a4e354084422b9b26bad88249c517c" integrity sha512-NzUnlZexiaH/46WDhANlyR2bXRopNg4F/zuSA3OpZnllCUgRaOF2znDioDWrmbNVsuZk6l9pMquQB38cfBZwkQ== -bin-links@^4.0.1, bin-links@^4.0.4: +before-after-hook@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/before-after-hook/-/before-after-hook-4.0.0.tgz#cf1447ab9160df6a40f3621da64d6ffc36050cb9" + integrity sha512-q6tR3RPqIB1pMiTRMFcZwuG5T8vwp+vUvEG0vuI6B+Rikh5BfPp2fQ82c925FOs+b0lcFQ8CFrL+KbilfZFhOQ== + +bin-links@^4.0.4: version "4.0.4" resolved "https://registry.yarnpkg.com/bin-links/-/bin-links-4.0.4.tgz#c3565832b8e287c85f109a02a17027d152a58a63" integrity sha512-cMtq4W5ZsEwcutJrVId+a/tjt8GSbS+h0oNkdl6+6rBuEv8Ot33Bevj5KPm40t309zuhVic8NjpuL42QCiJWWA== @@ -5485,6 +5693,17 @@ bin-links@^4.0.1, bin-links@^4.0.4: read-cmd-shim "^4.0.0" write-file-atomic "^5.0.0" +bin-links@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/bin-links/-/bin-links-6.0.0.tgz#0245114374463a694e161a1e65417e7939ab2eba" + integrity sha512-X4CiKlcV2GjnCMwnKAfbVWpHa++65th9TuzAEYtZoATiOE2DQKhSp4CJlyLoTqdhBKlXjpXjCTYPNNFS33Fi6w== + dependencies: + cmd-shim "^8.0.0" + npm-normalize-package-bin "^5.0.0" + proc-log "^6.0.0" + read-cmd-shim "^6.0.0" + write-file-atomic "^7.0.0" + binary-extensions@^2.0.0: version "2.2.0" resolved "https://registry.yarnpkg.com/binary-extensions/-/binary-extensions-2.2.0.tgz#75f502eeaf9ffde42fc98829645be4ea76bd9e2d" @@ -5495,6 +5714,11 @@ binary-extensions@^2.2.0: resolved "https://registry.yarnpkg.com/binary-extensions/-/binary-extensions-2.3.0.tgz#f6e14a97858d327252200242d4ccfe522c445522" integrity sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw== +binary-extensions@^3.0.0: + version "3.1.0" + resolved 
"https://registry.yarnpkg.com/binary-extensions/-/binary-extensions-3.1.0.tgz#be31cd3aa5c7e3dc42c501e57d4fff87d665e17e" + integrity sha512-Jvvd9hy1w+xUad8+ckQsWA/V1AoyubOvqn0aygjMOVM4BfIaRav1NFS3LsTSDaV4n4FtcCtQXvzep1E6MboqwQ== + bindings@^1.5.0: version "1.5.0" resolved "https://registry.yarnpkg.com/bindings/-/bindings-1.5.0.tgz#10353c9e945334bc0511a6d90b38fbc7c9c504df" @@ -5612,7 +5836,14 @@ brace-expansion@^2.0.1, brace-expansion@^2.0.2: dependencies: balanced-match "^1.0.0" -braces@^3.0.1, braces@^3.0.3, braces@~3.0.2: +brace-expansion@^5.0.2: + version "5.0.4" + resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-5.0.4.tgz#614daaecd0a688f660bbbc909a8748c3d80d4336" + integrity sha512-h+DEnpVvxmfVefa4jFbCf5HdH5YMDXRsmKflpf1pILZWRFlTbJpxeU55nJl4Smt5HQaGzg1o6RHFPJaOqnmBDg== + dependencies: + balanced-match "^4.0.2" + +braces@^3.0.3, braces@~3.0.2: version "3.0.3" resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.3.tgz#490332f40919452272d55a8480adc0c441358789" integrity sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA== @@ -5755,41 +5986,17 @@ cacache@^15.2.0: tar "^6.0.2" unique-filename "^1.1.1" -cacache@^16.1.0: - version "16.1.3" - resolved "https://registry.yarnpkg.com/cacache/-/cacache-16.1.3.tgz#a02b9f34ecfaf9a78c9f4bc16fceb94d5d67a38e" - integrity sha512-/+Emcj9DAXxX4cwlLmRI9c166RuL3w30zp4R7Joiv2cQTtTtA+jeuCAjH3ZlGnYS3tKENSrKhAzVVP9GVyzeYQ== - dependencies: - "@npmcli/fs" "^2.1.0" - "@npmcli/move-file" "^2.0.0" - chownr "^2.0.0" - fs-minipass "^2.1.0" - glob "^8.0.1" - infer-owner "^1.0.4" - lru-cache "^7.7.1" - minipass "^3.1.6" - minipass-collect "^1.0.2" - minipass-flush "^1.0.5" - minipass-pipeline "^1.2.4" - mkdirp "^1.0.4" - p-map "^4.0.0" - promise-inflight "^1.0.1" - rimraf "^3.0.2" - ssri "^9.0.0" - tar "^6.1.11" - unique-filename "^2.0.0" - -cacache@^17.0.0, cacache@^17.0.4, cacache@^17.1.4: - version "17.1.4" - resolved 
"https://registry.yarnpkg.com/cacache/-/cacache-17.1.4.tgz#b3ff381580b47e85c6e64f801101508e26604b35" - integrity sha512-/aJwG2l3ZMJ1xNAnqbMpA40of9dj/pIH3QfiuQSqjfPJF747VR0J/bHn+/KdNnHKc6XQcWt/AfRSBft82W1d2A== +cacache@^18.0.0, cacache@^18.0.3: + version "18.0.4" + resolved "https://registry.yarnpkg.com/cacache/-/cacache-18.0.4.tgz#4601d7578dadb59c66044e157d02a3314682d6a5" + integrity sha512-B+L5iIa9mgcjLbliir2th36yEwPftrzteHYujzsx3dFP/31GCHcIeS8f5MGd80odLOjaOvSpU3EEAmRQptkxLQ== dependencies: "@npmcli/fs" "^3.1.0" fs-minipass "^3.0.0" glob "^10.2.2" - lru-cache "^7.7.1" + lru-cache "^10.0.1" minipass "^7.0.3" - minipass-collect "^1.0.2" + minipass-collect "^2.0.1" minipass-flush "^1.0.5" minipass-pipeline "^1.2.4" p-map "^4.0.0" @@ -5797,23 +6004,21 @@ cacache@^17.0.0, cacache@^17.0.4, cacache@^17.1.4: tar "^6.1.11" unique-filename "^3.0.0" -cacache@^18.0.0, cacache@^18.0.3: - version "18.0.4" - resolved "https://registry.yarnpkg.com/cacache/-/cacache-18.0.4.tgz#4601d7578dadb59c66044e157d02a3314682d6a5" - integrity sha512-B+L5iIa9mgcjLbliir2th36yEwPftrzteHYujzsx3dFP/31GCHcIeS8f5MGd80odLOjaOvSpU3EEAmRQptkxLQ== +cacache@^20.0.0, cacache@^20.0.1, cacache@^20.0.4: + version "20.0.4" + resolved "https://registry.yarnpkg.com/cacache/-/cacache-20.0.4.tgz#9b547dc3db0c1f87cba6dbbff91fb17181b4bbb1" + integrity sha512-M3Lab8NPYlZU2exsL3bMVvMrMqgwCnMWfdZbK28bn3pK6APT/Te/I8hjRPNu1uwORY9a1eEQoifXbKPQMfMTOA== dependencies: - "@npmcli/fs" "^3.1.0" + "@npmcli/fs" "^5.0.0" fs-minipass "^3.0.0" - glob "^10.2.2" - lru-cache "^10.0.1" + glob "^13.0.0" + lru-cache "^11.1.0" minipass "^7.0.3" minipass-collect "^2.0.1" minipass-flush "^1.0.5" minipass-pipeline "^1.2.4" - p-map "^4.0.0" - ssri "^10.0.0" - tar "^6.1.11" - unique-filename "^3.0.0" + p-map "^7.0.2" + ssri "^13.0.0" cache-content-type@^1.0.0: version "1.0.1" @@ -5957,7 +6162,7 @@ chalk@^5.2.0: resolved "https://registry.yarnpkg.com/chalk/-/chalk-5.3.0.tgz#67c20a7ebef70e7f3970a01f90fa210cb6860385" integrity 
sha512-dLitG79d+GV1Nb/VYcCDFivJeK1hiukt9QjRNVOsUtTy1rR1YJsmpGGTZ3qJos+uw7WmWF4wUwBd9jxjocFC2w== -chalk@^5.3.0: +chalk@^5.4.1, chalk@^5.6.2: version "5.6.2" resolved "https://registry.yarnpkg.com/chalk/-/chalk-5.6.2.tgz#b1238b6e23ea337af71c7f8a295db5af0c158aea" integrity sha512-7NzBL0rN6fMUW+f7A6Io4h40qQlG+xGmtMxfbnH/K7TAtt8JQWVQK+6g0UXKMeVJoyV5EkkNsErQ8pVD3bLHbA== @@ -6037,6 +6242,11 @@ chownr@^2.0.0: resolved "https://registry.yarnpkg.com/chownr/-/chownr-2.0.0.tgz#15bfbe53d2eab4cf70f18a8cd68ebe5b3cb1dece" integrity sha512-bIomtDF5KGpdogkLd9VspvFzk9KfpyyGlS8YFVZl7TGPBHL5snIOnxeshwVgPteQ9b4Eydl+pVbIyE1DcvCWgQ== +chownr@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/chownr/-/chownr-3.0.0.tgz#9855e64ecd240a9cc4267ce8a4aa5d24a1da15e4" + integrity sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g== + ci-info@^3.2.0: version "3.9.0" resolved "https://registry.yarnpkg.com/ci-info/-/ci-info-3.9.0.tgz#4279a62028a7b1f262f3473fc9605f5e218c59b4" @@ -6047,12 +6257,15 @@ ci-info@^4.0.0: resolved "https://registry.yarnpkg.com/ci-info/-/ci-info-4.2.0.tgz#cbd21386152ebfe1d56f280a3b5feccbd96764c7" integrity sha512-cYY9mypksY8NRqgDB1XD1RiJL338v/551niynFTGkZOO2LHuB2OmOYxDIe/ttN9AHwrqdum1360G3ald0W9kCg== -cidr-regex@^3.1.1: - version "3.1.1" - resolved "https://registry.yarnpkg.com/cidr-regex/-/cidr-regex-3.1.1.tgz#ba1972c57c66f61875f18fd7dd487469770b571d" - integrity sha512-RBqYd32aDwbCMFJRL6wHOlDNYJsPNTt8vC82ErHF5vKt8QQzxm1FrkW8s/R5pVrXMf17sba09Uoy91PKiddAsw== - dependencies: - ip-regex "^4.1.0" +ci-info@^4.4.0: + version "4.4.0" + resolved "https://registry.yarnpkg.com/ci-info/-/ci-info-4.4.0.tgz#7d54eff9f54b45b62401c26032696eb59c8bd18c" + integrity sha512-77PSwercCZU2Fc4sX94eF8k8Pxte6JAwL4/ICZLFjJLqegs7kCuAsqqj/70NQF6TvDpgFjkubQB2FW2ZZddvQg== + +cidr-regex@^5.0.1: + version "5.0.3" + resolved "https://registry.yarnpkg.com/cidr-regex/-/cidr-regex-5.0.3.tgz#6ae2f772d93c9cc941f85a4d597e4f97e95610af" + integrity 
sha512-zfPT2uurEroxXqefaL2L7/fT5ED2XTutC6UwFbSZfqSOk1vk5VFY6xa6/R6pBxB4Uc8MNPbRW5ykqutFG5P5ww== cjs-module-lexer@^1.0.0: version "1.2.3" @@ -6089,14 +6302,6 @@ cli-color@^2.0.0: memoizee "^0.4.15" timers-ext "^0.1.7" -cli-columns@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/cli-columns/-/cli-columns-4.0.0.tgz#9fe4d65975238d55218c41bd2ed296a7fa555646" - integrity sha512-XW2Vg+w+L9on9wtwKpyzluIPCWXjaBahI7mTcYjx+BVIYD9c3yqcv/yKC7CmdCZat4rq2yiE1UMSJC5ivKfMtQ== - dependencies: - string-width "^4.2.3" - strip-ansi "^6.0.1" - cli-cursor@3.1.0, cli-cursor@^3.0.0, cli-cursor@^3.1.0: version "3.1.0" resolved "https://registry.yarnpkg.com/cli-cursor/-/cli-cursor-3.1.0.tgz#264305a7ae490d1d03bf0c9ba7c925d1753af307" @@ -6111,6 +6316,18 @@ cli-cursor@^2.1.0: dependencies: restore-cursor "^2.0.0" +cli-highlight@^2.1.11: + version "2.1.11" + resolved "https://registry.yarnpkg.com/cli-highlight/-/cli-highlight-2.1.11.tgz#49736fa452f0aaf4fae580e30acb26828d2dc1bf" + integrity sha512-9KDcoEVwyUXrjcJNvHD0NFc/hiwe/WPVYIleQh2O1N2Zro5gWJZ/K+3DGn8w8P/F6FxOgzyC5bxDyHIgCSPhGg== + dependencies: + chalk "^4.0.0" + highlight.js "^10.7.1" + mz "^2.4.0" + parse5 "^5.1.1" + parse5-htmlparser2-tree-adapter "^6.0.0" + yargs "^16.0.0" + cli-progress@^3.12.0: version "3.12.0" resolved "https://registry.yarnpkg.com/cli-progress/-/cli-progress-3.12.0.tgz#807ee14b66bcc086258e444ad0f19e7d42577942" @@ -6133,7 +6350,7 @@ cli-spinners@^2.5.0: resolved "https://registry.yarnpkg.com/cli-spinners/-/cli-spinners-2.9.1.tgz#9c0b9dad69a6d47cbb4333c14319b060ed395a35" integrity sha512-jHgecW0pxkonBJdrKsqxgRX9AcG+u/5k0Q7WPDfi8AogLAdwxEkyYYNWwZ5GvVFoFx2uiY1eNcSK00fh+1+FyQ== -cli-table3@^0.6.3: +cli-table3@^0.6.5: version "0.6.5" resolved "https://registry.yarnpkg.com/cli-table3/-/cli-table3-0.6.5.tgz#013b91351762739c16a9567c21a04632e449bf2f" integrity sha512-+W/5efTR7y5HRD7gACw9yQjqMVvEMLBHmboM/kPWam+H+Hmyrgjh6YncVKK122YZkXrLudzTuAukUw9FnMf7IQ== @@ -6191,6 +6408,15 @@ cliui@^8.0.1: strip-ansi 
"^6.0.1" wrap-ansi "^7.0.0" +cliui@^9.0.1: + version "9.0.1" + resolved "https://registry.yarnpkg.com/cliui/-/cliui-9.0.1.tgz#6f7890f386f6f1f79953adc1f78dec46fcc2d291" + integrity sha512-k7ndgKhwoQveBL+/1tqGJYNz097I7WOvwbmmU2AR5+magtbjPWQTS1C5vzGkBC8Ym8UWRzfKUzUUqFLypY4Q+w== + dependencies: + string-width "^7.2.0" + strip-ansi "^7.1.0" + wrap-ansi "^9.0.0" + clone-deep@4.0.1: version "4.0.1" resolved "https://registry.yarnpkg.com/clone-deep/-/clone-deep-4.0.1.tgz#c19fd9bdbbf85942b4fd979c84dcf7d5f07c2387" @@ -6210,6 +6436,11 @@ cmd-shim@6.0.3, cmd-shim@^6.0.0: resolved "https://registry.yarnpkg.com/cmd-shim/-/cmd-shim-6.0.3.tgz#c491e9656594ba17ac83c4bd931590a9d6e26033" integrity sha512-FMabTRlc5t5zjdenF6mS0MBeFZm0XqHqeOkcskKFb/LYCcRQ5fVgLOHVc4Lq9CqABd9zhjwPjMBCJvMCziSVtA== +cmd-shim@^8.0.0: + version "8.0.0" + resolved "https://registry.yarnpkg.com/cmd-shim/-/cmd-shim-8.0.0.tgz#5be238f22f40faf3f7e8c92edc3f5d354f7657b2" + integrity sha512-Jk/BK6NCapZ58BKUxlSI+ouKRbjH1NLZCgJkYoab+vEHUY3f6OzpNBN9u7HFSv9J6TRDGs4PLOHezoKGaFRSCA== + co-body@^6.2.0: version "6.2.0" resolved "https://registry.yarnpkg.com/co-body/-/co-body-6.2.0.tgz#afd776d60e5659f4eee862df83499698eb1aea1b" @@ -6281,7 +6512,7 @@ colors@1.0.3: resolved "https://registry.yarnpkg.com/colors/-/colors-1.0.3.tgz#0433f44d809680fdeb60ed260f1b0c262e82a40b" integrity sha512-pFGrxThWcWQ2MsAz6RtgeWe4NK2kUE1WfsrvvlctdII745EW9I0yflqhe7++M5LEc7bV2c/9/5zc8sFcpL0Drw== -columnify@1.6.0, columnify@^1.6.0: +columnify@1.6.0: version "1.6.0" resolved "https://registry.yarnpkg.com/columnify/-/columnify-1.6.0.tgz#6989531713c9008bb29735e61e37acf5bd553cf3" integrity sha512-lomjuFZKfM6MSAnV9aCZC9sc0qGbmZdfygNv+nCpqVkSKdCxCklLtd16O0EILGkImHw9ZpHkAnHaB+8Zxq5W6Q== @@ -6318,6 +6549,11 @@ common-ancestor-path@^1.0.1: resolved "https://registry.yarnpkg.com/common-ancestor-path/-/common-ancestor-path-1.0.1.tgz#4f7d2d1394d91b7abdf51871c62f71eadb0182a7" integrity 
sha512-L3sHRo1pXXEqX8VU28kfgUY+YGsk09hPqZiZmLacNib6XNTCM8ubYeT7ryXQw8asB1sKgcU5lkB7ONug08aB8w== +common-ancestor-path@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/common-ancestor-path/-/common-ancestor-path-2.0.0.tgz#f1d361aea9236aad5b92a0ff5b9df1422dd360ff" + integrity sha512-dnN3ibLeoRf2HNC+OlCiNc5d2zxbLJXOtiZUudNFSXZrNSydxcCsSpRzXwfu7BBWCIfHPw+xTayeBvJCP/D8Ng== + common-tags@^1.4.0: version "1.8.2" resolved "https://registry.yarnpkg.com/common-tags/-/common-tags-1.8.2.tgz#94ebb3c076d26032745fd54face7f688ef5ac9c6" @@ -6412,6 +6648,13 @@ conventional-changelog-angular@^6.0.0: dependencies: compare-func "^2.0.0" +conventional-changelog-angular@^8.0.0: + version "8.3.0" + resolved "https://registry.yarnpkg.com/conventional-changelog-angular/-/conventional-changelog-angular-8.3.0.tgz#e344def5f3d3c4f3242dea3c4e12c402e0d6832c" + integrity sha512-DOuBwYSqWzfwuRByY9O4oOIvDlkUCTDzfbOgcSbkY+imXXj+4tmrEFao3K+FxemClYfYnZzsvudbwrhje9VHDA== + dependencies: + compare-func "^2.0.0" + conventional-changelog-conventionalcommits@^6.1.0: version "6.1.0" resolved "https://registry.yarnpkg.com/conventional-changelog-conventionalcommits/-/conventional-changelog-conventionalcommits-6.1.0.tgz#3bad05f4eea64e423d3d90fc50c17d2c8cf17652" @@ -6454,6 +6697,17 @@ conventional-changelog-writer@^6.0.0: semver "^7.0.0" split "^1.0.1" +conventional-changelog-writer@^8.0.0: + version "8.4.0" + resolved "https://registry.yarnpkg.com/conventional-changelog-writer/-/conventional-changelog-writer-8.4.0.tgz#600bfb4c98ccf0a31baddf8a1305f229072faf1f" + integrity sha512-HHBFkk1EECxxmCi4CTu091iuDpQv5/OavuCUAuZmrkWpmYfyD816nom1CvtfXJ/uYfAAjavgHvXHX291tSLK8g== + dependencies: + "@simple-libs/stream-utils" "^1.2.0" + conventional-commits-filter "^5.0.0" + handlebars "^4.7.7" + meow "^13.0.0" + semver "^7.5.2" + conventional-commits-filter@^3.0.0: version "3.0.0" resolved 
"https://registry.yarnpkg.com/conventional-commits-filter/-/conventional-commits-filter-3.0.0.tgz#bf1113266151dd64c49cd269e3eb7d71d7015ee2" @@ -6462,10 +6716,10 @@ conventional-commits-filter@^3.0.0: lodash.ismatch "^4.4.0" modify-values "^1.0.1" -conventional-commits-filter@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/conventional-commits-filter/-/conventional-commits-filter-4.0.0.tgz#845d713e48dc7d1520b84ec182e2773c10c7bf7f" - integrity sha512-rnpnibcSOdFcdclpFwWa+pPlZJhXE7l+XK04zxhbWrhgpR96h33QLz8hITTXbcYICxVr3HZFtbtUAQ+4LdBo9A== +conventional-commits-filter@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/conventional-commits-filter/-/conventional-commits-filter-5.0.0.tgz#72811f95d379e79d2d39d5c0c53c9351ef284e86" + integrity sha512-tQMagCOC59EVgNZcC5zl7XqO30Wki9i9J3acbUvkaosCT6JX3EeFwJD7Qqp4MCikRnzS18WXV3BLIQ66ytu6+Q== conventional-commits-parser@^4.0.0: version "4.0.0" @@ -6477,15 +6731,13 @@ conventional-commits-parser@^4.0.0: meow "^8.1.2" split2 "^3.2.2" -conventional-commits-parser@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/conventional-commits-parser/-/conventional-commits-parser-5.0.0.tgz#57f3594b81ad54d40c1b4280f04554df28627d9a" - integrity sha512-ZPMl0ZJbw74iS9LuX9YIAiW8pfM5p3yh2o/NbXHbkFuZzY5jvdi5jFycEOkmBW5H5I7nA+D6f3UcsCLP2vvSEA== +conventional-commits-parser@^6.0.0: + version "6.3.0" + resolved "https://registry.yarnpkg.com/conventional-commits-parser/-/conventional-commits-parser-6.3.0.tgz#fc170753ca66f31940a438539bf48e4406ac54b5" + integrity sha512-RfOq/Cqy9xV9bOA8N+ZH6DlrDR+5S3Mi0B5kACEjESpE+AviIpAptx9a9cFpWCCvgRtWT+0BbUw+e1BZfts9jg== dependencies: - JSONStream "^1.3.5" - is-text-path "^2.0.0" - meow "^12.0.1" - split2 "^4.0.0" + "@simple-libs/stream-utils" "^1.2.0" + meow "^13.0.0" conventional-recommended-bump@7.0.1: version "7.0.1" @@ -6500,6 +6752,11 @@ conventional-recommended-bump@7.0.1: git-semver-tags "^5.0.0" meow "^8.1.2" +convert-hrtime@^5.0.0: + version "5.0.0" + 
resolved "https://registry.yarnpkg.com/convert-hrtime/-/convert-hrtime-5.0.0.tgz#f2131236d4598b95de856926a67100a0a97e9fa3" + integrity sha512-lOETlkIeYSJWcbbcvjRKGxVMXJR+8+OQb/mTPbA4ObPMytYIsUbuOE0Jzy60hjARYszq1id0j8KgVhC+WGZVTg== + convert-source-map@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-2.0.0.tgz#4b560f649fc4e918dd0ab75cf4961e8bc882d82a" @@ -6596,6 +6853,16 @@ cosmiconfig@^8.0.0, cosmiconfig@^8.3.6: parse-json "^5.2.0" path-type "^4.0.0" +cosmiconfig@^9.0.0: + version "9.0.1" + resolved "https://registry.yarnpkg.com/cosmiconfig/-/cosmiconfig-9.0.1.tgz#df110631a8547b5d1a98915271986f06e3011379" + integrity sha512-hr4ihw+DBqcvrsEDioRO31Z17x71pUYoNe/4h6Z0wB72p7MU7/9gH8Q3s12NFhHPfYBBOV3qyfUxmr/Yn3shnQ== + dependencies: + env-paths "^2.2.1" + import-fresh "^3.3.0" + js-yaml "^4.1.0" + parse-json "^5.2.0" + cpu-features@~0.0.8, cpu-features@~0.0.9: version "0.0.9" resolved "https://registry.yarnpkg.com/cpu-features/-/cpu-features-0.0.9.tgz#5226b92f0f1c63122b0a3eb84cb8335a4de499fc" @@ -6936,10 +7203,10 @@ diff@^4.0.1: resolved "https://registry.yarnpkg.com/diff/-/diff-4.0.2.tgz#60f3aecb89d5fae520c11aa19efc2bb982aade7d" integrity sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A== -diff@^5.1.0: - version "5.2.2" - resolved "https://registry.yarnpkg.com/diff/-/diff-5.2.2.tgz#0a4742797281d09cfa699b79ea32d27723623bad" - integrity sha512-vtcDfH3TOjP8UekytvnHH1o1P4FcUdt4eQ1Y+Abap1tk/OB2MWQvcwS2ClCd1zuIhc3JKOx6p3kod8Vfys3E+A== +diff@^8.0.2: + version "8.0.4" + resolved "https://registry.yarnpkg.com/diff/-/diff-8.0.4.tgz#4f5baf3188b9b2431117b962eb20ba330fadf696" + integrity sha512-DPi0FmjiSU5EvQV0++GFDOJ9ASQUVFh5kD+OzOnYdi7n3Wpm9hWWGfB/O2blfHcMVTL5WkQXSnRiK9makhrcnw== dir-glob@^3.0.0, dir-glob@^3.0.1: version "3.0.1" @@ -7064,11 +7331,6 @@ duplexer2@~0.1.0: dependencies: readable-stream "^2.0.2" -eastasianwidth@^0.2.0: - version "0.2.0" - resolved 
"https://registry.yarnpkg.com/eastasianwidth/-/eastasianwidth-0.2.0.tgz#696ce2ec0aa0e6ea93a397ffcf24aa7840c827cb" - integrity sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA== - ecdsa-sig-formatter@1.0.11: version "1.0.11" resolved "https://registry.yarnpkg.com/ecdsa-sig-formatter/-/ecdsa-sig-formatter-1.0.11.tgz#ae0f0fa2d85045ef14a817daa3ce9acd0489e5bf" @@ -7098,20 +7360,25 @@ electron-to-chromium@^1.5.218: resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.5.220.tgz#a9853fa5edcf51f4c7db369144377cf31d783b8f" integrity sha512-TWXijEwR1ggr4BdAKrb1nMNqYLTx1/4aD1fkeZU+FVJGTKu53/T7UyHKXlqEX3Ub02csyHePbHmkvnrjcaYzMA== -emittery@^0.13.1: +emittery@^0.13.0, emittery@^0.13.1: version "0.13.1" resolved "https://registry.yarnpkg.com/emittery/-/emittery-0.13.1.tgz#c04b8c3457490e0847ae51fced3af52d338e3dad" integrity sha512-DeWwawk6r5yR9jFgnDKYt4sLS0LmHJJi3ZOnb5/JdbYwj3nW+FxQnHIjhBKz8YLC7oRNPVM9NQ47I3CVx34eqQ== +emoji-regex@^10.3.0: + version "10.6.0" + resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-10.6.0.tgz#bf3d6e8f7f8fd22a65d9703475bc0147357a6b0d" + integrity sha512-toUI84YS5YmxW219erniWD0CIVOo46xGKColeNQRgOzDorgBi1v4D71/OFzgD9GO2UGKIv1C3Sp8DAn0+j5w7A== + emoji-regex@^8.0.0: version "8.0.0" resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-8.0.0.tgz#e818fd69ce5ccfcb404594f842963bf53164cc37" integrity sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A== -emoji-regex@^9.2.2: - version "9.2.2" - resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-9.2.2.tgz#840c8803b0d8047f4ff0cf963176b32d4ef3ed72" - integrity sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg== +emojilib@^2.4.0: + version "2.4.0" + resolved "https://registry.yarnpkg.com/emojilib/-/emojilib-2.4.0.tgz#ac518a8bb0d5f76dda57289ccb2fdf9d39ae721e" + integrity 
sha512-5U0rVMU5Y2n2+ykNLQqMoqklN9ICBT/KsvC1Gz6vqHbz2AXXGkG+Pm5rMWk/8Vjrr/mY9985Hi8DYzn1F09Nyw== encodeurl@^1.0.2, encodeurl@~1.0.2: version "1.0.2" @@ -7149,12 +7416,12 @@ entities@^4.2.0, entities@^4.4.0: resolved "https://registry.yarnpkg.com/entities/-/entities-4.5.0.tgz#5d268ea5e7113ec74c4d033b79ea5a35a488fb48" integrity sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw== -env-ci@^9.0.0: - version "9.1.1" - resolved "https://registry.yarnpkg.com/env-ci/-/env-ci-9.1.1.tgz#f081684c64a639c6ff5cb801bd70464bd40498a4" - integrity sha512-Im2yEWeF4b2RAMAaWvGioXk6m0UNaIjD8hj28j2ij5ldnIFrDQT0+pzDvpbRkcjurhXhf/AsBKv8P2rtmGi9Aw== +env-ci@^11.0.0, env-ci@^11.2.0: + version "11.2.0" + resolved "https://registry.yarnpkg.com/env-ci/-/env-ci-11.2.0.tgz#e7386afdf752962c587e7f3d3fb64d87d68e82c6" + integrity sha512-D5kWfzkmaOQDioPmiviWAVtKmpPT4/iJmMVQxWxMPJTFyTkdc5JQUfc5iXEeWxcOdsYTKSAiA/Age4NUOqKsRA== dependencies: - execa "^7.0.0" + execa "^8.0.0" java-properties "^1.0.2" env-paths@^2.2.0, env-paths@^2.2.1: @@ -7167,6 +7434,11 @@ envinfo@7.13.0: resolved "https://registry.yarnpkg.com/envinfo/-/envinfo-7.13.0.tgz#81fbb81e5da35d74e814941aeab7c325a606fb31" integrity sha512-cvcaMr7KqXVh4nyzGTVqTum+gAiL265x5jUWQIDLq//zOGbW+gSW/C+OWLleY/rs9Qole6AZLMXPbtIFQbqu+Q== +environment@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/environment/-/environment-1.1.0.tgz#8e86c66b180f363c7ab311787e0259665f45a9f1" + integrity sha512-xUtoPkMggbz0MPyPiIWr1Kp4aeWJjDZ6SMvURhimjdZgsRuDplF5/s9hcgGhyXMhs+6vpnuoiZ2kFiu3FMnS8Q== + err-code@^2.0.2: version "2.0.3" resolved "https://registry.yarnpkg.com/err-code/-/err-code-2.0.3.tgz#23c2f3b756ffdfc608d30e27c9a941024807e7f9" @@ -7179,13 +7451,6 @@ error-ex@^1.3.1: dependencies: is-arrayish "^0.2.1" -error-ex@^1.3.2: - version "1.3.4" - resolved "https://registry.yarnpkg.com/error-ex/-/error-ex-1.3.4.tgz#b3a8d8bb6f92eecc1629e3e27d3c8607a8a32414" - integrity 
sha512-sqQamAnR14VgCr1A618A3sGrygcpK+HEbenA/HiEAkkUwcZIIB/tgWqHFxWgOyDh4nB4JCRimh79dR5Ywc9MDQ== - dependencies: - is-arrayish "^0.2.1" - es-abstract@^1.22.1: version "1.22.3" resolved "https://registry.yarnpkg.com/es-abstract/-/es-abstract-1.22.3.tgz#48e79f5573198de6dee3589195727f4f74bc4f32" @@ -7416,7 +7681,7 @@ escape-string-regexp@4.0.0, escape-string-regexp@^4.0.0: resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz#14ba83a5d373e3d311e5afca29cf5bfad965bf34" integrity sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA== -escape-string-regexp@5.0.0, escape-string-regexp@^5.0.0: +escape-string-regexp@5.0.0: version "5.0.0" resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz#4683126b500b61762f2dbebace1806e8be31b1c8" integrity sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw== @@ -7746,7 +8011,7 @@ execa@^5.0.0: signal-exit "^3.0.3" strip-final-newline "^2.0.0" -execa@^7.0.0, execa@^7.1.1: +execa@^7.1.1: version "7.2.0" resolved "https://registry.yarnpkg.com/execa/-/execa-7.2.0.tgz#657e75ba984f42a70f38928cedc87d6f2d4fe4e9" integrity sha512-UduyVP7TLB5IcAQl+OzLyLcS/l32W/GLg+AhHJ+ow40FOk2U3SAllPwR44v4vmdFwIWqpdwxxpQbF1n5ta9seA== @@ -7776,6 +8041,24 @@ execa@^8.0.0: signal-exit "^4.1.0" strip-final-newline "^3.0.0" +execa@^9.0.0: + version "9.6.1" + resolved "https://registry.yarnpkg.com/execa/-/execa-9.6.1.tgz#5b90acedc6bdc0fa9b9a6ddf8f9cbb0c75a7c471" + integrity sha512-9Be3ZoN4LmYR90tUoVu2te2BsbzHfhJyfEiAVfz7N5/zv+jduIfLrV2xdQXOHbaD6KgpGdO9PRPM1Y4Q9QkPkA== + dependencies: + "@sindresorhus/merge-streams" "^4.0.0" + cross-spawn "^7.0.6" + figures "^6.1.0" + get-stream "^9.0.0" + human-signals "^8.0.1" + is-plain-obj "^4.1.0" + is-stream "^4.0.1" + npm-run-path "^6.0.0" + pretty-ms "^9.2.0" + signal-exit "^4.1.0" + strip-final-newline "^4.0.0" + yoctocolors "^2.1.1" + exit@^0.1.2: version "0.1.2" 
resolved "https://registry.yarnpkg.com/exit/-/exit-0.1.2.tgz#0632638f8d877cc82107d30a0fff1a17cba1cd0c" @@ -7981,6 +8264,11 @@ fast-content-type-parse@^1.0.0, fast-content-type-parse@^1.1.0: resolved "https://registry.yarnpkg.com/fast-content-type-parse/-/fast-content-type-parse-1.1.0.tgz#4087162bf5af3294d4726ff29b334f72e3a1092c" integrity sha512-fBHHqSTFLVnR61C+gltJuE5GkVQMV0S2nqUO8TJ+5Z3qAKG8vAx4FKai1s5jq/inV1+sREynIWSuQ6HgoSXpDQ== +fast-content-type-parse@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/fast-content-type-parse/-/fast-content-type-parse-3.0.0.tgz#5590b6c807cc598be125e6740a9fde589d2b7afb" + integrity sha512-ZvLdcY8P+N8mGQJahJV5G4U88CSvT1rP8ApL6uETe88MBXrBHAkZlSEySdUlyztF7ccb+Znos3TFqaepHxdhBg== + fast-decode-uri-component@^1.0.0, fast-decode-uri-component@^1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/fast-decode-uri-component/-/fast-decode-uri-component-1.0.1.tgz#46f8b6c22b30ff7a81357d4f59abfae938202543" @@ -8007,7 +8295,7 @@ fast-glob@^3.2.9: merge2 "^1.3.0" micromatch "^4.0.4" -fast-glob@^3.3.2, fast-glob@^3.3.3: +fast-glob@^3.3.2: version "3.3.3" resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-3.3.3.tgz#d06d585ce8dba90a16b0505c543c3ccfb3aeb818" integrity sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg== @@ -8112,18 +8400,21 @@ fast-uri@^3.0.1: resolved "https://registry.yarnpkg.com/fast-uri/-/fast-uri-3.1.0.tgz#66eecff6c764c0df9b762e62ca7edcfb53b4edfa" integrity sha512-iPeeDKJSWf4IEOasVVrknXpaBV0IApz/gp7S2bb7Z4Lljbl2MGJRqInZiUrQwV16cpzw/D3S5j5Julj/gT52AA== -fast-xml-builder@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/fast-xml-builder/-/fast-xml-builder-1.0.0.tgz#a485d7e8381f1db983cf006f849d1066e2935241" - integrity sha512-fpZuDogrAgnyt9oDDz+5DBz0zgPdPZz6D4IR7iESxRXElrlGTRkHJ9eEt+SACRJwT0FNFrt71DFQIUFBJfX/uQ== +fast-xml-builder@^1.1.4: + version "1.1.4" + resolved 
"https://registry.yarnpkg.com/fast-xml-builder/-/fast-xml-builder-1.1.4.tgz#0c407a1d9d5996336c0cd76f7ff785cac6413017" + integrity sha512-f2jhpN4Eccy0/Uz9csxh3Nu6q4ErKxf0XIsasomfOihuSUa3/xw6w8dnOtCDgEItQFJG8KyXPzQXzcODDrrbOg== + dependencies: + path-expression-matcher "^1.1.3" -fast-xml-parser@5.4.1: - version "5.4.1" - resolved "https://registry.yarnpkg.com/fast-xml-parser/-/fast-xml-parser-5.4.1.tgz#0c81b8ecfb3021e5ad83aa3df904af19a05bc601" - integrity sha512-BQ30U1mKkvXQXXkAGcuyUA/GA26oEB7NzOtsxCDtyu62sjGw5QraKFhx2Em3WQNjPw9PG6MQ9yuIIgkSDfGu5A== +fast-xml-parser@5.5.8: + version "5.5.8" + resolved "https://registry.yarnpkg.com/fast-xml-parser/-/fast-xml-parser-5.5.8.tgz#929571ed8c5eb96e6d9bd572ba14fc4b84875716" + integrity sha512-Z7Fh2nVQSb2d+poDViM063ix2ZGt9jmY1nWhPfHBOK2Hgnb/OW3P4Et3P/81SEej0J7QbWtJqxO05h8QYfK7LQ== dependencies: - fast-xml-builder "^1.0.0" - strnum "^2.1.2" + fast-xml-builder "^1.1.4" + path-expression-matcher "^1.2.0" + strnum "^2.2.0" fastest-levenshtein@^1.0.16, fastest-levenshtein@^1.0.7: version "1.0.16" @@ -8252,6 +8543,11 @@ fdir@^6.4.3: resolved "https://registry.yarnpkg.com/fdir/-/fdir-6.4.6.tgz#2b268c0232697063111bbf3f64810a2a741ba281" integrity sha512-hiFoqpyZcfNm1yc4u8oWCf9A2c4D3QjCrks3zmoVKVxpQRzmPNar1hUJcBG2RQHvEVGDN+Jm81ZheVLAQMK6+w== +fdir@^6.5.0: + version "6.5.0" + resolved "https://registry.yarnpkg.com/fdir/-/fdir-6.5.0.tgz#ed2ab967a331ade62f18d077dae192684d50d350" + integrity sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg== + figures@3.2.0, figures@^3.0.0: version "3.2.0" resolved "https://registry.yarnpkg.com/figures/-/figures-3.2.0.tgz#625c18bd293c604dc4a8ddb2febf0c88341746af" @@ -8266,13 +8562,12 @@ figures@^2.0.0: dependencies: escape-string-regexp "^1.0.5" -figures@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/figures/-/figures-5.0.0.tgz#126cd055052dea699f8a54e8c9450e6ecfc44d5f" - integrity 
sha512-ej8ksPF4x6e5wvK9yevct0UCXh8TTFlWGVLlgjZuoBH1HwjIfKE/IdL5mq89sFA7zELi1VhKpmtDnrs7zWyeyg== +figures@^6.0.0, figures@^6.1.0: + version "6.1.0" + resolved "https://registry.yarnpkg.com/figures/-/figures-6.1.0.tgz#935479f51865fa7479f6fa94fc6fc7ac14e62c4a" + integrity sha512-d+l3qxjSesT4V7v2fh+QnmFnUWv9lSpjarhShNTgBOfA0ttejbQUAlHLitbjkoRiDulW0OPoQPYIGhIC8ohejg== dependencies: - escape-string-regexp "^5.0.0" - is-unicode-supported "^1.2.0" + is-unicode-supported "^2.0.0" file-entry-cache@^6.0.1: version "6.0.1" @@ -8375,6 +8670,11 @@ find-my-way@^8.0.0: fast-querystring "^1.0.0" safe-regex2 "^3.1.0" +find-up-simple@^1.0.0, find-up-simple@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/find-up-simple/-/find-up-simple-1.0.1.tgz#18fb90ad49e45252c4d7fca56baade04fa3fca1e" + integrity sha512-afd4O7zpqHeRyg4PfDQsXmlDe2PfdHtJt6Akt8jOWaApLOZk5JXs6VMR29lz03pRe9mpykrRCYIYxaJYcfpncQ== + find-up@^2.0.0: version "2.1.0" resolved "https://registry.yarnpkg.com/find-up/-/find-up-2.1.0.tgz#45d1b7e506c717ddd482775a2b77920a3c0c57a7" @@ -8398,20 +8698,13 @@ find-up@^5.0.0: locate-path "^6.0.0" path-exists "^4.0.0" -find-up@^6.3.0: - version "6.3.0" - resolved "https://registry.yarnpkg.com/find-up/-/find-up-6.3.0.tgz#2abab3d3280b2dc7ac10199ef324c4e002c8c790" - integrity sha512-v2ZsoEuVHYy8ZIlYqwPe/39Cy+cFDzp4dXPaxNvkEuouymu+2Jbz0PxpKarJHYJTmv2HWT3O382qY8l4jMWthw== - dependencies: - locate-path "^7.1.0" - path-exists "^5.0.0" - -find-versions@^5.1.0: - version "5.1.0" - resolved "https://registry.yarnpkg.com/find-versions/-/find-versions-5.1.0.tgz#973f6739ce20f5e439a27eba8542a4b236c8e685" - integrity sha512-+iwzCJ7C5v5KgcBuueqVoNiHVoQpwiUK5XFLjf0affFTep+Wcw93tPvmb8tqujDNmzhBDPddnWV/qgWSXgq+Hg== +find-versions@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/find-versions/-/find-versions-6.0.0.tgz#fda285d3bb7c0c098f09e0727c54d31735f0c7d1" + integrity sha512-2kCCtc+JvcZ86IGAz3Z2Y0A1baIz9fL31pH/0S1IqZr9Iwnjq8izfPtrCyQKO6TLMPELLsQMre7VDqeIKCsHkA== 
dependencies: semver-regex "^4.0.5" + super-regex "^1.0.0" fishery@^2.2.2: version "2.2.2" @@ -8463,14 +8756,6 @@ for-each@^0.3.5: dependencies: is-callable "^1.2.7" -foreground-child@^3.1.0: - version "3.3.1" - resolved "https://registry.yarnpkg.com/foreground-child/-/foreground-child-3.3.1.tgz#32e8e9ed1b68a3497befb9ac2b6adf92a638576f" - integrity sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw== - dependencies: - cross-spawn "^7.0.6" - signal-exit "^4.0.1" - forest-cli@5.3.8: version "5.3.8" resolved "https://registry.yarnpkg.com/forest-cli/-/forest-cli-5.3.8.tgz#61956f4c7363e7de50649dc222c4eea10d47d4c2" @@ -8608,7 +8893,16 @@ fs-extra@^11.2.0: jsonfile "^6.0.1" universalify "^2.0.0" -fs-minipass@^2.0.0, fs-minipass@^2.1.0: +fs-extra@~11.3.0: + version "11.3.4" + resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-11.3.4.tgz#ab6934eca8bcf6f7f6b82742e33591f86301d6fc" + integrity sha512-CTXd6rk/M3/ULNQj8FBqBWHYBVYybQ3VPBw0xGKFe3tuH7ytT6ACnvzpIQ3UZtB8yvUKC2cXn1a+x+5EVQLovA== + dependencies: + graceful-fs "^4.2.0" + jsonfile "^6.0.1" + universalify "^2.0.0" + +fs-minipass@^2.0.0: version "2.1.0" resolved "https://registry.yarnpkg.com/fs-minipass/-/fs-minipass-2.1.0.tgz#7f5036fdbf12c63c169190cbe4199c852271f9fb" integrity sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg== @@ -8637,6 +8931,11 @@ function-bind@^1.1.2: resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.2.tgz#2c02d864d97f3ea6c8830c464cbd11ab6eab7a1c" integrity sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA== +function-timeout@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/function-timeout/-/function-timeout-1.0.2.tgz#e5a7b6ffa523756ff20e1231bbe37b5f373aadd5" + integrity sha512-939eZS4gJ3htTHAldmyyuzlrD58P03fHG49v2JfFXbV6OhvZKRC9j2yAtdHw/zrp2zXHuv05zMIy40F0ge7spA== + function.prototype.name@^1.1.6: version "1.1.6" resolved 
"https://registry.yarnpkg.com/function.prototype.name/-/function.prototype.name-1.1.6.tgz#cdf315b7d90ee77a4c6ee216c3c3362da07533fd" @@ -8693,20 +8992,6 @@ gauge@^4.0.3: strip-ansi "^6.0.1" wide-align "^1.1.5" -gauge@^5.0.0: - version "5.0.2" - resolved "https://registry.yarnpkg.com/gauge/-/gauge-5.0.2.tgz#7ab44c11181da9766333f10db8cd1e4b17fd6c46" - integrity sha512-pMaFftXPtiGIHCJHdcUUx9Rby/rFT/Kkt3fIIGCs+9PMDIljSyRiqraTlxNtBReJRDfUefpa263RQ3vnp5G/LQ== - dependencies: - aproba "^1.0.3 || ^2.0.0" - color-support "^1.1.3" - console-control-strings "^1.1.0" - has-unicode "^2.0.1" - signal-exit "^4.0.1" - string-width "^4.2.3" - strip-ansi "^6.0.1" - wide-align "^1.1.5" - generate-function@^2.3.1: version "2.3.1" resolved "https://registry.yarnpkg.com/generate-function/-/generate-function-2.3.1.tgz#f069617690c10c868e73b8465746764f97c3479f" @@ -8729,6 +9014,11 @@ get-caller-file@^2.0.5: resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-2.0.5.tgz#4f94412a82db32f36e3b0b9741f8a97feb031f7e" integrity sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg== +get-east-asian-width@^1.0.0: + version "1.5.0" + resolved "https://registry.yarnpkg.com/get-east-asian-width/-/get-east-asian-width-1.5.0.tgz#ce7008fe345edcf5497a6f557cfa54bc318a9ce7" + integrity sha512-CQ+bEO+Tva/qlmw24dCejulK5pMzVnUOFOijVogd3KQs07HnRIgp8TGipvCCRT06xeYEbpbgwaCxglFyiuIcmA== + get-intrinsic@^1.0.2, get-intrinsic@^1.1.1, get-intrinsic@^1.1.3, get-intrinsic@^1.2.0, get-intrinsic@^1.2.1, get-intrinsic@^1.2.2: version "1.2.2" resolved "https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.2.2.tgz#281b7622971123e1ef4b3c90fd7539306da93f3b" @@ -8821,6 +9111,14 @@ get-stream@^8.0.1: resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-8.0.1.tgz#def9dfd71742cd7754a7761ed43749a27d02eca2" integrity sha512-VaUJspBffn/LMCJVoMvSAdmscJyS1auj5Zulnn5UoYcY531UWmdwhRWkcGKnGU93m5HSXP9LP2usOryrBtQowA== +get-stream@^9.0.0: + version "9.0.1" + 
resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-9.0.1.tgz#95157d21df8eb90d1647102b63039b1df60ebd27" + integrity sha512-kVCxPF3vQM/N0B1PmoqVUqgHP+EeVjmZSQn+1oCRPxd2P21P2F19lIgbR3HBosbB1PUhOAoctJnfEn2GbN2eZA== + dependencies: + "@sec-ant/readable-stream" "^0.4.1" + is-stream "^4.0.1" + get-symbol-description@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/get-symbol-description/-/get-symbol-description-1.0.0.tgz#7fdb81c900101fbd564dd5f1a30af5aadc1e58d6" @@ -8927,17 +9225,14 @@ glob-parent@^5.1.2, glob-parent@~5.1.2: dependencies: is-glob "^4.0.1" -glob@^10.2.2, glob@^10.3.10: - version "10.5.0" - resolved "https://registry.yarnpkg.com/glob/-/glob-10.5.0.tgz#8ec0355919cd3338c28428a23d4f24ecc5fe738c" - integrity sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg== +glob@>=10.5.0, glob@^10.2.2, glob@^10.3.10, glob@^13.0.0, glob@^13.0.6, glob@^9.2.0: + version "13.0.6" + resolved "https://registry.yarnpkg.com/glob/-/glob-13.0.6.tgz#078666566a425147ccacfbd2e332deb66a2be71d" + integrity sha512-Wjlyrolmm8uDpm/ogGyXZXb1Z+Ca2B8NbJwqBVg0axK9GbBeoS7yGV6vjXnYdGm6X53iehEuxxbyiKp8QmN4Vw== dependencies: - foreground-child "^3.1.0" - jackspeak "^3.1.2" - minimatch "^9.0.4" - minipass "^7.1.2" - package-json-from-dist "^1.0.0" - path-scurry "^1.11.1" + minimatch "^10.2.2" + minipass "^7.1.3" + path-scurry "^2.0.2" glob@^7.1.3, glob@^7.1.4: version "7.2.3" @@ -8951,27 +9246,6 @@ glob@^7.1.3, glob@^7.1.4: once "^1.3.0" path-is-absolute "^1.0.0" -glob@^8.0.1: - version "8.1.0" - resolved "https://registry.yarnpkg.com/glob/-/glob-8.1.0.tgz#d388f656593ef708ee3e34640fdfb99a9fd1c33e" - integrity sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ== - dependencies: - fs.realpath "^1.0.0" - inflight "^1.0.4" - inherits "2" - minimatch "^5.0.1" - once "^1.3.0" - -glob@^9.2.0: - version "9.3.5" - resolved 
"https://registry.yarnpkg.com/glob/-/glob-9.3.5.tgz#ca2ed8ca452781a3009685607fdf025a899dfe21" - integrity sha512-e1LleDykUz2Iu+MTYdkSsuWX8lvAjAcs0Xef0lNIu0S2wOAzuTxCJtcd9S3cijlwYF18EsU3rzb8jPVobxDh9Q== - dependencies: - fs.realpath "^1.0.0" - minimatch "^8.0.2" - minipass "^4.2.4" - path-scurry "^1.6.1" - global-dirs@^0.1.1: version "0.1.1" resolved "https://registry.yarnpkg.com/global-dirs/-/global-dirs-0.1.1.tgz#b319c0dd4607f353f3be9cca4c72fc148c49f445" @@ -9018,18 +9292,6 @@ globby@^11.1.0: merge2 "^1.4.1" slash "^3.0.0" -globby@^14.0.0: - version "14.1.0" - resolved "https://registry.yarnpkg.com/globby/-/globby-14.1.0.tgz#138b78e77cf5a8d794e327b15dce80bf1fb0a73e" - integrity sha512-0Ia46fDOaT7k4og1PDW4YbodWWr3scS2vAr2lTbsplOt2WkKp0vQbkI9wKis/T5LV/dqPjO3bpS/z6GTJB82LA== - dependencies: - "@sindresorhus/merge-streams" "^2.1.0" - fast-glob "^3.3.3" - ignore "^7.0.3" - path-type "^6.0.0" - slash "^5.1.0" - unicorn-magic "^0.3.0" - gopd@^1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/gopd/-/gopd-1.0.1.tgz#29ff76de69dac7489b7c0918a5788e56477c332c" @@ -9201,15 +9463,20 @@ hasown@^2.0.2: resolved "https://registry.yarnpkg.com/heap/-/heap-0.2.7.tgz#1e6adf711d3f27ce35a81fe3b7bd576c2260a8fc" integrity sha512-2bsegYkkHO+h/9MGbn6KWcE45cHZgPANo5LXF7EvWdT0yT2EguSVO1nDgU5c8+ZOPwp2vMNa7YFsJhVcDR9Sdg== -hono@^4.11.4: - version "4.12.3" - resolved "https://registry.yarnpkg.com/hono/-/hono-4.12.3.tgz#fd8dd1127c30956a9d58c1b0c4535d21c1ef3e16" - integrity sha512-SFsVSjp8sj5UumXOOFlkZOG6XS9SJDKw0TbwFeV+AJ8xlST8kxK5Z/5EYa111UY8732lK2S/xB653ceuaoGwpg== +highlight.js@^10.7.1: + version "10.7.3" + resolved "https://registry.yarnpkg.com/highlight.js/-/highlight.js-10.7.3.tgz#697272e3991356e40c3cac566a74eef681756531" + integrity sha512-tzcUFauisWKNHaRkN4Wjl/ZA07gENAjFl3J/c480dprkGTg5EQstgaNFqBfUqCq54kZRIEcreTsAgF/m2quD7A== -hook-std@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/hook-std/-/hook-std-3.0.0.tgz#47038a01981e07ce9d83a6a3b2eb98cad0f7bd58" - 
integrity sha512-jHRQzjSDzMtFy34AGj1DN+vq54WVuhSvKgrHf0OMiFQTwDD4L/qqofVEWjLOBMTn5+lCD3fPg32W9yOfnEJTTw== +hono@>=4.11.10, hono@^4.11.4: + version "4.12.9" + resolved "https://registry.yarnpkg.com/hono/-/hono-4.12.9.tgz#7cd59dec4abf02022f5baad87f6413a04081144c" + integrity sha512-wy3T8Zm2bsEvxKZM5w21VdHDDcwVS1yUFFY6i8UobSsKfFceT7TOwhbhfKsDyx7tYQlmRM5FLpIuYvNFyjctiA== + +hook-std@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/hook-std/-/hook-std-4.0.0.tgz#8ad817e2405f0634fa128822a8b27054a8120262" + integrity sha512-IHI4bEVOt3vRUDJ+bFA9VUJlo7SzvFARPNLw75pqSmAOP2HmTWfFJtPvLBrDrlgjEYXY9zs7SFdHPQaJShkSCQ== hosted-git-info@^2.1.4: version "2.8.9" @@ -9223,13 +9490,6 @@ hosted-git-info@^4.0.0, hosted-git-info@^4.0.1: dependencies: lru-cache "^6.0.0" -hosted-git-info@^6.0.0, hosted-git-info@^6.1.1, hosted-git-info@^6.1.3: - version "6.1.3" - resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-6.1.3.tgz#2ee1a14a097a1236bddf8672c35b613c46c55946" - integrity sha512-HVJyzUrLIL1c0QmviVh5E8VGyUS7xCFPS6yydaVd1UegW+ibV/CohqTH9MkOLDp5o+rb82DMo77PTuc9F/8GKw== - dependencies: - lru-cache "^7.5.1" - hosted-git-info@^7.0.0, hosted-git-info@^7.0.2: version "7.0.2" resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-7.0.2.tgz#9b751acac097757667f30114607ef7b661ff4f17" @@ -9237,6 +9497,13 @@ hosted-git-info@^7.0.0, hosted-git-info@^7.0.2: dependencies: lru-cache "^10.0.1" +hosted-git-info@^9.0.0, hosted-git-info@^9.0.2: + version "9.0.2" + resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-9.0.2.tgz#b38c8a802b274e275eeeccf9f4a1b1a0a8557ada" + integrity sha512-M422h7o/BR3rmCQ8UHi7cyyMqKltdP9Uo+J2fXK+RSAY+wTcKOIRyhTuKv4qn+DJf3g+PL890AzId5KZpX+CBg== + dependencies: + lru-cache "^11.1.0" + html-escaper@^2.0.0: version "2.0.2" resolved "https://registry.yarnpkg.com/html-escaper/-/html-escaper-2.0.2.tgz#dfd60027da36a36dfcbe236262c00a5822681453" @@ -9382,6 +9649,11 @@ human-signals@^5.0.0: resolved 
"https://registry.yarnpkg.com/human-signals/-/human-signals-5.0.0.tgz#42665a284f9ae0dade3ba41ebc37eb4b852f3a28" integrity sha512-AXcZb6vzzrFAUE61HnN4mpLqd/cSIwNQjtNWR0euPm6y0iqx3G4gOXaIDdtdDwZmhwe82LA6+zinmW4UBWVePQ== +human-signals@^8.0.1: + version "8.0.1" + resolved "https://registry.yarnpkg.com/human-signals/-/human-signals-8.0.1.tgz#f08bb593b6d1db353933d06156cedec90abe51fb" + integrity sha512-eKCa6bwnJhvxj14kZk5NCPc6Hb6BdsU9DZcOnmQKSnO1VKrfV0zCvtttPZUsBvjmNDn8rpcJfpwSYnHBjc95MQ== + humanize-ms@^1.2.1: version "1.2.1" resolved "https://registry.yarnpkg.com/humanize-ms/-/humanize-ms-1.2.1.tgz#c46e3159a293f6b896da29316d8b6fe8bb79bbed" @@ -9413,7 +9685,7 @@ iconv-lite@^0.6.2, iconv-lite@^0.6.3: dependencies: safer-buffer ">= 2.1.2 < 3.0.0" -iconv-lite@^0.7.0: +iconv-lite@^0.7.0, iconv-lite@^0.7.2: version "0.7.2" resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.7.2.tgz#d0bdeac3f12b4835b7359c2ad89c422a4d1cc72e" integrity sha512-im9DjEDQ55s9fL4EYzOAv0yMqmMBSZp6G0VvFyTMPKWxiSBHUj9NW/qqLmXUwXrrM7AvqSlTCfvqRb0cM8yYqw== @@ -9437,23 +9709,25 @@ ignore-by-default@^1.0.1: resolved "https://registry.yarnpkg.com/ignore-by-default/-/ignore-by-default-1.0.1.tgz#48ca6d72f6c6a3af00a9ad4ae6876be3889e2b09" integrity sha512-Ius2VYcGNk7T90CppJqcIkS5ooHUZyIQK+ClZfMfMNFEF9VSE73Fq+906u/CWu92x4gzZMWOwfFYckPObzdEbA== -ignore-walk@^6.0.0, ignore-walk@^6.0.4: +ignore-walk@^6.0.4: version "6.0.5" resolved "https://registry.yarnpkg.com/ignore-walk/-/ignore-walk-6.0.5.tgz#ef8d61eab7da169078723d1f82833b36e200b0dd" integrity sha512-VuuG0wCnjhnylG1ABXT3dAuIpTNDs/G8jlpmwXY03fXoXy/8ZK8/T+hMzt8L4WnrLCJgdybqgPagnF/f97cg3A== dependencies: minimatch "^9.0.0" +ignore-walk@^8.0.0: + version "8.0.0" + resolved "https://registry.yarnpkg.com/ignore-walk/-/ignore-walk-8.0.0.tgz#380c173badc3a18c57ff33440753f0052f572b14" + integrity sha512-FCeMZT4NiRQGh+YkeKMtWrOmBgWjHjMJ26WQWrRQyoyzqevdaGSakUaJW5xQYmjLlUVk2qUnCjYVBax9EKKg8A== + dependencies: + minimatch "^10.0.3" + ignore@^5.0.4, 
ignore@^5.2.0: version "5.3.0" resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.3.0.tgz#67418ae40d34d6999c95ff56016759c718c82f78" integrity sha512-g7dmpshy+gD7mh88OC9NwSGTKoc3kyLAZQRU1mt53Aw/vnvfXnbC+F/7F7QoYVKbV+KNvJx8wArewKy1vXMtlg== -ignore@^7.0.3: - version "7.0.5" - resolved "https://registry.yarnpkg.com/ignore/-/ignore-7.0.5.tgz#4cb5f6cd7d4c7ab0365738c7aea888baa6d7efd9" - integrity sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg== - image-size@^1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/image-size/-/image-size-1.0.2.tgz#d778b6d0ab75b2737c1556dd631652eb963bc486" @@ -9474,10 +9748,18 @@ import-fresh@^3.0.0, import-fresh@^3.2.1, import-fresh@^3.3.0: parent-module "^1.0.0" resolve-from "^4.0.0" -import-from@^4.0.0: +import-from-esm@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/import-from-esm/-/import-from-esm-2.0.0.tgz#184eb9aad4f557573bd6daf967ad5911b537797a" + integrity sha512-YVt14UZCgsX1vZQ3gKjkWVdBdHQ6eu3MPU1TBgL1H5orXe2+jWD006WCPPtOuwlQm10NuzOW5WawiF1Q9veW8g== + dependencies: + debug "^4.3.4" + import-meta-resolve "^4.0.0" + +import-lazy@~4.0.0: version "4.0.0" - resolved "https://registry.yarnpkg.com/import-from/-/import-from-4.0.0.tgz#2710b8d66817d232e16f4166e319248d3d5492e2" - integrity sha512-P9J71vT5nLlDeV8FHs5nNxaLbrpfAV5cF5srvbZfpwpcJoM/xZR3hiv+q+SAnuSmuGbXMWud063iIMx/V/EWZQ== + resolved "https://registry.yarnpkg.com/import-lazy/-/import-lazy-4.0.0.tgz#e8eb627483a0a43da3c03f3e35548be5cb0cc153" + integrity sha512-rKtvo6a868b5Hu3heneU+L4yEQ4jYKLtjpnPeUdK7h0yzXGmyBTypknlkCvHFBqfX9YlorEiMM6Dnq/5atfHkw== import-local@3.1.0, import-local@^3.0.2: version "3.1.0" @@ -9487,6 +9769,11 @@ import-local@3.1.0, import-local@^3.0.2: pkg-dir "^4.2.0" resolve-cwd "^3.0.0" +import-meta-resolve@^4.0.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/import-meta-resolve/-/import-meta-resolve-4.2.0.tgz#08cb85b5bd37ecc8eb1e0f670dc2767002d43734" + integrity 
sha512-Iqv2fzaTQN28s/FwZAoFq0ZSs/7hMAHJVX+w8PZl3cY19Pxk6jFFalxQoIfW2826i/fDLXv8IiEZRIT0lDuWcg== + imurmurhash@^0.1.4: version "0.1.4" resolved "https://registry.yarnpkg.com/imurmurhash/-/imurmurhash-0.1.4.tgz#9218b9b2b928a238b13dc4fb6b6d576f231453ea" @@ -9502,6 +9789,11 @@ indent-string@^5.0.0: resolved "https://registry.yarnpkg.com/indent-string/-/indent-string-5.0.0.tgz#4fd2980fccaf8622d14c64d694f4cf33c81951a5" integrity sha512-m6FAo/spmsW2Ab2fU35JTYwtOKa2yAwXSwgjSv1TJzh4Mh7mC3lzAOVLBprb72XsTrgkEIsl7YrFNAiDiRhIGg== +index-to-position@^1.1.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/index-to-position/-/index-to-position-1.2.0.tgz#c800eb34dacf4dbf96b9b06c7eb78d5f704138b4" + integrity sha512-Yg7+ztRkqslMAS2iFaU+Oa4KTSidr63OsFGlOrJoW981kIYO3CGCS3wA95P1mUi/IVSJkn0D479KTJpVpvFNuw== + infer-owner@^1.0.4: version "1.0.4" resolved "https://registry.yarnpkg.com/infer-owner/-/infer-owner-1.0.4.tgz#c4cefcaa8e51051c2a40ba2ce8a3d27295af9467" @@ -9540,11 +9832,16 @@ ini@^1.3.2, ini@^1.3.4, ini@^1.3.8, ini@~1.3.0: resolved "https://registry.yarnpkg.com/ini/-/ini-1.3.8.tgz#a29da425b48806f34767a4efce397269af28432c" integrity sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew== -ini@^4.1.0, ini@^4.1.1, ini@^4.1.3: +ini@^4.1.3: version "4.1.3" resolved "https://registry.yarnpkg.com/ini/-/ini-4.1.3.tgz#4c359675a6071a46985eb39b14e4a2c0ec98a795" integrity sha512-X7rqawQBvfdjS10YU1y1YVreA3SsLrW9dX2CewP2EbBJM4ypVNLDkO5y04gejPwKIY9lR+7r9gn3rFPt/kmWFg== +ini@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/ini/-/ini-6.0.0.tgz#efc7642b276f6a37d22fdf56ef50889d7146bf30" + integrity sha512-IBTdIkzZNOpqm7q3dRqJvMaldXjDHWkEDfrwGEQTs5eaQMWV+djAhR+wahyNNMAa+qpbDUhBMVt4ZKNwpPm7xQ== + init-package-json@6.0.3: version "6.0.3" resolved "https://registry.yarnpkg.com/init-package-json/-/init-package-json-6.0.3.tgz#2552fba75b6eed2495dc97f44183e2e5a5bcf8b0" @@ -9558,18 +9855,17 @@ init-package-json@6.0.3: 
validate-npm-package-license "^3.0.4" validate-npm-package-name "^5.0.0" -init-package-json@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/init-package-json/-/init-package-json-5.0.0.tgz#030cf0ea9c84cfc1b0dc2e898b45d171393e4b40" - integrity sha512-kBhlSheBfYmq3e0L1ii+VKe3zBTLL5lDCDWR+f9dLmEGSB3MqLlMlsolubSsyI88Bg6EA+BIMlomAnQ1SwgQBw== +init-package-json@^8.2.5: + version "8.2.5" + resolved "https://registry.yarnpkg.com/init-package-json/-/init-package-json-8.2.5.tgz#6e90972b632eb410637a5a532019240ee7227d62" + integrity sha512-IknQ+upLuJU6t3p0uo9wS3GjFD/1GtxIwcIGYOWR8zL2HxQeJwvxYTgZr9brJ8pyZ4kvpkebM8ZKcyqOeLOHSg== dependencies: - npm-package-arg "^10.0.0" - promzard "^1.0.0" - read "^2.0.0" - read-package-json "^6.0.0" - semver "^7.3.5" - validate-npm-package-license "^3.0.4" - validate-npm-package-name "^5.0.0" + "@npmcli/package-json" "^7.0.0" + npm-package-arg "^13.0.0" + promzard "^3.0.1" + read "^5.0.1" + semver "^7.7.2" + validate-npm-package-name "^7.0.0" inquirer@6.2.0: version "6.2.0" @@ -9651,11 +9947,6 @@ ip-address@^5.8.9: lodash "^4.17.15" sprintf-js "1.1.2" -ip-regex@^4.1.0: - version "4.3.0" - resolved "https://registry.yarnpkg.com/ip-regex/-/ip-regex-4.3.0.tgz#687275ab0f57fa76978ff8f4dddc8a23d5990db5" - integrity sha512-B9ZWJxHHOHUhUjCPrMpLD4xEq35bUTClHM1S6CBU5ixQnkZmwipwgc96vAd7AAGM9TGHvJR+Uss+/Ak6UphK+Q== - ip6@0.0.4: version "0.0.4" resolved "https://registry.yarnpkg.com/ip6/-/ip6-0.0.4.tgz#44c5a9db79e39d405201b4d78d13b3870e48db31" @@ -9790,12 +10081,12 @@ is-ci@3.0.1: dependencies: ci-info "^3.2.0" -is-cidr@^4.0.2: - version "4.0.2" - resolved "https://registry.yarnpkg.com/is-cidr/-/is-cidr-4.0.2.tgz#94c7585e4c6c77ceabf920f8cde51b8c0fda8814" - integrity sha512-z4a1ENUajDbEl/Q6/pVBpTR1nBjjEE1X7qb7bmWYanNnPoKAvUCPFKeXV6Fe4mgTkWKBqiHIcwsI3SndiO5FeA== +is-cidr@^6.0.3: + version "6.0.3" + resolved "https://registry.yarnpkg.com/is-cidr/-/is-cidr-6.0.3.tgz#e9b332df01bef4d784a1aef93f920a59caf6b704" + integrity 
sha512-tPdsizbDiISrc4PoII6ZfpmAokx0oDKeYqAUp5bXOfznauOFXfEeosKBRrl0o0SriE4xoRR05Czn4YPCFMjSHA== dependencies: - cidr-regex "^3.1.1" + cidr-regex "^5.0.1" is-core-module@^2.13.0, is-core-module@^2.5.0: version "2.13.1" @@ -9804,7 +10095,7 @@ is-core-module@^2.13.0, is-core-module@^2.5.0: dependencies: hasown "^2.0.0" -is-core-module@^2.16.1, is-core-module@^2.8.1: +is-core-module@^2.16.1: version "2.16.1" resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.16.1.tgz#2a98801a849f43e2add644fbb6bc6229b19a4ef4" integrity sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w== @@ -9965,6 +10256,11 @@ is-plain-obj@^2.0.0: resolved "https://registry.yarnpkg.com/is-plain-obj/-/is-plain-obj-2.1.0.tgz#45e42e37fccf1f40da8e5f76ee21515840c09287" integrity sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA== +is-plain-obj@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/is-plain-obj/-/is-plain-obj-4.1.0.tgz#d65025edec3657ce032fd7db63c97883eaed71f0" + integrity sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg== + is-plain-object@^2.0.4: version "2.0.4" resolved "https://registry.yarnpkg.com/is-plain-object/-/is-plain-object-2.0.4.tgz#2c163b3fafb1b606d9d17928f05c2a1c38e07677" @@ -10063,6 +10359,11 @@ is-stream@^3.0.0: resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-3.0.0.tgz#e6bfd7aa6bef69f4f472ce9bb681e3e57b4319ac" integrity sha512-LnQR4bZ9IADDRSkvpqMGvt/tEJWclzklNgSw48V5EAaAeDd6qGvN8ei6k5p0tvxSR171VmGyHuTiAOfxAbr8kA== +is-stream@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-4.0.1.tgz#375cf891e16d2e4baec250b85926cffc14720d9b" + integrity sha512-Dnz92NInDqYckGEUJv689RbRiTSEHCQ7wOVeALbkOz999YpqT46yMRIGtSNl2iCL1waAZSx40+h59NV/EwzV/A== + is-string@^1.0.5, is-string@^1.0.7: version "1.0.7" resolved 
"https://registry.yarnpkg.com/is-string/-/is-string-1.0.7.tgz#0dd12bf2006f255bb58f695110eff7491eebc0fd" @@ -10101,13 +10402,6 @@ is-text-path@^1.0.1: dependencies: text-extensions "^1.0.0" -is-text-path@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/is-text-path/-/is-text-path-2.0.0.tgz#b2484e2b720a633feb2e85b67dc193ff72c75636" - integrity sha512-+oDTluR6WEjdXEJMnC2z6A4FRwFoYuvShVVEGsS7ewc0UTi2QtAKMDJuL4BDEVt+5T7MjFo12RP8ghOM75oKJw== - dependencies: - text-extensions "^2.0.0" - is-typed-array@^1.1.10, is-typed-array@^1.1.12, is-typed-array@^1.1.9: version "1.1.12" resolved "https://registry.yarnpkg.com/is-typed-array/-/is-typed-array-1.1.12.tgz#d0bab5686ef4a76f7a73097b95470ab199c57d4a" @@ -10134,10 +10428,10 @@ is-unicode-supported@^0.1.0: resolved "https://registry.yarnpkg.com/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz#3f26c76a809593b52bfa2ecb5710ed2779b522a7" integrity sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw== -is-unicode-supported@^1.2.0: - version "1.3.0" - resolved "https://registry.yarnpkg.com/is-unicode-supported/-/is-unicode-supported-1.3.0.tgz#d824984b616c292a2e198207d4a609983842f714" - integrity sha512-43r2mRvz+8JRIKnWJ+3j8JtjRKZ6GmjzfaE/qiBJnikNnYv/6bagRJ1kUhNk8R5EX/GkobD+r+sfxCPJsiKBLQ== +is-unicode-supported@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/is-unicode-supported/-/is-unicode-supported-2.1.0.tgz#09f0ab0de6d3744d48d265ebb98f65d11f2a9b3a" + integrity sha512-mE00Gnza5EEB3Ds0HfMyllZzbBrmLOX3vfWoj9A9PEnTfratQ/BcaJOuMhnkhjXvb2+FkY3VuHqtAGpTPmglFQ== is-weakmap@^2.0.2: version "2.0.2" @@ -10200,15 +10494,20 @@ isexe@^3.1.1: resolved "https://registry.yarnpkg.com/isexe/-/isexe-3.1.1.tgz#4a407e2bd78ddfb14bea0c27c6f7072dde775f0d" integrity sha512-LpB/54B+/2J5hqQ7imZHfdU31OlgQqx7ZicVlkm9kzg9/w8GKLEcFfJl/t7DCEDueOyBAD6zCCwTO6Fzs0NoEQ== +isexe@^4.0.0: + version "4.0.0" + resolved 
"https://registry.yarnpkg.com/isexe/-/isexe-4.0.0.tgz#48f6576af8e87a18feb796b7ed5e2e5903b43dca" + integrity sha512-FFUtZMpoZ8RqHS3XeXEmHWLA4thH+ZxCv2lOiPIn1Xc7CxrqhWzNSDzD+/chS/zbYezmiwWLdQC09JdQKmthOw== + isobject@^3.0.1: version "3.0.1" resolved "https://registry.yarnpkg.com/isobject/-/isobject-3.0.1.tgz#4e431e92b11a9731636aa1f9c8d1ccbcfdab78df" integrity sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg== -issue-parser@^6.0.0: - version "6.0.0" - resolved "https://registry.yarnpkg.com/issue-parser/-/issue-parser-6.0.0.tgz#b1edd06315d4f2044a9755daf85fdafde9b4014a" - integrity sha512-zKa/Dxq2lGsBIXQ7CUZWTHfvxPC2ej0KfO7fIPqLlHB9J2hJ7rGhZ5rilhuufylr4RXYPzJUeFjKxz305OsNlA== +issue-parser@^7.0.0: + version "7.0.1" + resolved "https://registry.yarnpkg.com/issue-parser/-/issue-parser-7.0.1.tgz#8a053e5a4952c75bb216204e454b4fc7d4cc9637" + integrity sha512-3YZcUUR2Wt1WsapF+S/WiA2WmlW0cWAoPccMqne7AxEBhCdFeTPjfv/Axb8V2gyCgY3nRw+ksZ3xSUX+R47iAg== dependencies: lodash.capitalize "^4.2.1" lodash.escaperegexp "^4.1.2" @@ -10279,15 +10578,6 @@ iterare@1.2.1: resolved "https://registry.yarnpkg.com/iterare/-/iterare-1.2.1.tgz#139c400ff7363690e33abffa33cbba8920f00042" integrity sha512-RKYVTCjAnRthyJes037NX/IiqeidgN1xc3j1RjFfECFp28A1GVwK9nA+i0rJPaHqSZwygLzRnFlzUuHFoWWy+Q== -jackspeak@^3.1.2: - version "3.4.3" - resolved "https://registry.yarnpkg.com/jackspeak/-/jackspeak-3.4.3.tgz#8833a9d89ab4acde6188942bd1c53b6390ed5a8a" - integrity sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw== - dependencies: - "@isaacs/cliui" "^8.0.2" - optionalDependencies: - "@pkgjs/parseargs" "^0.11.0" - jake@^10.8.5: version "10.8.7" resolved "https://registry.yarnpkg.com/jake/-/jake-10.8.7.tgz#63a32821177940c33f356e0ba44ff9d34e1c7d8f" @@ -10669,6 +10959,11 @@ jest@^29.3.1: import-local "^3.0.2" jest-cli "^29.7.0" +jju@~1.4.0: + version "1.4.0" + resolved 
"https://registry.yarnpkg.com/jju/-/jju-1.4.0.tgz#a3abe2718af241a2b2904f84a625970f389ae32a" + integrity sha512-8wb9Yw966OSxApiCt0K3yNJL8pnNeIv+OEq2YMidz4FKP6nonSRoOXc80iXY4JaN2FC11B9qsNmDsm+ZOfMROA== + joi@17.12.2, joi@^17.12.2: version "17.12.2" resolved "https://registry.yarnpkg.com/joi/-/joi-17.12.2.tgz#283a664dabb80c7e52943c557aab82faea09f521" @@ -10707,10 +11002,10 @@ js-tokens@^4.0.0: resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-4.0.0.tgz#19203fb59991df98e3a287050d4647cdeaf32499" integrity sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ== -js-yaml@4.1.0, js-yaml@^4.1.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-4.1.0.tgz#c1fb65f8f5017901cdd2c951864ba18458a10602" - integrity sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA== +js-yaml@4.1.0, js-yaml@4.1.1, js-yaml@^4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-4.1.1.tgz#854c292467705b699476e1a2decc0c8a3458806b" + integrity sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA== dependencies: argparse "^2.0.1" @@ -10722,10 +11017,10 @@ js-yaml@^3.10.0, js-yaml@^3.13.1, js-yaml@^3.14.1: argparse "^1.0.7" esprima "^4.0.0" -js-yaml@^4.1.1: - version "4.1.1" - resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-4.1.1.tgz#854c292467705b699476e1a2decc0c8a3458806b" - integrity sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA== +js-yaml@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-4.1.0.tgz#c1fb65f8f5017901cdd2c951864ba18458a10602" + integrity sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA== dependencies: argparse "^2.0.1" @@ -10780,11 +11075,16 @@ json-parse-even-better-errors@^3.0.0: resolved 
"https://registry.yarnpkg.com/json-parse-even-better-errors/-/json-parse-even-better-errors-3.0.0.tgz#2cb2ee33069a78870a0c7e3da560026b89669cf7" integrity sha512-iZbGHafX/59r39gPwVPRBGw0QQKnA7tte5pSMrhWOW7swGsVvVTjmfyAV9pNqk8YGT7tRCdxRu8uzcgZwoDooA== -json-parse-even-better-errors@^3.0.1, json-parse-even-better-errors@^3.0.2: +json-parse-even-better-errors@^3.0.2: version "3.0.2" resolved "https://registry.yarnpkg.com/json-parse-even-better-errors/-/json-parse-even-better-errors-3.0.2.tgz#b43d35e89c0f3be6b5fbbe9dc6c82467b30c28da" integrity sha512-fi0NG4bPjCHunUJffmLd0gxssIgkNmArMvis4iNah6Owg1MCJjWhEcDLmsK6iGkJq3tHwbDkTlce70/tmXN4cQ== +json-parse-even-better-errors@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/json-parse-even-better-errors/-/json-parse-even-better-errors-5.0.0.tgz#93c89f529f022e5dadc233409324f0167b1e903e" + integrity sha512-ZF1nxZ28VhQouRWhUcVlUIN3qwSgPuswK05s/HIaoetAoE/9tngVmCHjSxmSQPav1nd+lPtTL0YZ/2AFdR/iYQ== + json-schema-ref-resolver@^1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/json-schema-ref-resolver/-/json-schema-ref-resolver-1.0.1.tgz#6586f483b76254784fc1d2120f717bdc9f0a99bf" @@ -10835,6 +11135,11 @@ json-stringify-safe@^5.0.1: resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb" integrity sha512-ZClg6AaYvamvYEE82d3Iyd3vSSIjQ+odgjaTzRuO3s7toCdFKczob2i0zCh7JE8kWn17yvAWhUVxvqGwUalsRA== +json-with-bigint@^3.5.3: + version "3.5.8" + resolved "https://registry.yarnpkg.com/json-with-bigint/-/json-with-bigint-3.5.8.tgz#1b1edb55a1bc4816ca87ac684297591acd822383" + integrity sha512-eq/4KP6K34kwa7TcFdtvnftvHCD9KvHOGGICWwMFc4dOOKF5t4iYqnfLK8otCRCRv06FXOzGGyqE8h8ElMvvdw== + json5@^1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/json5/-/json5-1.0.2.tgz#63d98d60f21b313b77c4d6da18bfa69d80e1d593" @@ -11206,78 +11511,70 @@ libnpmaccess@8.0.6: npm-package-arg "^11.0.2" npm-registry-fetch "^17.0.1" -libnpmaccess@^7.0.2: - version 
"7.0.3" - resolved "https://registry.yarnpkg.com/libnpmaccess/-/libnpmaccess-7.0.3.tgz#9878b75c5cf36ddfff167dd47c1a6cf1fa21193c" - integrity sha512-It+fk/NRdRfv5giLhaVeyebGi/0S2LDSAwuZ0AGQ4x//PtCVb2Hj29wgSHe+XEL+RUkvLBkxbRV+DqLtOzuVTQ== - dependencies: - npm-package-arg "^10.1.0" - npm-registry-fetch "^14.0.3" - -libnpmdiff@^5.0.20: - version "5.0.21" - resolved "https://registry.yarnpkg.com/libnpmdiff/-/libnpmdiff-5.0.21.tgz#9d3036595a4cf393e1de07df98a40607a054d333" - integrity sha512-Zx+o/qnGoX46osnInyQQ5KI8jn2wIqXXiu4TJzE8GFd+o6kbyblJf+ihG81M1+yHK3AzkD1m4KK3+UTPXh/hBw== - dependencies: - "@npmcli/arborist" "^6.5.0" - "@npmcli/disparity-colors" "^3.0.0" - "@npmcli/installed-package-contents" "^2.0.2" - binary-extensions "^2.2.0" - diff "^5.1.0" - minimatch "^9.0.0" - npm-package-arg "^10.1.0" - pacote "^15.0.8" - tar "^6.1.13" - -libnpmexec@^6.0.4: - version "6.0.5" - resolved "https://registry.yarnpkg.com/libnpmexec/-/libnpmexec-6.0.5.tgz#36eb7e5a94a653478c8dd66b4a967cadf3f2540d" - integrity sha512-yN/7uJ3iYCPaKagHfrqXuCFLKn2ddcnYpEyC/tVhisHULC95uCy8AhUdNkThRXzhFqqptejO25ZfoWOGrdqnxA== - dependencies: - "@npmcli/arborist" "^6.5.0" - "@npmcli/run-script" "^6.0.0" +libnpmaccess@^10.0.3: + version "10.0.3" + resolved "https://registry.yarnpkg.com/libnpmaccess/-/libnpmaccess-10.0.3.tgz#856dc29fd35050159dff0039337aab503367586b" + integrity sha512-JPHTfWJxIK+NVPdNMNGnkz4XGX56iijPbe0qFWbdt68HL+kIvSzh+euBL8npLZvl2fpaxo+1eZSdoG15f5YdIQ== + dependencies: + npm-package-arg "^13.0.0" + npm-registry-fetch "^19.0.0" + +libnpmdiff@^8.1.5: + version "8.1.5" + resolved "https://registry.yarnpkg.com/libnpmdiff/-/libnpmdiff-8.1.5.tgz#369aea4a87053bd25eafa3c2b9da32be75274c54" + integrity sha512-3tknN/GosDOpIYjBplXpr7WVjpBDodAxXkZEtv410XlIsfMD+v/6mt9sYe/s/x+TRmmCRpzP/bxfhUorvV6Cqg== + dependencies: + "@npmcli/arborist" "^9.4.2" + "@npmcli/installed-package-contents" "^4.0.0" + binary-extensions "^3.0.0" + diff "^8.0.2" + minimatch "^10.0.3" + npm-package-arg "^13.0.0" + pacote 
"^21.0.2" + tar "^7.5.1" + +libnpmexec@^10.2.5: + version "10.2.5" + resolved "https://registry.yarnpkg.com/libnpmexec/-/libnpmexec-10.2.5.tgz#21b2907c72bac11e696f5ea9fb5244254e5e7305" + integrity sha512-ayouyoml/4NmcgH+nWzK6QB5w0gKrftsYB8TAHu5TB5v6Nj3fgz8ZBK9FsG2A1SNuHZVTjvrNMDyF2VzDih/bA== + dependencies: + "@gar/promise-retry" "^1.0.0" + "@npmcli/arborist" "^9.4.2" + "@npmcli/package-json" "^7.0.0" + "@npmcli/run-script" "^10.0.0" ci-info "^4.0.0" - npm-package-arg "^10.1.0" - npmlog "^7.0.1" - pacote "^15.0.8" - proc-log "^3.0.0" - read "^2.0.0" - read-package-json-fast "^3.0.2" + npm-package-arg "^13.0.0" + pacote "^21.0.2" + proc-log "^6.0.0" + read "^5.0.1" semver "^7.3.7" - walk-up-path "^3.0.1" - -libnpmfund@^4.2.1: - version "4.2.2" - resolved "https://registry.yarnpkg.com/libnpmfund/-/libnpmfund-4.2.2.tgz#4e50507212e64fcb6a396e4c02369f6c0fc40369" - integrity sha512-qnkP09tpryxD/iPYasHM7+yG4ZVe0e91sBVI/R8HJ1+ajeR9poWDckwiN2LEWGvtV/T/dqB++6A1NLrA5NPryw== - dependencies: - "@npmcli/arborist" "^6.5.0" + signal-exit "^4.1.0" + walk-up-path "^4.0.0" -libnpmhook@^9.0.3: - version "9.0.4" - resolved "https://registry.yarnpkg.com/libnpmhook/-/libnpmhook-9.0.4.tgz#43d893e19944a2e729b2b165a74f84a69443880d" - integrity sha512-bYD8nJiPnqeMtSsRc5bztqSh6/v16M0jQjLeO959HJqf9ZRWKRpVnFx971Rz5zbPGOB2BrQa6iopsh5vons5ww== +libnpmfund@^7.0.19: + version "7.0.19" + resolved "https://registry.yarnpkg.com/libnpmfund/-/libnpmfund-7.0.19.tgz#03d766943b0052c1e3c19ff2c39ea1cb25f0a03d" + integrity sha512-RNyp5gnjVXaqlx0asRLmAOrFkTwANntzqkRyTT6Iu2nUt1F2eiMZNMOpO2HNfA7/NceBVBk/xsrzas3miCz9oQ== dependencies: - aproba "^2.0.0" - npm-registry-fetch "^14.0.3" + "@npmcli/arborist" "^9.4.2" -libnpmorg@^5.0.4: - version "5.0.5" - resolved "https://registry.yarnpkg.com/libnpmorg/-/libnpmorg-5.0.5.tgz#baaba5c77bdfa6808975be9134a330f84b3fa4d4" - integrity sha512-0EbtEIFthVlmaj0hhC3LlEEXUZU3vKfJwfWL//iAqKjHreMhCD3cgdkld+UeWYDgsZzwzvXmopoY0l38I0yx9Q== +libnpmorg@^8.0.1: + version "8.0.1" + 
resolved "https://registry.yarnpkg.com/libnpmorg/-/libnpmorg-8.0.1.tgz#975b61c2635f7edc07552ab8a455ce026decb88c" + integrity sha512-/QeyXXg4hqMw0ESM7pERjIT2wbR29qtFOWIOug/xO4fRjS3jJJhoAPQNsnHtdwnCqgBdFpGQ45aIdFFZx2YhTA== dependencies: aproba "^2.0.0" - npm-registry-fetch "^14.0.3" + npm-registry-fetch "^19.0.0" -libnpmpack@^5.0.20: - version "5.0.21" - resolved "https://registry.yarnpkg.com/libnpmpack/-/libnpmpack-5.0.21.tgz#bcc608279840448fa8c28d8df0f326694d0b6061" - integrity sha512-mQd3pPx7Xf6i2A6QnYcCmgq34BmfVG3HJvpl422B5dLKfi9acITqcJiJ2K7adhxPKZMF5VbP2+j391cs5w+xww== +libnpmpack@^9.1.5: + version "9.1.5" + resolved "https://registry.yarnpkg.com/libnpmpack/-/libnpmpack-9.1.5.tgz#b0ba7affe4683f81e1c9e726212d0adb88bb9721" + integrity sha512-H1IX364ZwpeRfrL6UYSuxFNgP16/TvlwtCm8ZallbB7/1FZ3h1FBZHamQtv7PqcZUTWE27mygdQ4wCCW2BmVlg== dependencies: - "@npmcli/arborist" "^6.5.0" - "@npmcli/run-script" "^6.0.0" - npm-package-arg "^10.1.0" - pacote "^15.0.8" + "@npmcli/arborist" "^9.4.2" + "@npmcli/run-script" "^10.0.0" + npm-package-arg "^13.0.0" + pacote "^21.0.2" libnpmpublish@9.0.9: version "9.0.9" @@ -11293,44 +11590,44 @@ libnpmpublish@9.0.9: sigstore "^2.2.0" ssri "^10.0.6" -libnpmpublish@^7.5.1: - version "7.5.2" - resolved "https://registry.yarnpkg.com/libnpmpublish/-/libnpmpublish-7.5.2.tgz#1b2780a4a56429d6dea332174286179b8d6f930c" - integrity sha512-azAxjEjAgBkbPHUGsGdMbTScyiLcTKdEnNYwGS+9yt+fUsNyiYn8hNH3+HeWKaXzFjvxi50MrHw1yp1gg5pumQ== +libnpmpublish@^11.1.3: + version "11.1.3" + resolved "https://registry.yarnpkg.com/libnpmpublish/-/libnpmpublish-11.1.3.tgz#fcda5c113798155fa111e04be63c9599d38ae4c2" + integrity sha512-NVPTth/71cfbdYHqypcO9Lt5WFGTzFEcx81lWd7GDJIgZ95ERdYHGUfCtFejHCyqodKsQkNEx2JCkMpreDty/A== dependencies: + "@npmcli/package-json" "^7.0.0" ci-info "^4.0.0" - normalize-package-data "^5.0.0" - npm-package-arg "^10.1.0" - npm-registry-fetch "^14.0.3" - proc-log "^3.0.0" + npm-package-arg "^13.0.0" + npm-registry-fetch "^19.0.0" + proc-log "^6.0.0" 
semver "^7.3.7" - sigstore "^1.4.0" - ssri "^10.0.1" + sigstore "^4.0.0" + ssri "^13.0.0" -libnpmsearch@^6.0.2: - version "6.0.3" - resolved "https://registry.yarnpkg.com/libnpmsearch/-/libnpmsearch-6.0.3.tgz#f6001910b4a68341c2aa3f6f9505e665ed98759e" - integrity sha512-4FLTFsygxRKd+PL32WJlFN1g6gkfx3d90PjgSgd6kl9nJ55sZQAqNyi1M7QROKB4kN8JCNCphK8fQYDMg5bCcg== +libnpmsearch@^9.0.1: + version "9.0.1" + resolved "https://registry.yarnpkg.com/libnpmsearch/-/libnpmsearch-9.0.1.tgz#674a88ffc9ab5826feb34c2c66e90797b38f4c2e" + integrity sha512-oKw58X415ERY/BOGV3jQPVMcep8YeMRWMzuuqB0BAIM5VxicOU1tQt19ExCu4SV77SiTOEoziHxGEgJGw3FBYQ== dependencies: - npm-registry-fetch "^14.0.3" + npm-registry-fetch "^19.0.0" -libnpmteam@^5.0.3: - version "5.0.4" - resolved "https://registry.yarnpkg.com/libnpmteam/-/libnpmteam-5.0.4.tgz#255ac22d94e4b9e911456bf97c1dc1013df03659" - integrity sha512-yN2zxNb8Urvvo7fTWRcP3E/KPtpZJXFweDWcl+H/s3zopGDI9ahpidddGVG98JhnPl3vjqtZvFGU3/sqVTfuIw== +libnpmteam@^8.0.2: + version "8.0.2" + resolved "https://registry.yarnpkg.com/libnpmteam/-/libnpmteam-8.0.2.tgz#0417161bfcd155f5e8391cc2b6a05260ccbf1f41" + integrity sha512-ypLrDUQoi8EhG+gzx5ENMcYq23YjPV17Mfvx4nOnQiHOi8vp47+4GvZBrMsEM4yeHPwxguF/HZoXH4rJfHdH/w== dependencies: aproba "^2.0.0" - npm-registry-fetch "^14.0.3" + npm-registry-fetch "^19.0.0" -libnpmversion@^4.0.2: - version "4.0.3" - resolved "https://registry.yarnpkg.com/libnpmversion/-/libnpmversion-4.0.3.tgz#f4d85d3eb6bdbf7de8d9317abda92528e84b1a53" - integrity sha512-eD1O5zr0ko5pjOdz+2NyTEzP0kzKG8VIVyU+hIsz61cRmTrTxFRJhVBNOI1Q/inifkcM/UTl8EMfa0vX48zfoQ== +libnpmversion@^8.0.3: + version "8.0.3" + resolved "https://registry.yarnpkg.com/libnpmversion/-/libnpmversion-8.0.3.tgz#f50030c72a85e35b70a4ea4c075347f1999f9fe5" + integrity sha512-Avj1GG3DT6MGzWOOk3yA7rORcMDUPizkIGbI8glHCO7WoYn3NYNmskLDwxg2NMY1Tyf2vrHAqTuSG58uqd1lJg== dependencies: - "@npmcli/git" "^4.0.1" - "@npmcli/run-script" "^6.0.0" - json-parse-even-better-errors "^3.0.0" - proc-log "^3.0.0" + 
"@npmcli/git" "^7.0.0" + "@npmcli/run-script" "^10.0.0" + json-parse-even-better-errors "^5.0.0" + proc-log "^6.0.0" semver "^7.3.7" lie@~3.3.0: @@ -11393,11 +11690,6 @@ lines-and-columns@^1.1.6: resolved "https://registry.yarnpkg.com/lines-and-columns/-/lines-and-columns-1.2.4.tgz#eca284f75d2965079309dc0ad9255abb2ebc1632" integrity sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg== -lines-and-columns@^2.0.3: - version "2.0.4" - resolved "https://registry.yarnpkg.com/lines-and-columns/-/lines-and-columns-2.0.4.tgz#d00318855905d2660d8c0822e3f5a4715855fc42" - integrity sha512-wM1+Z03eypVAVUCE7QdSqpVIvelbOakn1M0bPDoA4SGWPx3sNDVUiMo3L6To6WWGClB7VyXnhQ4Sn7gxiJbE6A== - link-check@^5.2.0: version "5.2.0" resolved "https://registry.yarnpkg.com/link-check/-/link-check-5.2.0.tgz#595a339d305900bed8c1302f4342a29c366bf478" @@ -11457,13 +11749,6 @@ locate-path@^6.0.0: dependencies: p-locate "^5.0.0" -locate-path@^7.1.0: - version "7.2.0" - resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-7.2.0.tgz#69cb1779bd90b35ab1e771e1f2f89a202c2a8a8a" - integrity sha512-gvVijfZvn7R+2qyPX8mAuKcFGDf6Nc61GdvGafQsHL0sBIxfKzA+usWn4GFC/bk+QdwPUD4kWFJLhElipq+0VA== - dependencies: - p-locate "^6.0.0" - lodash-es@^4.17.21: version "4.17.23" resolved "https://registry.yarnpkg.com/lodash-es/-/lodash-es-4.17.23.tgz#58c4360fd1b5d33afc6c0bbd3d1149349b1138e0" @@ -11654,11 +11939,16 @@ lru-cache@^10.0.1: dependencies: semver "^7.3.5" -lru-cache@^10.2.0, lru-cache@^10.2.2: +lru-cache@^10.2.2: version "10.4.3" resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-10.4.3.tgz#410fc8a17b70e598013df257c2446b7f3383f119" integrity sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ== +lru-cache@^11.0.0, lru-cache@^11.1.0, lru-cache@^11.2.1: + version "11.2.7" + resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-11.2.7.tgz#9127402617f34cd6767b96daee98c28e74458d35" + integrity 
sha512-aY/R+aEsRelme17KGQa/1ZSIpLpNYYrhcrepKTZgE+W3WM16YMCaPwOHLHsmopZHELU0Ojin1lPVxKR0MihncA== + lru-cache@^5.1.1: version "5.1.1" resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-5.1.1.tgz#1da27e6710271947695daf6848e847f01d84b920" @@ -11673,7 +11963,7 @@ lru-cache@^6.0.0: dependencies: yallist "^4.0.0" -lru-cache@^7.14.1, lru-cache@^7.4.4, lru-cache@^7.5.1, lru-cache@^7.7.1: +lru-cache@^7.14.1: version "7.18.3" resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-7.18.3.tgz#f793896e0fd0e954a59dfdd82f0773808df6aa89" integrity sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA== @@ -11700,6 +11990,15 @@ luxon@^3.2.1: resolved "https://registry.yarnpkg.com/luxon/-/luxon-3.4.4.tgz#cf20dc27dc532ba41a169c43fdcc0063601577af" integrity sha512-zobTr7akeGHnv7eBOXcRgMeCP6+uyYsczwmeRCauvpvaAltgNyTbLH/+VaEAPUeWBT+1GuNmz4wC/6jtQzbbVA== +make-asynchronous@^1.0.1: + version "1.1.0" + resolved "https://registry.yarnpkg.com/make-asynchronous/-/make-asynchronous-1.1.0.tgz#6225f7f1ccaab9acaac5e2fcd0b075afefff19aa" + integrity sha512-ayF7iT+44LXdxJLTrTd3TLQpFDDvPCBxXxbv+pMUSuHA5Q8zyAfwkRP6aHHwNVFBUFWtxAHqwNJxF8vMZLAbVg== + dependencies: + p-event "^6.0.0" + type-fest "^4.6.0" + web-worker "^1.5.0" + make-dir@4.0.0, make-dir@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/make-dir/-/make-dir-4.0.0.tgz#c3c2307a771277cd9638305f915c29ae741b614e" @@ -11727,49 +12026,6 @@ make-error@1.x, make-error@^1.1.1: resolved "https://registry.yarnpkg.com/make-error/-/make-error-1.3.6.tgz#2eb2e37ea9b67c4891f684a1394799af484cf7a2" integrity sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw== -make-fetch-happen@^10.0.3: - version "10.2.1" - resolved "https://registry.yarnpkg.com/make-fetch-happen/-/make-fetch-happen-10.2.1.tgz#f5e3835c5e9817b617f2770870d9492d28678164" - integrity sha512-NgOPbRiaQM10DYXvN3/hhGVI2M5MtITFryzBGxHM5p4wnFxsVCbxkrBrDsk+EZ5OB4jEOT7AjDxtdF+KVEFT7w== - 
dependencies: - agentkeepalive "^4.2.1" - cacache "^16.1.0" - http-cache-semantics "^4.1.0" - http-proxy-agent "^5.0.0" - https-proxy-agent "^5.0.0" - is-lambda "^1.0.1" - lru-cache "^7.7.1" - minipass "^3.1.6" - minipass-collect "^1.0.2" - minipass-fetch "^2.0.3" - minipass-flush "^1.0.5" - minipass-pipeline "^1.2.4" - negotiator "^0.6.3" - promise-retry "^2.0.1" - socks-proxy-agent "^7.0.0" - ssri "^9.0.0" - -make-fetch-happen@^11.0.0, make-fetch-happen@^11.0.1, make-fetch-happen@^11.1.1: - version "11.1.1" - resolved "https://registry.yarnpkg.com/make-fetch-happen/-/make-fetch-happen-11.1.1.tgz#85ceb98079584a9523d4bf71d32996e7e208549f" - integrity sha512-rLWS7GCSTcEujjVBs2YqG7Y4643u8ucvCJeSRqiLYhesrDuzeuFIk37xREzAsfQaqzl8b9rNCE4m6J8tvX4Q8w== - dependencies: - agentkeepalive "^4.2.1" - cacache "^17.0.0" - http-cache-semantics "^4.1.1" - http-proxy-agent "^5.0.0" - https-proxy-agent "^5.0.0" - is-lambda "^1.0.1" - lru-cache "^7.7.1" - minipass "^5.0.0" - minipass-fetch "^3.0.0" - minipass-flush "^1.0.5" - minipass-pipeline "^1.2.4" - negotiator "^0.6.3" - promise-retry "^2.0.1" - socks-proxy-agent "^7.0.0" - ssri "^10.0.0" - make-fetch-happen@^13.0.0, make-fetch-happen@^13.0.1: version "13.0.1" resolved "https://registry.yarnpkg.com/make-fetch-happen/-/make-fetch-happen-13.0.1.tgz#273ba2f78f45e1f3a6dca91cede87d9fa4821e36" @@ -11788,6 +12044,24 @@ make-fetch-happen@^13.0.0, make-fetch-happen@^13.0.1: promise-retry "^2.0.1" ssri "^10.0.0" +make-fetch-happen@^15.0.0, make-fetch-happen@^15.0.1, make-fetch-happen@^15.0.4, make-fetch-happen@^15.0.5: + version "15.0.5" + resolved "https://registry.yarnpkg.com/make-fetch-happen/-/make-fetch-happen-15.0.5.tgz#b0e3dd53d487b2733e4ea232c2bebf1bd16afb03" + integrity sha512-uCbIa8jWWmQZt4dSnEStkVC6gdakiinAm4PiGsywIkguF0eWMdcjDz0ECYhUolFU3pFLOev9VNPCEygydXnddg== + dependencies: + "@gar/promise-retry" "^1.0.0" + "@npmcli/agent" "^4.0.0" + "@npmcli/redact" "^4.0.0" + cacache "^20.0.1" + http-cache-semantics "^4.1.1" + minipass 
"^7.0.2" + minipass-fetch "^5.0.0" + minipass-flush "^1.0.5" + minipass-pipeline "^1.2.4" + negotiator "^1.0.0" + proc-log "^6.0.0" + ssri "^13.0.0" + make-fetch-happen@^9.1.0: version "9.1.0" resolved "https://registry.yarnpkg.com/make-fetch-happen/-/make-fetch-happen-9.1.0.tgz#53085a09e7971433e6765f7971bf63f4e05cb968" @@ -11838,7 +12112,7 @@ mariadb@^3.0.2: iconv-lite "^0.6.3" lru-cache "^10.0.1" -markdown-it@^14.1.0: +markdown-it@>=14.1.1, markdown-it@^14.1.0: version "14.1.1" resolved "https://registry.yarnpkg.com/markdown-it/-/markdown-it-14.1.1.tgz#856f90b66fc39ae70affd25c1b18b581d7deee1f" integrity sha512-BuU2qnTti9YKgK5N+IeMubp14ZUKUUw7yeJbkjtosvHiP0AZ5c8IAgEMk79D0eC8F23r4Ac/q8cAIFdm2FtyoA== @@ -11879,28 +12153,29 @@ markdown-table@^2.0.0: dependencies: repeat-string "^1.0.0" -marked-terminal@^5.1.1: - version "5.2.0" - resolved "https://registry.yarnpkg.com/marked-terminal/-/marked-terminal-5.2.0.tgz#c5370ec2bae24fb2b34e147b731c94fa933559d3" - integrity sha512-Piv6yNwAQXGFjZSaiNljyNFw7jKDdGrw70FSbtxEyldLsyeuV5ZHm/1wW++kWbrOF1VPnUgYOhB2oLL0ZpnekA== - dependencies: - ansi-escapes "^6.2.0" - cardinal "^2.1.1" - chalk "^5.2.0" - cli-table3 "^0.6.3" - node-emoji "^1.11.0" - supports-hyperlinks "^2.3.0" +marked-terminal@^7.3.0: + version "7.3.0" + resolved "https://registry.yarnpkg.com/marked-terminal/-/marked-terminal-7.3.0.tgz#7a86236565f3dd530f465ffce9c3f8b62ef270e8" + integrity sha512-t4rBvPsHc57uE/2nJOLmMbZCQ4tgAccAED3ngXQqW6g+TxA488JzJ+FK3lQkzBQOI1mRV/r/Kq+1ZlJ4D0owQw== + dependencies: + ansi-escapes "^7.0.0" + ansi-regex "^6.1.0" + chalk "^5.4.1" + cli-highlight "^2.1.11" + cli-table3 "^0.6.5" + node-emoji "^2.2.0" + supports-hyperlinks "^3.1.0" + +marked@^15.0.0: + version "15.0.12" + resolved "https://registry.yarnpkg.com/marked/-/marked-15.0.12.tgz#30722c7346e12d0a2d0207ab9b0c4f0102d86c4e" + integrity sha512-8dD6FusOQSrpv9Z1rdNMdlSgQOIP880DHqnohobOmYLElGEqAL/JvxvuxZO16r4HtjTlfPRDC1hbvxC9dPN2nA== marked@^4.1.0: version "4.3.0" resolved 
"https://registry.yarnpkg.com/marked/-/marked-4.3.0.tgz#796362821b019f734054582038b116481b456cf3" integrity sha512-PRsaiG84bK+AMvxziE/lCFss8juXjNaWzVbN5tXAm4XjeaS9NAHhop+PjQxz2A9h8Q4M/xGmzP8vqNwy6JeK0A== -marked@^5.0.0: - version "5.1.2" - resolved "https://registry.yarnpkg.com/marked/-/marked-5.1.2.tgz#62b5ccfc75adf72ca3b64b2879b551d89e77677f" - integrity sha512-ahRPGXJpjMjwSOlBoTMZAK7ATXkli5qCPxZ21TG44rx1KEo44bii4ekgTDQPNRQ4Kh7JMb9Ub1PVk1NxRSsorg== - math-expression-evaluator@^2.0.0: version "2.0.7" resolved "https://registry.yarnpkg.com/math-expression-evaluator/-/math-expression-evaluator-2.0.7.tgz#dc99a80ce2bf7f9b7df878126feb5c506c1fdf5f" @@ -12029,6 +12304,11 @@ meow@^12.0.1: resolved "https://registry.yarnpkg.com/meow/-/meow-12.1.1.tgz#e558dddbab12477b69b2e9a2728c327f191bace6" integrity sha512-BhXM0Au22RwUneMPwSCnyhTOizdWoIEPU9sp0Aqa1PnDMR5Wv2FGXYDjuzJEIX+Eo2Rb8xuYe5jrnm5QowQFkw== +meow@^13.0.0: + version "13.2.0" + resolved "https://registry.yarnpkg.com/meow/-/meow-13.2.0.tgz#6b7d63f913f984063b3cc261b6e8800c4cd3474f" + integrity sha512-pxQJQzB6djGPXh08dacEloMFopsOqGVRKFPYvPOt9XDZ1HasbgDZA74CJGreSU4G3Ak7EFJGoiH2auq+yXISgA== + meow@^8.0.0, meow@^8.1.2: version "8.1.2" resolved "https://registry.yarnpkg.com/meow/-/meow-8.1.2.tgz#bcbe45bda0ee1729d350c03cffc8395a36c4e897" @@ -12129,15 +12409,7 @@ micromark@^2.11.3, micromark@~2.11.0, micromark@~2.11.3: debug "^4.0.0" parse-entities "^2.0.0" -micromatch@4.0.2: - version "4.0.2" - resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.2.tgz#4fcb0999bf9fbc2fcbdd212f6d629b9a56c39259" - integrity sha512-y7FpHSbMUMoyPbYUSzO6PaZ6FyRnQOpHuKwbo1G+Knck95XVU4QAiKdGEnj5wwoS7PlOgthX/09u5iFJ+aYf5Q== - dependencies: - braces "^3.0.1" - picomatch "^2.0.5" - -micromatch@^4.0.0, micromatch@^4.0.2, micromatch@^4.0.4, micromatch@^4.0.8: +micromatch@4.0.2, micromatch@^4.0.0, micromatch@^4.0.2, micromatch@^4.0.4, micromatch@^4.0.8: version "4.0.8" resolved 
"https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.8.tgz#d66fa18f3a47076789320b9b1af32bd86d9fa202" integrity sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA== @@ -12243,6 +12515,13 @@ minimatch@9.0.3: dependencies: brace-expansion "^2.0.1" +minimatch@^10.0.3, minimatch@^10.1.1, minimatch@^10.2.2, minimatch@^10.2.4: + version "10.2.4" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-10.2.4.tgz#465b3accbd0218b8281f5301e27cedc697f96fde" + integrity sha512-oRjTw/97aTBN0RHbYCdtF1MQfvusSIBQM0IZEgzl6426+8jSC0nF1a/GmnVLpfB9yyr6g6FTqWqiZVbxrtaCIg== + dependencies: + brace-expansion "^5.0.2" + minimatch@^3.0.4, minimatch@^3.0.5, minimatch@^3.1.1, minimatch@^3.1.2: version "3.1.5" resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.1.5.tgz#580c88f8d5445f2bd6aa8f3cadefa0de79fbd69e" @@ -12257,14 +12536,7 @@ minimatch@^5.0.1: dependencies: brace-expansion "^2.0.1" -minimatch@^8.0.2: - version "8.0.7" - resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-8.0.7.tgz#954766e22da88a3e0a17ad93b58c15c9d8a579de" - integrity sha512-V+1uQNdzybxa14e/p00HZnQNNcTjnRJjDxg2V8wtkjFctq4M7hXFws4oekyTP0Jebeq7QYtpFyOeBAjc88zvYg== - dependencies: - brace-expansion "^2.0.1" - -minimatch@^9.0.0, minimatch@^9.0.3, minimatch@^9.0.4, minimatch@^9.0.5: +minimatch@^9.0.0, minimatch@^9.0.4, minimatch@^9.0.5: version "9.0.9" resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-9.0.9.tgz#9b0cb9fcb78087f6fd7eababe2511c4d3d60574e" integrity sha512-OBwBN9AL4dqmETlpS2zasx+vTeWclWzkblfZk7KTA5j3jeOONz/tRCnZomUyvNg83wL5Zv9Ss6HMJXAgL8R2Yg== @@ -12310,17 +12582,6 @@ minipass-fetch@^1.3.2: optionalDependencies: encoding "^0.1.12" -minipass-fetch@^2.0.3: - version "2.1.2" - resolved "https://registry.yarnpkg.com/minipass-fetch/-/minipass-fetch-2.1.2.tgz#95560b50c472d81a3bc76f20ede80eaed76d8add" - integrity sha512-LT49Zi2/WMROHYoqGgdlQIZh8mLPZmOrN2NdJjMXxYe4nkN6FUyuPuOAOedNJDrx0IRGg9+4guZewtp8hE6TxA== - dependencies: - minipass 
"^3.1.6" - minipass-sized "^1.0.3" - minizlib "^2.1.2" - optionalDependencies: - encoding "^0.1.13" - minipass-fetch@^3.0.0: version "3.0.4" resolved "https://registry.yarnpkg.com/minipass-fetch/-/minipass-fetch-3.0.4.tgz#4d4d9b9f34053af6c6e597a64be8e66e42bf45b7" @@ -12332,6 +12593,17 @@ minipass-fetch@^3.0.0: optionalDependencies: encoding "^0.1.13" +minipass-fetch@^5.0.0: + version "5.0.2" + resolved "https://registry.yarnpkg.com/minipass-fetch/-/minipass-fetch-5.0.2.tgz#3973a605ddfd8abb865e50d6fc634853c8239729" + integrity sha512-2d0q2a8eCi2IRg/IGubCNRJoYbA1+YPXAzQVRFmB45gdGZafyivnZ5YSEfo3JikbjGxOdntGFvBQGqaSMXlAFQ== + dependencies: + minipass "^7.0.3" + minipass-sized "^2.0.0" + minizlib "^3.0.1" + optionalDependencies: + iconv-lite "^0.7.2" + minipass-flush@^1.0.5: version "1.0.5" resolved "https://registry.yarnpkg.com/minipass-flush/-/minipass-flush-1.0.5.tgz#82e7135d7e89a50ffe64610a787953c4c4cbb373" @@ -12339,14 +12611,6 @@ minipass-flush@^1.0.5: dependencies: minipass "^3.0.0" -minipass-json-stream@^1.0.1: - version "1.0.2" - resolved "https://registry.yarnpkg.com/minipass-json-stream/-/minipass-json-stream-1.0.2.tgz#5121616c77a11c406c3ffa77509e0b77bb267ec3" - integrity sha512-myxeeTm57lYs8pH2nxPzmEEg8DGIgW+9mv6D4JZD2pa81I/OBjeU7PtICXV6c9eRGTA5JMDsuIPUZRCyBMYNhg== - dependencies: - jsonparse "^1.3.1" - minipass "^3.0.0" - minipass-pipeline@^1.2.2, minipass-pipeline@^1.2.4: version "1.2.4" resolved "https://registry.yarnpkg.com/minipass-pipeline/-/minipass-pipeline-1.2.4.tgz#68472f79711c084657c067c5c6ad93cddea8214c" @@ -12361,28 +12625,20 @@ minipass-sized@^1.0.3: dependencies: minipass "^3.0.0" -minipass@^3.0.0, minipass@^3.1.0, minipass@^3.1.1, minipass@^3.1.3, minipass@^3.1.6: +minipass-sized@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/minipass-sized/-/minipass-sized-2.0.0.tgz#2228ee97e3f74f6b22ba6d1319addb7621534306" + integrity sha512-zSsHhto5BcUVM2m1LurnXY6M//cGhVaegT71OfOXoprxT6o780GZd792ea6FfrQkuU4usHZIUczAQMRUE2plzA== + 
dependencies: + minipass "^7.1.2" + +minipass@^3.0.0, minipass@^3.1.0, minipass@^3.1.1, minipass@^3.1.3: version "3.3.6" resolved "https://registry.yarnpkg.com/minipass/-/minipass-3.3.6.tgz#7bba384db3a1520d18c9c0e5251c3444e95dd94a" integrity sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw== dependencies: yallist "^4.0.0" -minipass@^4.2.4: - version "4.2.8" - resolved "https://registry.yarnpkg.com/minipass/-/minipass-4.2.8.tgz#f0010f64393ecfc1d1ccb5f582bcaf45f48e1a3a" - integrity sha512-fNzuVyifolSLFL4NzpF+wEF4qrgqaaKX0haXPQEdQ7NKAN+WecoKMHV09YcuL/DHxrUsYQOK3MiuDf7Ip2OXfQ== - -minipass@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/minipass/-/minipass-5.0.0.tgz#3e9788ffb90b694a5d0ec94479a45b5d8738133d" - integrity sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ== - -"minipass@^5.0.0 || ^6.0.2 || ^7.0.0": - version "7.1.3" - resolved "https://registry.yarnpkg.com/minipass/-/minipass-7.1.3.tgz#79389b4eb1bb2d003a9bba87d492f2bd37bdc65b" - integrity sha512-tEBHqDnIoM/1rXME1zgka9g6Q2lcoCkxHLuc7ODJ5BxbP5d4c2Z5cGgtXAku59200Cx7diuHTOYfSBD8n6mm8A== - minipass@^7.0.2, minipass@^7.0.4, minipass@^7.1.2: version "7.1.2" resolved "https://registry.yarnpkg.com/minipass/-/minipass-7.1.2.tgz#93a9626ce5e5e66bd4db86849e7515e92340a707" @@ -12393,7 +12649,12 @@ minipass@^7.0.3: resolved "https://registry.yarnpkg.com/minipass/-/minipass-7.0.4.tgz#dbce03740f50a4786ba994c1fb908844d27b038c" integrity sha512-jYofLM5Dam9279rdkWzqHozUo4ybjdZmCsDHePy5V/PbBcVMiSZR97gmAy45aqi8CK1lG2ECd356FU86avfwUQ== -minizlib@^2.0.0, minizlib@^2.1.1, minizlib@^2.1.2: +minipass@^7.1.3: + version "7.1.3" + resolved "https://registry.yarnpkg.com/minipass/-/minipass-7.1.3.tgz#79389b4eb1bb2d003a9bba87d492f2bd37bdc65b" + integrity sha512-tEBHqDnIoM/1rXME1zgka9g6Q2lcoCkxHLuc7ODJ5BxbP5d4c2Z5cGgtXAku59200Cx7diuHTOYfSBD8n6mm8A== + +minizlib@^2.0.0, minizlib@^2.1.2: version "2.1.2" resolved 
"https://registry.yarnpkg.com/minizlib/-/minizlib-2.1.2.tgz#e90d3466ba209b932451508a11ce3d3632145931" integrity sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg== @@ -12401,6 +12662,13 @@ minizlib@^2.0.0, minizlib@^2.1.1, minizlib@^2.1.2: minipass "^3.0.0" yallist "^4.0.0" +minizlib@^3.0.1, minizlib@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/minizlib/-/minizlib-3.1.0.tgz#6ad76c3a8f10227c9b51d1c9ac8e30b27f5a251c" + integrity sha512-KZxYo1BUkWD2TVFLr0MQoM8vUUigWD3LlD83a/75BqC+4qE0Hb1Vo5v1FgcfaNXvfXzr+5EhQ6ing/CaBijTlw== + dependencies: + minipass "^7.1.2" + mkdirp-classic@^0.5.2, mkdirp-classic@^0.5.3: version "0.5.3" resolved "https://registry.yarnpkg.com/mkdirp-classic/-/mkdirp-classic-0.5.3.tgz#fa10c9115cc6d8865be221ba47ee9bed78601113" @@ -12558,7 +12826,7 @@ mute-stream@0.0.8: resolved "https://registry.yarnpkg.com/mute-stream/-/mute-stream-0.0.8.tgz#1630c42b2251ff81e2a283de96a5497ea92e5e0d" integrity sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA== -mute-stream@^1.0.0, mute-stream@~1.0.0: +mute-stream@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/mute-stream/-/mute-stream-1.0.0.tgz#e31bd9fe62f0aed23520aa4324ea6671531e013e" integrity sha512-avsJQhyd+680gKXyG/sQc0nXaC6rBkPOfyHYcFb9+hdkqQkR9bdnkJ0AMZhke0oesPqIO+mFFJ+IdBc7mst4IA== @@ -12568,6 +12836,11 @@ mute-stream@^2.0.0: resolved "https://registry.yarnpkg.com/mute-stream/-/mute-stream-2.0.0.tgz#a5446fc0c512b71c83c44d908d5c7b7b4c493b2b" integrity sha512-WWdIxpyjEn+FhQJQQv9aQAYlHoNVdzIzUySNV1gHUPDSdZJ3yZn7pAAbQcV7B56Mvu881q9FZV+0Vx2xC44VWA== +mute-stream@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/mute-stream/-/mute-stream-3.0.0.tgz#cd8014dd2acb72e1e91bb67c74f0019e620ba2d1" + integrity sha512-dkEJPVvun4FryqBmZ5KhDo0K9iDXAwn08tMLDinNdRBNPcYEDiWYysLcc6k3mjTMlbP9KyylvRpd4wFtwrT9rw== + mysql2@3.9.8: version "3.9.8" resolved 
"https://registry.yarnpkg.com/mysql2/-/mysql2-3.9.8.tgz#fe8a0f975f2c495ed76ca988ddc5505801dc49ce" @@ -12596,6 +12869,15 @@ mysql2@^3.0.1: seq-queue "^0.0.5" sqlstring "^2.3.2" +mz@^2.4.0: + version "2.7.0" + resolved "https://registry.yarnpkg.com/mz/-/mz-2.7.0.tgz#95008057a56cafadc2bc63dde7f9ff6955948e32" + integrity sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q== + dependencies: + any-promise "^1.0.0" + object-assign "^4.0.1" + thenify-all "^1.0.0" + named-placeholders@^1.1.3: version "1.1.3" resolved "https://registry.yarnpkg.com/named-placeholders/-/named-placeholders-1.1.3.tgz#df595799a36654da55dda6152ba7a137ad1d9351" @@ -12703,12 +12985,20 @@ node-addon-api@^7.0.0: resolved "https://registry.yarnpkg.com/node-addon-api/-/node-addon-api-7.1.1.tgz#1aba6693b0f255258a049d621329329322aad558" integrity sha512-5m3bsyrjFWE1xf7nz7YXdN4udnVtXK6/Yfgn5qnahL6bCkf2yKt4k3nuTKAtT4r3IG8JNR2ncsIMdZuAzJjHQQ== -node-emoji@^1.11.0: - version "1.11.0" - resolved "https://registry.yarnpkg.com/node-emoji/-/node-emoji-1.11.0.tgz#69a0150e6946e2f115e9d7ea4df7971e2628301c" - integrity sha512-wo2DpQkQp7Sjm2A0cq+sN7EHKO6Sl0ctXeBdFZrL9T9+UywORbufTcTZxom8YqpLQt/FqNMUkOpkZrJVYSKD3A== +node-addon-api@^8.0.0: + version "8.6.0" + resolved "https://registry.yarnpkg.com/node-addon-api/-/node-addon-api-8.6.0.tgz#b22497201b465cd0a92ef2c01074ee5068c79a6d" + integrity sha512-gBVjCaqDlRUk0EwoPNKzIr9KkS9041G/q31IBShPs1Xz6UTA+EXdZADbzqAJQrpDRq71CIMnOP5VMut3SL0z5Q== + +node-emoji@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/node-emoji/-/node-emoji-2.2.0.tgz#1d000e3c76e462577895be1b436f4aa2d6760eb0" + integrity sha512-Z3lTE9pLaJF47NyMhd4ww1yFTAP8YhYI8SleJiHzM46Fgpm5cnNzSl9XfzFNqbaz+VlJrIj3fXQ4DeN1Rjm6cw== dependencies: - lodash "^4.17.21" + "@sindresorhus/is" "^4.6.0" + char-regex "^1.0.2" + emojilib "^2.4.0" + skin-tone "^2.0.0" node-fetch@2.6.7: version "2.6.7" @@ -12724,6 +13014,22 @@ node-fetch@^2.3.0, node-fetch@^2.6.1, 
node-fetch@^2.6.7, node-fetch@^2.7.0: dependencies: whatwg-url "^5.0.0" +node-gyp@12.x, node-gyp@^12.1.0, node-gyp@^12.2.0: + version "12.2.0" + resolved "https://registry.yarnpkg.com/node-gyp/-/node-gyp-12.2.0.tgz#ff73f6f509e33d8b7e768f889ffc9738ad117b07" + integrity sha512-q23WdzrQv48KozXlr0U1v9dwO/k59NHeSzn6loGcasyf0UnSrtzs8kRxM+mfwJSf0DkX0s43hcqgnSO4/VNthQ== + dependencies: + env-paths "^2.2.0" + exponential-backoff "^3.1.1" + graceful-fs "^4.2.6" + make-fetch-happen "^15.0.0" + nopt "^9.0.0" + proc-log "^6.0.0" + semver "^7.3.5" + tar "^7.5.4" + tinyglobby "^0.2.12" + which "^6.0.0" + node-gyp@8.x: version "8.4.1" resolved "https://registry.yarnpkg.com/node-gyp/-/node-gyp-8.4.1.tgz#3d49308fc31f768180957d6b5746845fbd429937" @@ -12756,23 +13062,6 @@ node-gyp@^10.0.0: tar "^6.2.1" which "^4.0.0" -node-gyp@^9.0.0, node-gyp@^9.4.1: - version "9.4.1" - resolved "https://registry.yarnpkg.com/node-gyp/-/node-gyp-9.4.1.tgz#8a1023e0d6766ecb52764cc3a734b36ff275e185" - integrity sha512-OQkWKbjQKbGkMf/xqI1jjy3oCTgMKJac58G2+bjZb3fza6gW2YrCSdMQYaoTb70crvE//Gngr4f0AgVHmqHvBQ== - dependencies: - env-paths "^2.2.0" - exponential-backoff "^3.1.1" - glob "^7.1.4" - graceful-fs "^4.2.6" - make-fetch-happen "^10.0.3" - nopt "^6.0.0" - npmlog "^6.0.0" - rimraf "^3.0.2" - semver "^7.3.5" - tar "^6.1.2" - which "^2.0.2" - node-int64@^0.4.0: version "0.4.0" resolved "https://registry.yarnpkg.com/node-int64/-/node-int64-0.4.0.tgz#87a9065cdb355d3182d8f94ce11188b825c68a3b" @@ -12839,20 +13128,20 @@ nopt@^5.0.0: dependencies: abbrev "1" -nopt@^6.0.0: - version "6.0.0" - resolved "https://registry.yarnpkg.com/nopt/-/nopt-6.0.0.tgz#245801d8ebf409c6df22ab9d95b65e1309cdb16d" - integrity sha512-ZwLpbTgdhuZUnZzjd7nb1ZV+4DoiC6/sfiVKok72ym/4Tlf+DFdlHYmT2JPmcNNWV6Pi3SDf1kT+A4r9RTuT9g== - dependencies: - abbrev "^1.0.0" - -nopt@^7.0.0, nopt@^7.2.0, nopt@^7.2.1: +nopt@^7.0.0, nopt@^7.2.1: version "7.2.1" resolved 
"https://registry.yarnpkg.com/nopt/-/nopt-7.2.1.tgz#1cac0eab9b8e97c9093338446eddd40b2c8ca1e7" integrity sha512-taM24ViiimT/XntxbPyJQzCG+p4EKOpgD3mxFwW38mGjVUrfERQOeY4EDHjdnptttfHuHQXFx+lTP08Q+mLa/w== dependencies: abbrev "^2.0.0" +nopt@^9.0.0: + version "9.0.0" + resolved "https://registry.yarnpkg.com/nopt/-/nopt-9.0.0.tgz#6bff0836b2964d24508b6b41b5a9a49c4f4a1f96" + integrity sha512-Zhq3a+yFKrYwSBluL4H9XP3m3y5uvQkB/09CwDruCiRmR/UJYnn9W4R48ry0uGC70aeTPKLynBtscP9efFFcPw== + dependencies: + abbrev "^4.0.0" + nopt@~1.0.10: version "1.0.10" resolved "https://registry.yarnpkg.com/nopt/-/nopt-1.0.10.tgz#6ddd21bd2a31417b92727dd585f8a6f37608ebee" @@ -12880,16 +13169,6 @@ normalize-package-data@^3.0.0, normalize-package-data@^3.0.3: semver "^7.3.4" validate-npm-package-license "^3.0.1" -normalize-package-data@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/normalize-package-data/-/normalize-package-data-5.0.0.tgz#abcb8d7e724c40d88462b84982f7cbf6859b4588" - integrity sha512-h9iPVIfrVZ9wVYQnxFgtw1ugSvGEMOlyPWWtm8BMJhnwyEL/FLbYbTY3V3PpjI/BUK67n9PEWDu6eHzu1fB15Q== - dependencies: - hosted-git-info "^6.0.0" - is-core-module "^2.8.1" - semver "^7.3.5" - validate-npm-package-license "^3.0.4" - normalize-package-data@^6.0.0, normalize-package-data@^6.0.1: version "6.0.2" resolved "https://registry.yarnpkg.com/normalize-package-data/-/normalize-package-data-6.0.2.tgz#a7bc22167fe24025412bcff0a9651eb768b03506" @@ -12899,20 +13178,29 @@ normalize-package-data@^6.0.0, normalize-package-data@^6.0.1: semver "^7.3.5" validate-npm-package-license "^3.0.4" +normalize-package-data@^8.0.0: + version "8.0.0" + resolved "https://registry.yarnpkg.com/normalize-package-data/-/normalize-package-data-8.0.0.tgz#bdce7ff2d6ba891b853e179e45a5337766e304a7" + integrity sha512-RWk+PI433eESQ7ounYxIp67CYuVsS1uYSonX3kA6ps/3LWfjVQa/ptEg6Y3T6uAMq1mWpX9PQ+qx+QaHpsc7gQ== + dependencies: + hosted-git-info "^9.0.0" + semver "^7.3.5" + validate-npm-package-license "^3.0.4" + normalize-path@^3.0.0, 
normalize-path@~3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-3.0.0.tgz#0dcd69ff23a1c9b11fd0978316644a0388216a65" integrity sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA== -normalize-url@^8.0.0: - version "8.1.0" - resolved "https://registry.yarnpkg.com/normalize-url/-/normalize-url-8.1.0.tgz#d33504f67970decf612946fd4880bc8c0983486d" - integrity sha512-X06Mfd/5aKsRHc0O0J5CUedwnPmnDtLF2+nq+KN9KSDlJHkPuh0JUviWjEWMe0SW/9TDdSLVPuk7L5gGTIA1/w== +normalize-url@^9.0.0: + version "9.0.0" + resolved "https://registry.yarnpkg.com/normalize-url/-/normalize-url-9.0.0.tgz#9a2c3e23dcc3cb4c5be7d70c6377cddd76e57dc1" + integrity sha512-z9nC87iaZXXySbWWtTHfCFJyFvKaUAW6lODhikG7ILSbVgmwuFjUqkgnheHvAUcGedO29e2QGBRXMUD64aurqQ== -npm-audit-report@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/npm-audit-report/-/npm-audit-report-5.0.0.tgz#83ac14aeff249484bde81eff53c3771d5048cf95" - integrity sha512-EkXrzat7zERmUhHaoren1YhTxFwsOu5jypE84k6632SXTHcQE1z8V51GC6GVZt8LxkC+tbBcKMUBZAgk8SUSbw== +npm-audit-report@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/npm-audit-report/-/npm-audit-report-7.0.0.tgz#c384ac4afede55f21b30778202ad568e54644c35" + integrity sha512-bluLL4xwGr/3PERYz50h2Upco0TJMDcLcymuFnfDWeGO99NqH724MNzhWi5sXXuXf2jbytFF0LyR8W+w1jTI6A== npm-bundled@^3.0.0: version "3.0.0" @@ -12921,18 +13209,37 @@ npm-bundled@^3.0.0: dependencies: npm-normalize-package-bin "^3.0.0" -npm-install-checks@^6.0.0, npm-install-checks@^6.2.0, npm-install-checks@^6.3.0: +npm-bundled@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/npm-bundled/-/npm-bundled-5.0.0.tgz#5025d847cfd06c7b8d9432df01695d0133d9ee80" + integrity sha512-JLSpbzh6UUXIEoqPsYBvVNVmyrjVZ1fzEFbqxKkTJQkWBO3xFzFT+KDnSKQWwOQNbuWRwt5LSD6HOTLGIWzfrw== + dependencies: + npm-normalize-package-bin "^5.0.0" + +npm-install-checks@^6.0.0, npm-install-checks@^6.2.0: version "6.3.0" resolved 
"https://registry.yarnpkg.com/npm-install-checks/-/npm-install-checks-6.3.0.tgz#046552d8920e801fa9f919cad569545d60e826fe" integrity sha512-W29RiK/xtpCGqn6f3ixfRYGk+zRyr+Ew9F2E20BfXxT5/euLdA/Nm7fO7OeTGuAmTs30cpgInyJ0cYe708YTZw== dependencies: semver "^7.1.1" +npm-install-checks@^8.0.0: + version "8.0.0" + resolved "https://registry.yarnpkg.com/npm-install-checks/-/npm-install-checks-8.0.0.tgz#f5d18e909bb8318d85093e9d8f36ac427c1cbe30" + integrity sha512-ScAUdMpyzkbpxoNekQ3tNRdFI8SJ86wgKZSQZdUxT+bj0wVFpsEMWnkXP0twVe1gJyNF5apBWDJhhIbgrIViRA== + dependencies: + semver "^7.1.1" + npm-normalize-package-bin@^3.0.0: version "3.0.1" resolved "https://registry.yarnpkg.com/npm-normalize-package-bin/-/npm-normalize-package-bin-3.0.1.tgz#25447e32a9a7de1f51362c61a559233b89947832" integrity sha512-dMxCf+zZ+3zeQZXKxmyuCKlIDPGuv8EF940xbkC4kQVDTtqoh6rJFO+JTKSA6/Rwi0getWmtuy4Itup0AMcaDQ== +npm-normalize-package-bin@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/npm-normalize-package-bin/-/npm-normalize-package-bin-5.0.0.tgz#2b207ff260f2e525ddce93356614e2f736728f89" + integrity sha512-CJi3OS4JLsNMmr2u07OJlhcrPxCeOeP/4xq67aWNai6TNWWbTrlNDgl8NcFKVlcBKp18GPj+EzbNIgrBfZhsag== + npm-package-arg@11.0.2: version "11.0.2" resolved "https://registry.yarnpkg.com/npm-package-arg/-/npm-package-arg-11.0.2.tgz#1ef8006c4a9e9204ddde403035f7ff7d718251ca" @@ -12943,16 +13250,6 @@ npm-package-arg@11.0.2: semver "^7.3.5" validate-npm-package-name "^5.0.0" -npm-package-arg@^10.0.0, npm-package-arg@^10.1.0: - version "10.1.0" - resolved "https://registry.yarnpkg.com/npm-package-arg/-/npm-package-arg-10.1.0.tgz#827d1260a683806685d17193073cc152d3c7e9b1" - integrity sha512-uFyyCEmgBfZTtrKk/5xDfHp6+MdrqGotX/VoOyEEl3mBwiEE5FlBaePanazJSVMPT7vKepcjYBY2ztg9A3yPIA== - dependencies: - hosted-git-info "^6.0.0" - proc-log "^3.0.0" - semver "^7.3.5" - validate-npm-package-name "^5.0.0" - npm-package-arg@^11.0.0, npm-package-arg@^11.0.2: version "11.0.3" resolved 
"https://registry.yarnpkg.com/npm-package-arg/-/npm-package-arg-11.0.3.tgz#dae0c21199a99feca39ee4bfb074df3adac87e2d" @@ -12963,6 +13260,16 @@ npm-package-arg@^11.0.0, npm-package-arg@^11.0.2: semver "^7.3.5" validate-npm-package-name "^5.0.0" +npm-package-arg@^13.0.0, npm-package-arg@^13.0.2: + version "13.0.2" + resolved "https://registry.yarnpkg.com/npm-package-arg/-/npm-package-arg-13.0.2.tgz#72a80f2afe8329860e63854489415e9e9a2f78a7" + integrity sha512-IciCE3SY3uE84Ld8WZU23gAPPV9rIYod4F+rc+vJ7h7cwAJt9Vk6TVsK60ry7Uj3SRS3bqRRIGuTp9YVlk6WNA== + dependencies: + hosted-git-info "^9.0.0" + proc-log "^6.0.0" + semver "^7.3.5" + validate-npm-package-name "^7.0.0" + npm-packlist@8.0.2, npm-packlist@^8.0.0: version "8.0.2" resolved "https://registry.yarnpkg.com/npm-packlist/-/npm-packlist-8.0.2.tgz#5b8d1d906d96d21c85ebbeed2cf54147477c8478" @@ -12970,21 +13277,22 @@ npm-packlist@8.0.2, npm-packlist@^8.0.0: dependencies: ignore-walk "^6.0.4" -npm-packlist@^7.0.0: - version "7.0.4" - resolved "https://registry.yarnpkg.com/npm-packlist/-/npm-packlist-7.0.4.tgz#033bf74110eb74daf2910dc75144411999c5ff32" - integrity sha512-d6RGEuRrNS5/N84iglPivjaJPxhDbZmlbTwTDX2IbcRHG5bZCdtysYMhwiPvcF4GisXHGn7xsxv+GQ7T/02M5Q== +npm-packlist@^10.0.1: + version "10.0.4" + resolved "https://registry.yarnpkg.com/npm-packlist/-/npm-packlist-10.0.4.tgz#aa2e0e4daf910eae8c5745c2645cf8bb8813de01" + integrity sha512-uMW73iajD8hiH4ZBxEV3HC+eTnppIqwakjOYuvgddnalIw2lJguKviK1pcUJDlIWm1wSJkchpDZDSVVsZEYRng== dependencies: - ignore-walk "^6.0.0" + ignore-walk "^8.0.0" + proc-log "^6.0.0" -npm-pick-manifest@^8.0.0, npm-pick-manifest@^8.0.1, npm-pick-manifest@^8.0.2: - version "8.0.2" - resolved "https://registry.yarnpkg.com/npm-pick-manifest/-/npm-pick-manifest-8.0.2.tgz#2159778d9c7360420c925c1a2287b5a884c713aa" - integrity sha512-1dKY+86/AIiq1tkKVD3l0WI+Gd3vkknVGAggsFeBkTvbhMQ1OND/LKkYv4JtXPKUJ8bOTCyLiqEg2P6QNdK+Gg== +npm-pick-manifest@^11.0.1, npm-pick-manifest@^11.0.3: + version "11.0.3" + resolved 
"https://registry.yarnpkg.com/npm-pick-manifest/-/npm-pick-manifest-11.0.3.tgz#76cf6593a351849006c36b38a7326798e2a76d13" + integrity sha512-buzyCfeoGY/PxKqmBqn1IUJrZnUi1VVJTdSSRPGI60tJdUhUoSQFhs0zycJokDdOznQentgrpf8LayEHyyYlqQ== dependencies: - npm-install-checks "^6.0.0" - npm-normalize-package-bin "^3.0.0" - npm-package-arg "^10.0.0" + npm-install-checks "^8.0.0" + npm-normalize-package-bin "^5.0.0" + npm-package-arg "^13.0.0" semver "^7.3.5" npm-pick-manifest@^9.0.0, npm-pick-manifest@^9.0.1: @@ -12997,26 +13305,13 @@ npm-pick-manifest@^9.0.0, npm-pick-manifest@^9.0.1: npm-package-arg "^11.0.0" semver "^7.3.5" -npm-profile@^7.0.1: - version "7.0.1" - resolved "https://registry.yarnpkg.com/npm-profile/-/npm-profile-7.0.1.tgz#a37dae08b22e662ece2c6e08946f9fcd9fdef663" - integrity sha512-VReArOY/fCx5dWL66cbJ2OMogTQAVVQA//8jjmjkarboki3V7UJ0XbGFW+khRwiAJFQjuH0Bqr/yF7Y5RZdkMQ== +npm-profile@^12.0.1: + version "12.0.1" + resolved "https://registry.yarnpkg.com/npm-profile/-/npm-profile-12.0.1.tgz#f5aa0d931a4a75013a7521c86c30048e497310de" + integrity sha512-Xs1mejJ1/9IKucCxdFMkiBJUre0xaxfCpbsO7DB7CadITuT4k68eI05HBlw4kj+Em1rsFMgeFNljFPYvPETbVQ== dependencies: - npm-registry-fetch "^14.0.0" - proc-log "^3.0.0" - -npm-registry-fetch@^14.0.0, npm-registry-fetch@^14.0.3, npm-registry-fetch@^14.0.5: - version "14.0.5" - resolved "https://registry.yarnpkg.com/npm-registry-fetch/-/npm-registry-fetch-14.0.5.tgz#fe7169957ba4986a4853a650278ee02e568d115d" - integrity sha512-kIDMIo4aBm6xg7jOttupWZamsZRkAqMqwqqbVXnUqstY5+tapvv6bkH/qMR76jdgV+YljEUCyWx3hRYMrJiAgA== - dependencies: - make-fetch-happen "^11.0.0" - minipass "^5.0.0" - minipass-fetch "^3.0.0" - minipass-json-stream "^1.0.1" - minizlib "^2.1.2" - npm-package-arg "^10.0.0" - proc-log "^3.0.0" + npm-registry-fetch "^19.0.0" + proc-log "^6.0.0" npm-registry-fetch@^17.0.0, npm-registry-fetch@^17.0.1, npm-registry-fetch@^17.1.0: version "17.1.0" @@ -13032,6 +13327,20 @@ npm-registry-fetch@^17.0.0, npm-registry-fetch@^17.0.1, 
npm-registry-fetch@^17.1 npm-package-arg "^11.0.0" proc-log "^4.0.0" +npm-registry-fetch@^19.0.0, npm-registry-fetch@^19.1.1: + version "19.1.1" + resolved "https://registry.yarnpkg.com/npm-registry-fetch/-/npm-registry-fetch-19.1.1.tgz#51e96d21f409a9bc4f96af218a8603e884459024" + integrity sha512-TakBap6OM1w0H73VZVDf44iFXsOS3h+L4wVMXmbWOQroZgFhMch0juN6XSzBNlD965yIKvWg2dfu7NSiaYLxtw== + dependencies: + "@npmcli/redact" "^4.0.0" + jsonparse "^1.3.1" + make-fetch-happen "^15.0.0" + minipass "^7.0.2" + minipass-fetch "^5.0.0" + minizlib "^3.0.1" + npm-package-arg "^13.0.0" + proc-log "^6.0.0" + npm-run-path@^2.0.0: version "2.0.2" resolved "https://registry.yarnpkg.com/npm-run-path/-/npm-run-path-2.0.2.tgz#35a9232dfa35d7067b4cb2ddf2357b1871536c5f" @@ -13053,86 +13362,89 @@ npm-run-path@^5.1.0: dependencies: path-key "^4.0.0" -npm-user-validate@^2.0.0: - version "2.0.1" - resolved "https://registry.yarnpkg.com/npm-user-validate/-/npm-user-validate-2.0.1.tgz#097afbf0a2351e2a8f478f1ba07960b368f2a25c" - integrity sha512-d17PKaF2h8LSGFl5j4b1gHOJt1fgH7YUcCm1kNSJvaLWWKXlBsuUvx0bBEkr0qhsVA9XP5LtRZ83hdlhm2QkgA== +npm-run-path@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/npm-run-path/-/npm-run-path-6.0.0.tgz#25cfdc4eae04976f3349c0b1afc089052c362537" + integrity sha512-9qny7Z9DsQU8Ou39ERsPU4OZQlSTP47ShQzuKZ6PRXpYLtIFgl/DEBYEXKlvcEa+9tHVcK8CF81Y2V72qaZhWA== + dependencies: + path-key "^4.0.0" + unicorn-magic "^0.3.0" -npm@^9.5.0: - version "9.9.4" - resolved "https://registry.yarnpkg.com/npm/-/npm-9.9.4.tgz#572bef36e61852c5a391bb3b4eb86c231b1365cd" - integrity sha512-NzcQiLpqDuLhavdyJ2J3tGJ/ni/ebcqHVFZkv1C4/6lblraUPbPgCJ4Vhb4oa3FFhRa2Yj9gA58jGH/ztKueNQ== +npm-user-validate@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/npm-user-validate/-/npm-user-validate-4.0.0.tgz#f3c7e8360e46c651dbaf2fc4eea8f66df51ae6df" + integrity sha512-TP+Ziq/qPi/JRdhaEhnaiMkqfMGjhDLoh/oRfW+t5aCuIfJxIUxvwk6Sg/6ZJ069N/Be6gs00r+aZeJTfS9uHQ== + +npm@^11.6.2: + version 
"11.12.0" + resolved "https://registry.yarnpkg.com/npm/-/npm-11.12.0.tgz#b97efe05f16b9f4d31752aea58e2f0d66c6fbecf" + integrity sha512-xPhOap4ZbJWyd7DAOukP564WFwNSGu/2FeTRFHhiiKthcauxhH/NpkJAQm24xD+cAn8av5tQ00phi98DqtfLsg== dependencies: "@isaacs/string-locale-compare" "^1.1.0" - "@npmcli/arborist" "^6.5.0" - "@npmcli/config" "^6.4.0" - "@npmcli/fs" "^3.1.0" - "@npmcli/map-workspaces" "^3.0.4" - "@npmcli/package-json" "^4.0.1" - "@npmcli/promise-spawn" "^6.0.2" - "@npmcli/run-script" "^6.0.2" - abbrev "^2.0.0" + "@npmcli/arborist" "^9.4.2" + "@npmcli/config" "^10.8.0" + "@npmcli/fs" "^5.0.0" + "@npmcli/map-workspaces" "^5.0.3" + "@npmcli/metavuln-calculator" "^9.0.3" + "@npmcli/package-json" "^7.0.5" + "@npmcli/promise-spawn" "^9.0.1" + "@npmcli/redact" "^4.0.0" + "@npmcli/run-script" "^10.0.4" + "@sigstore/tuf" "^4.0.2" + abbrev "^4.0.0" archy "~1.0.0" - cacache "^17.1.4" - chalk "^5.3.0" - ci-info "^4.0.0" - cli-columns "^4.0.0" - cli-table3 "^0.6.3" - columnify "^1.6.0" + cacache "^20.0.4" + chalk "^5.6.2" + ci-info "^4.4.0" fastest-levenshtein "^1.0.16" fs-minipass "^3.0.3" - glob "^10.3.10" + glob "^13.0.6" graceful-fs "^4.2.11" - hosted-git-info "^6.1.3" - ini "^4.1.1" - init-package-json "^5.0.0" - is-cidr "^4.0.2" - json-parse-even-better-errors "^3.0.1" - libnpmaccess "^7.0.2" - libnpmdiff "^5.0.20" - libnpmexec "^6.0.4" - libnpmfund "^4.2.1" - libnpmhook "^9.0.3" - libnpmorg "^5.0.4" - libnpmpack "^5.0.20" - libnpmpublish "^7.5.1" - libnpmsearch "^6.0.2" - libnpmteam "^5.0.3" - libnpmversion "^4.0.2" - make-fetch-happen "^11.1.1" - minimatch "^9.0.3" - minipass "^7.0.4" + hosted-git-info "^9.0.2" + ini "^6.0.0" + init-package-json "^8.2.5" + is-cidr "^6.0.3" + json-parse-even-better-errors "^5.0.0" + libnpmaccess "^10.0.3" + libnpmdiff "^8.1.5" + libnpmexec "^10.2.5" + libnpmfund "^7.0.19" + libnpmorg "^8.0.1" + libnpmpack "^9.1.5" + libnpmpublish "^11.1.3" + libnpmsearch "^9.0.1" + libnpmteam "^8.0.2" + libnpmversion "^8.0.3" + make-fetch-happen "^15.0.5" 
+ minimatch "^10.2.4" + minipass "^7.1.3" minipass-pipeline "^1.2.4" ms "^2.1.2" - node-gyp "^9.4.1" - nopt "^7.2.0" - normalize-package-data "^5.0.0" - npm-audit-report "^5.0.0" - npm-install-checks "^6.3.0" - npm-package-arg "^10.1.0" - npm-pick-manifest "^8.0.2" - npm-profile "^7.0.1" - npm-registry-fetch "^14.0.5" - npm-user-validate "^2.0.0" - npmlog "^7.0.1" - p-map "^4.0.0" - pacote "^15.2.0" - parse-conflict-json "^3.0.1" - proc-log "^3.0.0" + node-gyp "^12.2.0" + nopt "^9.0.0" + npm-audit-report "^7.0.0" + npm-install-checks "^8.0.0" + npm-package-arg "^13.0.2" + npm-pick-manifest "^11.0.3" + npm-profile "^12.0.1" + npm-registry-fetch "^19.1.1" + npm-user-validate "^4.0.0" + p-map "^7.0.4" + pacote "^21.5.0" + parse-conflict-json "^5.0.1" + proc-log "^6.1.0" qrcode-terminal "^0.12.0" - read "^2.1.0" - semver "^7.6.0" - sigstore "^1.9.0" - spdx-expression-parse "^3.0.1" - ssri "^10.0.5" - supports-color "^9.4.0" - tar "^6.2.1" + read "^5.0.1" + semver "^7.7.4" + spdx-expression-parse "^4.0.0" + ssri "^13.0.1" + supports-color "^10.2.2" + tar "^7.5.11" text-table "~0.2.0" - tiny-relative-date "^1.3.0" + tiny-relative-date "^2.0.2" treeverse "^3.0.0" - validate-npm-package-name "^5.0.0" - which "^3.0.1" - write-file-atomic "^5.0.1" + validate-npm-package-name "^7.0.2" + which "^6.0.1" npmlog@^5.0.1: version "5.0.1" @@ -13154,16 +13466,6 @@ npmlog@^6.0.0: gauge "^4.0.3" set-blocking "^2.0.0" -npmlog@^7.0.1: - version "7.0.1" - resolved "https://registry.yarnpkg.com/npmlog/-/npmlog-7.0.1.tgz#7372151a01ccb095c47d8bf1d0771a4ff1f53ac8" - integrity sha512-uJ0YFk/mCQpLBt+bxN88AKd+gyqZvZDbtiNxk6Waqcj2aPRyfVx8ITawkyQynxUagInjdYT1+qj4NfA5KJJUxg== - dependencies: - are-we-there-yet "^4.0.0" - console-control-strings "^1.1.0" - gauge "^5.0.0" - set-blocking "^2.0.0" - nth-check@^2.0.1: version "2.1.1" resolved "https://registry.yarnpkg.com/nth-check/-/nth-check-2.1.1.tgz#c9eab428effce36cd6b92c924bdb000ef1f1ed1d" @@ -13222,7 +13524,7 @@ nth-check@^2.0.1: 
"@nx/nx-win32-arm64-msvc" "20.8.1" "@nx/nx-win32-x64-msvc" "20.8.1" -object-assign@^4, object-assign@^4.1.1: +object-assign@^4, object-assign@^4.0.1, object-assign@^4.1.1: version "4.1.1" resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863" integrity sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg== @@ -13507,6 +13809,13 @@ p-each-series@^3.0.0: resolved "https://registry.yarnpkg.com/p-each-series/-/p-each-series-3.0.0.tgz#d1aed5e96ef29864c897367a7d2a628fdc960806" integrity sha512-lastgtAdoH9YaLyDa5i5z64q+kzOcQHsQ5SsZJD3q0VEyI8mq872S3geuNbRUQLVAE9siMfgKrpj7MloKFHruw== +p-event@^6.0.0: + version "6.0.1" + resolved "https://registry.yarnpkg.com/p-event/-/p-event-6.0.1.tgz#8f62a1e3616d4bc01fce3abda127e0383ef4715b" + integrity sha512-Q6Bekk5wpzW5qIyUP4gdMEujObYstZl6DMMOSenwBvV0BlE5LkDwkjs5yHbZmdCEq2o4RJx4tE1vwxFVf2FG1w== + dependencies: + p-timeout "^6.1.2" + p-filter@^4.0.0: version "4.1.0" resolved "https://registry.yarnpkg.com/p-filter/-/p-filter-4.1.0.tgz#fe0aa794e2dfad8ecf595a39a245484fcd09c6e4" @@ -13545,13 +13854,6 @@ p-limit@^3.0.2, p-limit@^3.1.0: dependencies: yocto-queue "^0.1.0" -p-limit@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-4.0.0.tgz#914af6544ed32bfa54670b061cafcbd04984b644" - integrity sha512-5b0R4txpzjPWVw/cXXUResoD4hb6U/x9BH08L7nw+GN1sezDzPdxeRvpc9c433fZhBan/wusjbCsqwqm4EIBIQ== - dependencies: - yocto-queue "^1.0.0" - p-locate@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-2.0.0.tgz#20a0103b222a70c8fd39cc2e580680f3dde5ec43" @@ -13573,13 +13875,6 @@ p-locate@^5.0.0: dependencies: p-limit "^3.0.2" -p-locate@^6.0.0: - version "6.0.0" - resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-6.0.0.tgz#3da9a49d4934b901089dca3302fa65dc5a05c04f" - integrity sha512-wPrq66Llhl7/4AGC6I+cqxT07LhXvWL08LNXz1fENOw0Ap4sRZZ/gZpTTJ5jpurzzzfS2W/Ge9BY3LgLjCShcw== - 
dependencies: - p-limit "^4.0.0" - p-map-series@2.1.0: version "2.1.0" resolved "https://registry.yarnpkg.com/p-map-series/-/p-map-series-2.1.0.tgz#7560d4c452d9da0c07e692fdbfe6e2c81a2a91f2" @@ -13597,6 +13892,11 @@ p-map@^7.0.1: resolved "https://registry.yarnpkg.com/p-map/-/p-map-7.0.3.tgz#7ac210a2d36f81ec28b736134810f7ba4418cdb6" integrity sha512-VkndIv2fIB99swvQoA65bm+fsmt6UNdGeIB0oxBs+WhAhdh08QA04JXpI7rbB9r08/nkbysKoya9rtDERYOYMA== +p-map@^7.0.2, p-map@^7.0.4: + version "7.0.4" + resolved "https://registry.yarnpkg.com/p-map/-/p-map-7.0.4.tgz#b81814255f542e252d5729dca4d66e5ec14935b8" + integrity sha512-tkAQEw8ysMzmkhgw8k+1U/iPhWNhykKnSk4Rd5zLoPJCuJaGRPo6YposrZgaxHKzDHdDWWZvE/Sk7hsL2X/CpQ== + p-pipe@3.1.0: version "3.1.0" resolved "https://registry.yarnpkg.com/p-pipe/-/p-pipe-3.1.0.tgz#48b57c922aa2e1af6a6404cb7c6bf0eb9cc8e60e" @@ -13650,6 +13950,11 @@ p-timeout@^3.2.0: dependencies: p-finally "^1.0.0" +p-timeout@^6.1.2: + version "6.1.4" + resolved "https://registry.yarnpkg.com/p-timeout/-/p-timeout-6.1.4.tgz#418e1f4dd833fa96a2e3f532547dd2abdb08dbc2" + integrity sha512-MyIV3ZA/PmyBN/ud8vV9XzwTrNtR4jFrObymZYnZqMmW0zA8Z17vnT0rBgFE/TlohB+YCHqXMgZzb3Csp49vqg== + p-timeout@^7.0.0: version "7.0.1" resolved "https://registry.yarnpkg.com/p-timeout/-/p-timeout-7.0.1.tgz#95680a6aa693c530f14ac337b8bd32d4ec6ae4f0" @@ -13672,40 +13977,11 @@ p-waterfall@2.1.1: dependencies: p-reduce "^2.0.0" -package-json-from-dist@^1.0.0: - version "1.0.1" - resolved "https://registry.yarnpkg.com/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz#4f1471a010827a86f94cfd9b0727e36d267de505" - integrity sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw== - packet-reader@1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/packet-reader/-/packet-reader-1.0.0.tgz#9238e5480dedabacfe1fe3f2771063f164157d74" integrity sha512-HAKu/fG3HpHFO0AA8WE8q2g+gBJaZ9MG7fcKk+IJPLTGAD6Psw4443l+9DGRbOIh3/aXr7Phy0TjilYivJo5XQ== -pacote@^15.0.0, 
pacote@^15.0.8, pacote@^15.2.0: - version "15.2.0" - resolved "https://registry.yarnpkg.com/pacote/-/pacote-15.2.0.tgz#0f0dfcc3e60c7b39121b2ac612bf8596e95344d3" - integrity sha512-rJVZeIwHTUta23sIZgEIM62WYwbmGbThdbnkt81ravBplQv+HjyroqnLRNH2+sLJHcGZmLRmhPwACqhfTcOmnA== - dependencies: - "@npmcli/git" "^4.0.0" - "@npmcli/installed-package-contents" "^2.0.1" - "@npmcli/promise-spawn" "^6.0.1" - "@npmcli/run-script" "^6.0.0" - cacache "^17.0.0" - fs-minipass "^3.0.0" - minipass "^5.0.0" - npm-package-arg "^10.0.0" - npm-packlist "^7.0.0" - npm-pick-manifest "^8.0.0" - npm-registry-fetch "^14.0.0" - proc-log "^3.0.0" - promise-retry "^2.0.1" - read-package-json "^6.0.0" - read-package-json-fast "^3.0.0" - sigstore "^1.3.0" - ssri "^10.0.0" - tar "^6.1.11" - pacote@^18.0.0, pacote@^18.0.6: version "18.0.6" resolved "https://registry.yarnpkg.com/pacote/-/pacote-18.0.6.tgz#ac28495e24f4cf802ef911d792335e378e86fac7" @@ -13729,6 +14005,29 @@ pacote@^18.0.0, pacote@^18.0.6: ssri "^10.0.0" tar "^6.1.11" +pacote@^21.0.0, pacote@^21.0.2, pacote@^21.5.0: + version "21.5.0" + resolved "https://registry.yarnpkg.com/pacote/-/pacote-21.5.0.tgz#475fe00db73585dec296590bec484109522e9e6f" + integrity sha512-VtZ0SB8mb5Tzw3dXDfVAIjhyVKUHZkS/ZH9/5mpKenwC9sFOXNI0JI7kEF7IMkwOnsWMFrvAZHzx1T5fmrp9FQ== + dependencies: + "@gar/promise-retry" "^1.0.0" + "@npmcli/git" "^7.0.0" + "@npmcli/installed-package-contents" "^4.0.0" + "@npmcli/package-json" "^7.0.0" + "@npmcli/promise-spawn" "^9.0.0" + "@npmcli/run-script" "^10.0.0" + cacache "^20.0.0" + fs-minipass "^3.0.0" + minipass "^7.0.2" + npm-package-arg "^13.0.0" + npm-packlist "^10.0.1" + npm-pick-manifest "^11.0.1" + npm-registry-fetch "^19.0.0" + proc-log "^6.0.0" + sigstore "^4.0.0" + ssri "^13.0.0" + tar "^7.4.3" + pako@~1.0.2: version "1.0.11" resolved "https://registry.yarnpkg.com/pako/-/pako-1.0.11.tgz#6c9599d340d54dfd3946380252a35705a6b992bf" @@ -13741,7 +14040,7 @@ parent-module@^1.0.0: dependencies: callsites "^3.0.0" 
-parse-conflict-json@^3.0.0, parse-conflict-json@^3.0.1: +parse-conflict-json@^3.0.0: version "3.0.1" resolved "https://registry.yarnpkg.com/parse-conflict-json/-/parse-conflict-json-3.0.1.tgz#67dc55312781e62aa2ddb91452c7606d1969960c" integrity sha512-01TvEktc68vwbJOtWZluyWeVGWjP+bZwXtPDMQVbBKzbJ/vZBif0L69KH1+cHv1SZ6e0FKLvjyHe8mqsIqYOmw== @@ -13750,6 +14049,15 @@ parse-conflict-json@^3.0.0, parse-conflict-json@^3.0.1: just-diff "^6.0.0" just-diff-apply "^5.2.0" +parse-conflict-json@^5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/parse-conflict-json/-/parse-conflict-json-5.0.1.tgz#db4acd7472fb400c9808eb86611c2ff72f4c84ba" + integrity sha512-ZHEmNKMq1wyJXNwLxyHnluPfRAFSIliBvbK/UiOceROt4Xh9Pz0fq49NytIaeaCUf5VR86hwQ/34FCcNU5/LKQ== + dependencies: + json-parse-even-better-errors "^5.0.0" + just-diff "^6.0.0" + just-diff-apply "^5.2.0" + parse-entities@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/parse-entities/-/parse-entities-2.0.0.tgz#53c6eb5b9314a1f4ec99fa0fdf7ce01ecda0cbe8" @@ -13780,16 +14088,19 @@ parse-json@^5.0.0, parse-json@^5.2.0: json-parse-even-better-errors "^2.3.0" lines-and-columns "^1.1.6" -parse-json@^7.0.0: - version "7.1.1" - resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-7.1.1.tgz#68f7e6f0edf88c54ab14c00eb700b753b14e2120" - integrity sha512-SgOTCX/EZXtZxBE5eJ97P4yGM5n37BwRU+YMsH4vNzFqJV/oWFXXCmwFlgWUM4PrakybVOueJJ6pwHqSVhTFDw== +parse-json@^8.0.0, parse-json@^8.3.0: + version "8.3.0" + resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-8.3.0.tgz#88a195a2157025139a2317a4f2f9252b61304ed5" + integrity sha512-ybiGyvspI+fAoRQbIPRddCcSTV9/LsJbf0e/S85VLowVGzRmokfneg2kwVW/KU5rOXrPSbF1qAKPMgNTqqROQQ== dependencies: - "@babel/code-frame" "^7.21.4" - error-ex "^1.3.2" - json-parse-even-better-errors "^3.0.0" - lines-and-columns "^2.0.3" - type-fest "^3.8.0" + "@babel/code-frame" "^7.26.2" + index-to-position "^1.1.0" + type-fest "^4.39.1" + +parse-ms@^4.0.0: + version "4.0.0" + resolved 
"https://registry.yarnpkg.com/parse-ms/-/parse-ms-4.0.0.tgz#c0c058edd47c2a590151a718990533fd62803df4" + integrity sha512-TXfryirbmq34y8QBwgqCVLi+8oA3oWx2eAnSn62ITyEhEYaWRlVZ2DvMM9eZbMs/RfxPu/PK/aBLyGj4IrqMHw== parse-path@^7.0.0: version "7.0.0" @@ -13805,6 +14116,13 @@ parse-url@^8.1.0: dependencies: parse-path "^7.0.0" +parse5-htmlparser2-tree-adapter@^6.0.0: + version "6.0.1" + resolved "https://registry.yarnpkg.com/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-6.0.1.tgz#2cdf9ad823321140370d4dbf5d3e92c7c8ddc6e6" + integrity sha512-qPuWvbLgvDGilKc5BoicRovlT4MtYT6JfJyBOMDsKoiT+GiuP5qyrPCnR9HcPECIJJmZh5jRndyNThnhhb/vlA== + dependencies: + parse5 "^6.0.1" + parse5-htmlparser2-tree-adapter@^7.0.0: version "7.0.0" resolved "https://registry.yarnpkg.com/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-7.0.0.tgz#23c2cc233bcf09bb7beba8b8a69d46b08c62c2f1" @@ -13813,6 +14131,16 @@ parse5-htmlparser2-tree-adapter@^7.0.0: domhandler "^5.0.2" parse5 "^7.0.0" +parse5@^5.1.1: + version "5.1.1" + resolved "https://registry.yarnpkg.com/parse5/-/parse5-5.1.1.tgz#f68e4e5ba1852ac2cadc00f4555fff6c2abb6178" + integrity sha512-ugq4DFI0Ptb+WWjAdOK16+u/nHfiIrcE+sh8kZMaM0WllQKLI9rOUq6c2b7cwPkXdzfQESqvoqK6ug7U/Yyzug== + +parse5@^6.0.1: + version "6.0.1" + resolved "https://registry.yarnpkg.com/parse5/-/parse5-6.0.1.tgz#e1a1c085c569b3dc08321184f19a39cc27f7c30b" + integrity sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw== + parse5@^7.0.0: version "7.1.2" resolved "https://registry.yarnpkg.com/parse5/-/parse5-7.1.2.tgz#0736bebbfd77793823240a23b7fc5e010b7f8e32" @@ -13843,10 +14171,10 @@ path-exists@^4.0.0: resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-4.0.0.tgz#513bdbe2d3b95d7762e8c1137efa195c6c61b5b3" integrity sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w== -path-exists@^5.0.0: - version "5.0.0" - resolved 
"https://registry.yarnpkg.com/path-exists/-/path-exists-5.0.0.tgz#a6aad9489200b21fab31e49cf09277e5116fb9e7" - integrity sha512-RjhtfwJOxzcFmNOi6ltcbcu4Iu+FL3zEj83dk4kAS+fVpTxXLO1b38RvJgT/0QwvV/L3aY9TAnyv0EOqW4GoMQ== +path-expression-matcher@^1.1.3, path-expression-matcher@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/path-expression-matcher/-/path-expression-matcher-1.2.0.tgz#9bdae3787f43b0857b0269e9caaa586c12c8abee" + integrity sha512-DwmPWeFn+tq7TiyJ2CxezCAirXjFxvaiD03npak3cRjlP9+OjTmSy1EpIrEbh+l6JgUundniloMLDQ/6VTdhLQ== path-is-absolute@^1.0.0: version "1.0.1" @@ -13873,13 +14201,13 @@ path-parse@^1.0.7: resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735" integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw== -path-scurry@^1.11.1, path-scurry@^1.6.1: - version "1.11.1" - resolved "https://registry.yarnpkg.com/path-scurry/-/path-scurry-1.11.1.tgz#7960a668888594a0720b12a911d1a742ab9f11d2" - integrity sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA== +path-scurry@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/path-scurry/-/path-scurry-2.0.2.tgz#6be0d0ee02a10d9e0de7a98bae65e182c9061f85" + integrity sha512-3O/iVVsJAPsOnpwWIeD+d6z/7PmqApyQePUtCndjatj/9I5LylHvt5qluFaBT3I5h3r1ejfR056c+FCv+NnNXg== dependencies: - lru-cache "^10.2.0" - minipass "^5.0.0 || ^6.0.2 || ^7.0.0" + lru-cache "^11.0.0" + minipass "^7.1.2" path-to-regexp@0.1.12, path-to-regexp@~0.1.12: version "0.1.12" @@ -13918,11 +14246,6 @@ path-type@^4.0.0: resolved "https://registry.yarnpkg.com/path-type/-/path-type-4.0.0.tgz#84ed01c0a7ba380afe09d90a8c180dcd9d03043b" integrity sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw== -path-type@^6.0.0: - version "6.0.0" - resolved 
"https://registry.yarnpkg.com/path-type/-/path-type-6.0.0.tgz#2f1bb6791a91ce99194caede5d6c5920ed81eb51" - integrity sha512-Vj7sf++t5pBD637NSfkxpHSMfWaeig5+DKWLhcqIYx6mWQz5hdJTGDVMQiJcw1ZYkhs7AazKDGpRVji1LJCZUQ== - peek-readable@^4.1.0: version "4.1.0" resolved "https://registry.yarnpkg.com/peek-readable/-/peek-readable-4.1.0.tgz#4ece1111bf5c2ad8867c314c81356847e8a62e72" @@ -14029,16 +14352,16 @@ picomatch@^2.0.4, picomatch@^2.2.1, picomatch@^2.2.3, picomatch@^2.3.1: resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.3.1.tgz#3ba3833733646d9d3e4995946c1365a67fb07a42" integrity sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA== -picomatch@^2.0.5: - version "2.3.2" - resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.3.2.tgz#5a942915e26b372dc0f0e6753149a16e6b1c5601" - integrity sha512-V7+vQEJ06Z+c5tSye8S+nHUfI51xoXIXjHQ99cQtKUkQqqO1kO/KCJUfZXuB47h/YBlDhah2H3hdUGXn8ie0oA== - picomatch@^4.0.2: version "4.0.3" resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-4.0.3.tgz#796c76136d1eead715db1e7bad785dedd695a042" integrity sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q== +picomatch@^4.0.3: + version "4.0.4" + resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-4.0.4.tgz#fd6f5e00a143086e074dffe4c924b8fb293b0589" + integrity sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A== + pify@5.0.0: version "5.0.0" resolved "https://registry.yarnpkg.com/pify/-/pify-5.0.0.tgz#1f5eca3f5e87ebec28cc6d54a0e4aaf00acc127f" @@ -14153,6 +14476,11 @@ pluralize@8.0.0, pluralize@^8.0.0: resolved "https://registry.yarnpkg.com/pluralize/-/pluralize-8.0.0.tgz#1a6fa16a38d12a1901e0320fa017051c539ce3b1" integrity sha512-Nc3IT5yHzflTfbjgqWcCPpo7DaKy4FnpB0l/zCAW0Tc7jxAiuqSxHasntB3D7887LSrA93kDJ9IXovxJYxyLCA== +pony-cause@^2.1.4: + version "2.1.11" + resolved 
"https://registry.yarnpkg.com/pony-cause/-/pony-cause-2.1.11.tgz#d69a20aaccdb3bdb8f74dd59e5c68d8e6772e4bd" + integrity sha512-M7LhCsdNbNgiLYiP4WjsfLUuFmCfnjdF6jKe2R9NKl4WFN+HZPGHJZ9lnLP7f9ZnKe3U9nuWD0szirmj+migUg== + possible-typed-array-names@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/possible-typed-array-names/-/possible-typed-array-names-1.0.0.tgz#89bb63c6fada2c3e90adc4a647beeeb39cc7bf8f" @@ -14166,6 +14494,14 @@ postcss-selector-parser@^6.0.10: cssesc "^3.0.0" util-deprecate "^1.0.2" +postcss-selector-parser@^7.0.0: + version "7.1.1" + resolved "https://registry.yarnpkg.com/postcss-selector-parser/-/postcss-selector-parser-7.1.1.tgz#e75d2e0d843f620e5df69076166f4e16f891cb9f" + integrity sha512-orRsuYpJVw8LdAwqqLykBj9ecS5/cRHlI5+nvTo8LcCKmzDmqVORXtOIYEEQuL9D4BxtA1lm5isAqzQZCoQ6Eg== + dependencies: + cssesc "^3.0.0" + util-deprecate "^1.0.2" + postgres-array@~2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/postgres-array/-/postgres-array-2.0.0.tgz#48f8fce054fbc69671999329b8834b772652d82e" @@ -14188,7 +14524,7 @@ postgres-interval@^1.1.0: dependencies: xtend "^4.0.0" -prebuild-install@^7.1.1: +prebuild-install@^7.1.1, prebuild-install@^7.1.3: version "7.1.3" resolved "https://registry.yarnpkg.com/prebuild-install/-/prebuild-install-7.1.3.tgz#d630abad2b147443f20a212917beae68b8092eec" integrity sha512-8Mf2cbV7x1cXPUILADGI3wuhfqWvtiLA1iclTDbFRZkgRQS0NqsPZphna9V+HyTEadheuPmjaJMsbzKQFOzLug== @@ -14260,16 +14596,23 @@ pretty-format@^29.0.0, pretty-format@^29.7.0: ansi-styles "^5.0.0" react-is "^18.0.0" -proc-log@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/proc-log/-/proc-log-3.0.0.tgz#fb05ef83ccd64fd7b20bbe9c8c1070fc08338dd8" - integrity sha512-++Vn7NS4Xf9NacaU9Xq3URUuqZETPsf8L4j5/ckhaRYsfPeRyzGw+iDjFhV/Jr3uNmTvvddEJFWh5R1gRgUH8A== +pretty-ms@^9.2.0: + version "9.3.0" + resolved "https://registry.yarnpkg.com/pretty-ms/-/pretty-ms-9.3.0.tgz#dd2524fcb3c326b4931b2272dfd1e1a8ed9a9f5a" + integrity 
sha512-gjVS5hOP+M3wMm5nmNOucbIrqudzs9v/57bWRHQWLYklXqoXKrVfYW2W9+glfGsqtPgpiz5WwyEEB+ksXIx3gQ== + dependencies: + parse-ms "^4.0.0" proc-log@^4.0.0, proc-log@^4.1.0, proc-log@^4.2.0: version "4.2.0" resolved "https://registry.yarnpkg.com/proc-log/-/proc-log-4.2.0.tgz#b6f461e4026e75fdfe228b265e9f7a00779d7034" integrity sha512-g8+OnU/L2v+wyiVK+D5fA34J7EH8jZ8DDlvwhRCMxmMj7UCBvxiO1mGeN+36JXIKF4zevU4kRBd8lVgG9vLelA== +proc-log@^6.0.0, proc-log@^6.1.0: + version "6.1.0" + resolved "https://registry.yarnpkg.com/proc-log/-/proc-log-6.1.0.tgz#18519482a37d5198e231133a70144a50f21f0215" + integrity sha512-iG+GYldRf2BQ0UDUAd6JQ/RwzaQy6mXmsk/IzlYyal4A4SNFw54MeH4/tLkF4I5WoWG9SQwuqWzS99jaFQHBuQ== + process-nextick-args@~2.0.0: version "2.0.1" resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-2.0.1.tgz#7820d9b16120cc55ca9ae7792680ae7dba6d7fe2" @@ -14305,6 +14648,11 @@ proggy@^2.0.0: resolved "https://registry.yarnpkg.com/proggy/-/proggy-2.0.0.tgz#154bb0e41d3125b518ef6c79782455c2c47d94e1" integrity sha512-69agxLtnI8xBs9gUGqEnK26UfiexpHy+KUpBQWabiytQjnn5wFY8rklAi7GRfABIuPNnQ/ik48+LGLkYYJcy4A== +proggy@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/proggy/-/proggy-4.0.0.tgz#85fa89d7c81bc3fb77992a80f47bb1e17c610fa3" + integrity sha512-MbA4R+WQT76ZBm/5JUpV9yqcJt92175+Y0Bodg3HgiXzrmKu7Ggq+bpn6y6wHH+gN9NcyKn3yg1+d47VaKwNAQ== + progress@2.0.3, progress@^2.0.3: version "2.0.3" resolved "https://registry.yarnpkg.com/progress/-/progress-2.0.3.tgz#7e8cf8d8f5b8f239c1bc68beb4eb78567d572ef8" @@ -14315,11 +14663,6 @@ promise-all-reject-late@^1.0.0: resolved "https://registry.yarnpkg.com/promise-all-reject-late/-/promise-all-reject-late-1.0.1.tgz#f8ebf13483e5ca91ad809ccc2fcf25f26f8643c2" integrity sha512-vuf0Lf0lOxyQREH7GDIOUMLS7kz+gs8i6B+Yi8dC68a2sychGrHTJYghMBD6k7eUcH0H5P73EckCA48xijWqXw== -promise-call-limit@^1.0.2: - version "1.0.2" - resolved 
"https://registry.yarnpkg.com/promise-call-limit/-/promise-call-limit-1.0.2.tgz#f64b8dd9ef7693c9c7613e7dfe8d6d24de3031ea" - integrity sha512-1vTUnfI2hzui8AEIixbdAJlFY4LFDXqQswy/2eOlThAscXCY4It8FdVuI0fMJGAB2aWGbdQf/gv0skKYXmdrHA== - promise-call-limit@^3.0.1: version "3.0.2" resolved "https://registry.yarnpkg.com/promise-call-limit/-/promise-call-limit-3.0.2.tgz#524b7f4b97729ff70417d93d24f46f0265efa4f9" @@ -14358,6 +14701,13 @@ promzard@^1.0.0: dependencies: read "^3.0.1" +promzard@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/promzard/-/promzard-3.0.1.tgz#e42b9b75197661e5707dc7077da8dfd3bdfd9e3d" + integrity sha512-M5mHhWh+Adz0BIxgSrqcc6GTCSconR7zWQV9vnOSptNtr6cSFlApLc28GbQhuN6oOWBQeV2C0bNE47JCY/zu3Q== + dependencies: + read "^5.0.0" + propagate@^2.0.0: version "2.0.1" resolved "https://registry.yarnpkg.com/propagate/-/propagate-2.0.1.tgz#40cdedab18085c792334e64f0ac17256d38f9a45" @@ -14419,17 +14769,10 @@ qrcode-terminal@^0.12.0: resolved "https://registry.yarnpkg.com/qrcode-terminal/-/qrcode-terminal-0.12.0.tgz#bb5b699ef7f9f0505092a3748be4464fe71b5819" integrity sha512-EXtzRZmC+YGmGlDFbXKxQiMZNwCLEO6BANKXG4iCtSIM0yqc/pappSx3RIKr4r0uh5JsBckOXeKrB3Iz7mdQpQ== -qs@6.13.0: - version "6.13.0" - resolved "https://registry.yarnpkg.com/qs/-/qs-6.13.0.tgz#6ca3bd58439f7e245655798997787b0d88a51906" - integrity sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg== - dependencies: - side-channel "^1.0.6" - -qs@^6.11.2, qs@^6.14.0, qs@^6.14.1, qs@^6.5.2, qs@~6.14.0: - version "6.14.2" - resolved "https://registry.yarnpkg.com/qs/-/qs-6.14.2.tgz#b5634cf9d9ad9898e31fba3504e866e8efb6798c" - integrity sha512-V/yCWTTF7VJ9hIh18Ugr2zhJMP01MY7c5kh4J870L7imm6/DIzBsNLTXzMwUA3yZ5b/KBqLx8Kp3uRvd7xSe3Q== +qs@6.13.0, qs@>=6.14.1, qs@^6.11.2, qs@^6.14.0, qs@^6.14.1, qs@^6.5.2, qs@~6.14.0: + version "6.15.0" + resolved "https://registry.yarnpkg.com/qs/-/qs-6.15.0.tgz#db8fd5d1b1d2d6b5b33adaf87429805f1909e7b3" + integrity 
sha512-mAZTtNCeetKMH+pSjrb76NAM8V9a05I9aBZOHztWy/UqcJdQYNsf59vrRKWnojAT9Y+GbIvoTBC++CPHqpDBhQ== dependencies: side-channel "^1.1.0" @@ -14523,6 +14866,11 @@ read-cmd-shim@4.0.0, read-cmd-shim@^4.0.0: resolved "https://registry.yarnpkg.com/read-cmd-shim/-/read-cmd-shim-4.0.0.tgz#640a08b473a49043e394ae0c7a34dd822c73b9bb" integrity sha512-yILWifhaSEEytfXI76kB9xEEiG1AiozaCJZ83A87ytjRiN+jVibXjedjCRNjoZviinhG+4UkalO3mWTd8u5O0Q== +read-cmd-shim@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/read-cmd-shim/-/read-cmd-shim-6.0.0.tgz#98f5c8566e535829f1f8afb1595aaf05fd0f3970" + integrity sha512-1zM5HuOfagXCBWMN83fuFI/x+T/UhZ7k+KIzhrHXcQoeX5+7gmaDYjELQHmmzIodumBHeByBJT4QYS7ufAgs7A== + read-package-json-fast@^3.0.0, read-package-json-fast@^3.0.2: version "3.0.2" resolved "https://registry.yarnpkg.com/read-package-json-fast/-/read-package-json-fast-3.0.2.tgz#394908a9725dc7a5f14e70c8e7556dff1d2b1049" @@ -14531,24 +14879,23 @@ read-package-json-fast@^3.0.0, read-package-json-fast@^3.0.2: json-parse-even-better-errors "^3.0.0" npm-normalize-package-bin "^3.0.0" -read-package-json@^6.0.0: - version "6.0.4" - resolved "https://registry.yarnpkg.com/read-package-json/-/read-package-json-6.0.4.tgz#90318824ec456c287437ea79595f4c2854708836" - integrity sha512-AEtWXYfopBj2z5N5PbkAOeNHRPUg5q+Nen7QLxV8M2zJq1ym6/lCz3fYNTCXe19puu2d06jfHhrP7v/S2PtMMw== +read-package-up@^11.0.0: + version "11.0.0" + resolved "https://registry.yarnpkg.com/read-package-up/-/read-package-up-11.0.0.tgz#71fb879fdaac0e16891e6e666df22de24a48d5ba" + integrity sha512-MbgfoNPANMdb4oRBNg5eqLbB2t2r+o5Ua1pNt8BqGp4I0FJZhuVSOj3PaBPni4azWuSzEdNn2evevzVmEk1ohQ== dependencies: - glob "^10.2.2" - json-parse-even-better-errors "^3.0.0" - normalize-package-data "^5.0.0" - npm-normalize-package-bin "^3.0.0" + find-up-simple "^1.0.0" + read-pkg "^9.0.0" + type-fest "^4.6.0" -read-pkg-up@^10.0.0: - version "10.1.0" - resolved 
"https://registry.yarnpkg.com/read-pkg-up/-/read-pkg-up-10.1.0.tgz#2d13ab732d2f05d6e8094167c2112e2ee50644f4" - integrity sha512-aNtBq4jR8NawpKJQldrQcSW9y/d+KWH4v24HWkHljOZ7H0av+YTGANBzRh9A5pw7v/bLVsLVPpOhJ7gHNVy8lA== +read-package-up@^12.0.0: + version "12.0.0" + resolved "https://registry.yarnpkg.com/read-package-up/-/read-package-up-12.0.0.tgz#7ae889586f397b7a291ca59ce08caf7e9f68a61c" + integrity sha512-Q5hMVBYur/eQNWDdbF4/Wqqr9Bjvtrw2kjGxxBbKLbx8bVCL8gcArjTy8zDUuLGQicftpMuU0riQNcAsbtOVsw== dependencies: - find-up "^6.3.0" - read-pkg "^8.1.0" - type-fest "^4.2.0" + find-up-simple "^1.0.1" + read-pkg "^10.0.0" + type-fest "^5.2.0" read-pkg-up@^3.0.0: version "3.0.0" @@ -14567,6 +14914,17 @@ read-pkg-up@^7.0.1: read-pkg "^5.2.0" type-fest "^0.8.1" +read-pkg@^10.0.0: + version "10.1.0" + resolved "https://registry.yarnpkg.com/read-pkg/-/read-pkg-10.1.0.tgz#eff31c7e505a4995a85c5af017b3dc413745431c" + integrity sha512-I8g2lArQiP78ll51UeMZojewtYgIRCKCWqZEgOO8c/uefTI+XDXvCSXu3+YNUaTNvZzobrL5+SqHjBrByRRTdg== + dependencies: + "@types/normalize-package-data" "^2.4.4" + normalize-package-data "^8.0.0" + parse-json "^8.3.0" + type-fest "^5.4.4" + unicorn-magic "^0.4.0" + read-pkg@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/read-pkg/-/read-pkg-3.0.0.tgz#9cbc686978fee65d16c00e2b19c237fcf6e38389" @@ -14586,22 +14944,16 @@ read-pkg@^5.2.0: parse-json "^5.0.0" type-fest "^0.6.0" -read-pkg@^8.0.0, read-pkg@^8.1.0: - version "8.1.0" - resolved "https://registry.yarnpkg.com/read-pkg/-/read-pkg-8.1.0.tgz#6cf560b91d90df68bce658527e7e3eee75f7c4c7" - integrity sha512-PORM8AgzXeskHO/WEv312k9U03B8K9JSiWF/8N9sUuFjBa+9SF2u6K7VClzXwDXab51jCd8Nd36CNM+zR97ScQ== +read-pkg@^9.0.0: + version "9.0.1" + resolved "https://registry.yarnpkg.com/read-pkg/-/read-pkg-9.0.1.tgz#b1b81fb15104f5dbb121b6bbdee9bbc9739f569b" + integrity sha512-9viLL4/n1BJUCT1NXVTdS1jtm80yDEgR5T4yCelII49Mbj0v1rZdKqj7zCiYdbB0CuCgdrvHcNogAKTFPBocFA== dependencies: - "@types/normalize-package-data" "^2.4.1" + 
"@types/normalize-package-data" "^2.4.3" normalize-package-data "^6.0.0" - parse-json "^7.0.0" - type-fest "^4.2.0" - -read@^2.0.0, read@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/read/-/read-2.1.0.tgz#69409372c54fe3381092bc363a00650b6ac37218" - integrity sha512-bvxi1QLJHcaywCAEsAk4DG3nVoqiY2Csps3qzWalhj5hFqRn1d/OixkFXtLO1PrgHUcAP0FNaSY/5GYNfENFFQ== - dependencies: - mute-stream "~1.0.0" + parse-json "^8.0.0" + type-fest "^4.6.0" + unicorn-magic "^0.1.0" read@^3.0.1: version "3.0.1" @@ -14610,6 +14962,13 @@ read@^3.0.1: dependencies: mute-stream "^1.0.0" +read@^5.0.0, read@^5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/read/-/read-5.0.1.tgz#e6b0a84743406182fdfc20b2418a11b39b7ef837" + integrity sha512-+nsqpqYkkpet2UVPG8ZiuE8d113DK4vHYEoEhcrXBAlPiq6di7QRTuNiKQAbaRYegobuX2BpZ6QjanKOXnJdTA== + dependencies: + mute-stream "^3.0.0" + readable-stream@3, readable-stream@^3.0.0, readable-stream@^3.0.2, readable-stream@^3.1.1, readable-stream@^3.4.0, readable-stream@^3.6.0: version "3.6.2" resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-3.6.2.tgz#56a9b36ea965c00c5a93ef31eb111a0f11056967" @@ -14816,6 +15175,15 @@ resolve@^1.10.0, resolve@^1.20.0, resolve@^1.22.4: path-parse "^1.0.7" supports-preserve-symlinks-flag "^1.0.0" +resolve@~1.22.1: + version "1.22.11" + resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.22.11.tgz#aad857ce1ffb8bfa9b0b1ac29f1156383f68c262" + integrity sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ== + dependencies: + is-core-module "^2.16.1" + path-parse "^1.0.7" + supports-preserve-symlinks-flag "^1.0.0" + restore-cursor@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/restore-cursor/-/restore-cursor-2.0.0.tgz#9f7ee287f82fd326d4fd162923d62129eee0dfaf" @@ -15021,46 +15389,39 @@ semantic-release-slack-bot@^4.0.2: node-fetch "^2.3.0" slackify-markdown "^4.3.0" -semantic-release@^21.0.5: - version "21.1.2" - resolved 
"https://registry.yarnpkg.com/semantic-release/-/semantic-release-21.1.2.tgz#f4c5ba7c17b53ce90bac4fa6ccf21178d0384445" - integrity sha512-kz76azHrT8+VEkQjoCBHE06JNQgTgsC4bT8XfCzb7DHcsk9vG3fqeMVik8h5rcWCYi2Fd+M3bwA7BG8Z8cRwtA== +semantic-release@^21.0.5, semantic-release@^25.0.0: + version "25.0.3" + resolved "https://registry.yarnpkg.com/semantic-release/-/semantic-release-25.0.3.tgz#77c2a7bfdcc63125fa2dea062d2cee28662ce224" + integrity sha512-WRgl5GcypwramYX4HV+eQGzUbD7UUbljVmS+5G1uMwX/wLgYuJAxGeerXJDMO2xshng4+FXqCgyB5QfClV6WjA== dependencies: - "@semantic-release/commit-analyzer" "^10.0.0" + "@semantic-release/commit-analyzer" "^13.0.1" "@semantic-release/error" "^4.0.0" - "@semantic-release/github" "^9.0.0" - "@semantic-release/npm" "^10.0.2" - "@semantic-release/release-notes-generator" "^11.0.0" + "@semantic-release/github" "^12.0.0" + "@semantic-release/npm" "^13.1.1" + "@semantic-release/release-notes-generator" "^14.1.0" aggregate-error "^5.0.0" - cosmiconfig "^8.0.0" + cosmiconfig "^9.0.0" debug "^4.0.0" - env-ci "^9.0.0" - execa "^8.0.0" - figures "^5.0.0" - find-versions "^5.1.0" + env-ci "^11.0.0" + execa "^9.0.0" + figures "^6.0.0" + find-versions "^6.0.0" get-stream "^6.0.0" git-log-parser "^1.2.0" - hook-std "^3.0.0" - hosted-git-info "^7.0.0" + hook-std "^4.0.0" + hosted-git-info "^9.0.0" + import-from-esm "^2.0.0" lodash-es "^4.17.21" - marked "^5.0.0" - marked-terminal "^5.1.1" + marked "^15.0.0" + marked-terminal "^7.3.0" micromatch "^4.0.2" p-each-series "^3.0.0" p-reduce "^3.0.0" - read-pkg-up "^10.0.0" + read-package-up "^12.0.0" resolve-from "^5.0.0" semver "^7.3.2" - semver-diff "^4.0.0" signale "^1.2.1" - yargs "^17.5.1" - -semver-diff@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/semver-diff/-/semver-diff-4.0.0.tgz#3afcf5ed6d62259f5c72d0d5d50dffbdc9680df5" - integrity sha512-0Ju4+6A8iOnpL/Thra7dZsSlOHYAHIeMxfhWQRI1/VLcT3WDBZKKtQt/QkBOsiIN9ZpuvHE6cGZ0x4glCMmfiA== - dependencies: - semver "^7.3.5" + yargs "^18.0.0" 
semver-regex@^4.0.5: version "4.0.5" @@ -15077,7 +15438,7 @@ semver-store@^0.3.0: resolved "https://registry.yarnpkg.com/semver/-/semver-5.7.2.tgz#48d55db737c3287cd4835e17fa13feace1c41ef8" integrity sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g== -semver@7.5.4, semver@^7.0.0, semver@^7.1.1, semver@^7.1.2, semver@^7.3.2, semver@^7.3.4, semver@^7.3.5, semver@^7.3.7, semver@^7.3.8, semver@^7.5.3, semver@^7.5.4: +semver@7.5.4, semver@^7.0.0, semver@^7.1.1, semver@^7.1.2, semver@^7.3.2, semver@^7.3.4, semver@^7.3.5, semver@^7.3.7, semver@^7.3.8, semver@^7.5.3, semver@^7.5.4, semver@~7.5.4: version "7.5.4" resolved "https://registry.yarnpkg.com/semver/-/semver-7.5.4.tgz#483986ec4ed38e1c6c48c34894a9182dbff68a6e" integrity sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA== @@ -15089,7 +15450,7 @@ semver@^6.0.0, semver@^6.3.0, semver@^6.3.1: resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.1.tgz#556d2ef8689146e46dcea4bfdd095f3434dffcb4" integrity sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA== -semver@^7.6.0: +semver@^7.5.2, semver@^7.7.2, semver@^7.7.4: version "7.7.4" resolved "https://registry.yarnpkg.com/semver/-/semver-7.7.4.tgz#28464e36060e991fa7a11d0279d2d3f3b57a7e8a" integrity sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA== @@ -15164,6 +15525,28 @@ sequelize-pool@^7.1.0: resolved "https://registry.yarnpkg.com/sequelize-pool/-/sequelize-pool-7.1.0.tgz#210b391af4002762f823188fd6ecfc7413020768" integrity sha512-G9c0qlIWQSK29pR/5U2JF5dDQeqqHRragoyahj/Nx4KOOQ3CPPfzxnfqFPCSB7x5UgjOgnZ61nSxz+fjDpRlJg== +sequelize@*, sequelize@^6.37.8: + version "6.37.8" + resolved "https://registry.yarnpkg.com/sequelize/-/sequelize-6.37.8.tgz#70e62c9e682a2009005093104591e59ec2d0ed2c" + integrity sha512-HJ0IQFqcTsTiqbEgiuioYFMSD00TP6Cz7zoTti+zVVBwVe9fEhev9cH6WnM3XU31+ABS356durAb99ZuOthnKw== + 
dependencies: + "@types/debug" "^4.1.8" + "@types/validator" "^13.7.17" + debug "^4.3.4" + dottie "^2.0.6" + inflection "^1.13.4" + lodash "^4.17.21" + moment "^2.29.4" + moment-timezone "^0.5.43" + pg-connection-string "^2.6.1" + retry-as-promised "^7.0.4" + semver "^7.5.4" + sequelize-pool "^7.1.0" + toposort-class "^1.0.1" + uuid "^8.3.2" + validator "^13.9.0" + wkx "^0.5.0" + sequelize@6.37.5, sequelize@^6.28.0, sequelize@^6.37.5: version "6.37.5" resolved "https://registry.yarnpkg.com/sequelize/-/sequelize-6.37.5.tgz#2711ab97d0e0fe49c652368946a7312eb0f11cd7" @@ -15394,7 +15777,7 @@ side-channel@^1.0.4: get-intrinsic "^1.0.2" object-inspect "^1.9.0" -side-channel@^1.0.6, side-channel@^1.1.0: +side-channel@^1.1.0: version "1.1.0" resolved "https://registry.yarnpkg.com/side-channel/-/side-channel-1.1.0.tgz#c3fcff9c4da932784873335ec9765fa94ff66bc9" integrity sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw== @@ -15429,17 +15812,6 @@ signale@^1.2.1, signale@^1.4.0: figures "^2.0.0" pkg-conf "^2.1.0" -sigstore@^1.3.0, sigstore@^1.4.0, sigstore@^1.9.0: - version "1.9.0" - resolved "https://registry.yarnpkg.com/sigstore/-/sigstore-1.9.0.tgz#1e7ad8933aa99b75c6898ddd0eeebc3eb0d59875" - integrity sha512-0Zjz0oe37d08VeOtBIuB6cRriqXse2e8w+7yIy2XSXjshRKxbc2KkhXjL229jXSxEm7UbcjS76wcJDGQddVI9A== - dependencies: - "@sigstore/bundle" "^1.1.0" - "@sigstore/protobuf-specs" "^0.2.0" - "@sigstore/sign" "^1.0.0" - "@sigstore/tuf" "^1.0.3" - make-fetch-happen "^11.0.1" - sigstore@^2.2.0: version "2.3.1" resolved "https://registry.yarnpkg.com/sigstore/-/sigstore-2.3.1.tgz#0755dd2cc4820f2e922506da54d3d628e13bfa39" @@ -15452,6 +15824,18 @@ sigstore@^2.2.0: "@sigstore/tuf" "^2.3.4" "@sigstore/verify" "^1.2.1" +sigstore@^4.0.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/sigstore/-/sigstore-4.1.0.tgz#d34b92a544a05e003a2430209d26d8dfafd805a0" + integrity 
sha512-/fUgUhYghuLzVT/gaJoeVehLCgZiUxPCPMcyVNY0lIf/cTCz58K/WTI7PefDarXxp9nUKpEwg1yyz3eSBMTtgA== + dependencies: + "@sigstore/bundle" "^4.0.0" + "@sigstore/core" "^3.1.0" + "@sigstore/protobuf-specs" "^0.5.0" + "@sigstore/sign" "^4.1.0" + "@sigstore/tuf" "^4.0.1" + "@sigstore/verify" "^3.1.0" + simple-concat@^1.0.0: version "1.0.1" resolved "https://registry.yarnpkg.com/simple-concat/-/simple-concat-1.0.1.tgz#f46976082ba35c2263f1c8ab5edfe26c41c9552f" @@ -15490,6 +15874,13 @@ sisteransi@^1.0.5: resolved "https://registry.yarnpkg.com/sisteransi/-/sisteransi-1.0.5.tgz#134d681297756437cc05ca01370d3a7a571075ed" integrity sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg== +skin-tone@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/skin-tone/-/skin-tone-2.0.0.tgz#4e3933ab45c0d4f4f781745d64b9f4c208e41237" + integrity sha512-kUMbT1oBJCpgrnKoSr0o6wPtvRWT9W9UKvGLwfJYO2WuahZRHOpEyL1ckyMGgMWh0UdpmaoFqKKD29WTomNEGA== + dependencies: + unicode-emoji-modifier-base "^1.0.0" + slackify-markdown@^4.3.0: version "4.4.0" resolved "https://registry.yarnpkg.com/slackify-markdown/-/slackify-markdown-4.4.0.tgz#706a56fd09f536c47588e2c12f1e0ee6930c5e8d" @@ -15508,11 +15899,6 @@ slash@3.0.0, slash@^3.0.0: resolved "https://registry.yarnpkg.com/slash/-/slash-3.0.0.tgz#6539be870c165adbd5240220dbe361f1bc4d4634" integrity sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q== -slash@^5.1.0: - version "5.1.0" - resolved "https://registry.yarnpkg.com/slash/-/slash-5.1.0.tgz#be3adddcdf09ac38eebe8dcdc7b1a57a75b095ce" - integrity sha512-ZA6oR3T/pEyuqwMgAKT0/hAv8oAXckzbkmR0UkUosQ+Mc4RxGoJkRmwHgHufaenlyAgE1Mxgpdcrf75y6XcnDg== - slice-ansi@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/slice-ansi/-/slice-ansi-4.0.0.tgz#500e8dd0fd55b05815086255b3195adf2a45fe6b" @@ -15536,15 +15922,6 @@ socks-proxy-agent@^6.0.0: debug "^4.3.3" socks "^2.6.2" -socks-proxy-agent@^7.0.0: - version "7.0.0" - 
resolved "https://registry.yarnpkg.com/socks-proxy-agent/-/socks-proxy-agent-7.0.0.tgz#dc069ecf34436621acb41e3efa66ca1b5fed15b6" - integrity sha512-Fgl0YPZ902wEsAyiQ+idGd1A7rSFx/ayC1CQVMw5P+EQx2V0SgpGtf6OKFhVjPflPUl9YMmEOnmfjCdMUsygww== - dependencies: - agent-base "^6.0.2" - debug "^4.3.3" - socks "^2.6.2" - socks-proxy-agent@^8.0.3: version "8.0.5" resolved "https://registry.yarnpkg.com/socks-proxy-agent/-/socks-proxy-agent-8.0.5.tgz#b9cdb4e7e998509d7659d689ce7697ac21645bee" @@ -15630,7 +16007,7 @@ spdx-exceptions@^2.1.0: resolved "https://registry.yarnpkg.com/spdx-exceptions/-/spdx-exceptions-2.3.0.tgz#3f28ce1a77a00372683eade4a433183527a2163d" integrity sha512-/tTrYOC7PPI1nUAgx34hUpqXuyJG+DTHJTnIULG4rDygi4xu/tfgmq1e1cIRwRzwZgo4NLySi+ricLkZkw4i5A== -spdx-expression-parse@^3.0.0, spdx-expression-parse@^3.0.1: +spdx-expression-parse@^3.0.0: version "3.0.1" resolved "https://registry.yarnpkg.com/spdx-expression-parse/-/spdx-expression-parse-3.0.1.tgz#cf70f50482eefdc98e3ce0a6833e4a53ceeba679" integrity sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q== @@ -15638,6 +16015,14 @@ spdx-expression-parse@^3.0.0, spdx-expression-parse@^3.0.1: spdx-exceptions "^2.1.0" spdx-license-ids "^3.0.0" +spdx-expression-parse@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/spdx-expression-parse/-/spdx-expression-parse-4.0.0.tgz#a23af9f3132115465dac215c099303e4ceac5794" + integrity sha512-Clya5JIij/7C6bRR22+tnGXbc4VKlibKSVj2iHvVeX5iMW7s1SIQlqu699JkODJJIhh/pUu8L0/VLh8xflD+LQ== + dependencies: + spdx-exceptions "^2.1.0" + spdx-license-ids "^3.0.0" + spdx-license-ids@^3.0.0: version "3.0.16" resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-3.0.16.tgz#a14f64e0954f6e25cc6587bd4f392522db0d998f" @@ -15716,6 +16101,18 @@ sqlite3@^5.1.7: optionalDependencies: node-gyp "8.x" +sqlite3@^6.0.1: + version "6.0.1" + resolved 
"https://registry.yarnpkg.com/sqlite3/-/sqlite3-6.0.1.tgz#c0956e7834931c406b283c87b66771c847a6abfc" + integrity sha512-X0czUUMG2tmSqJpEQa3tCuZSHKIx8PwM53vLZzKp/o6Rpy25fiVfjdbnZ988M8+O3ZWR1ih0K255VumCb3MAnQ== + dependencies: + bindings "^1.5.0" + node-addon-api "^8.0.0" + prebuild-install "^7.1.3" + tar "^7.5.10" + optionalDependencies: + node-gyp "12.x" + sqlstring@^2.3.2: version "2.3.3" resolved "https://registry.yarnpkg.com/sqlstring/-/sqlstring-2.3.3.tgz#2ddc21f03bce2c387ed60680e739922c65751d0c" @@ -15750,13 +16147,20 @@ ssri@^10.0.0: dependencies: minipass "^7.0.3" -ssri@^10.0.1, ssri@^10.0.5, ssri@^10.0.6: +ssri@^10.0.6: version "10.0.6" resolved "https://registry.yarnpkg.com/ssri/-/ssri-10.0.6.tgz#a8aade2de60ba2bce8688e3fa349bad05c7dc1e5" integrity sha512-MGrFH9Z4NP9Iyhqn16sDtBpRRNJ0Y2hNa6D65h736fVSaPCHr4DM4sWUNvVaSuC+0OBGhwsrydQwmgfg5LncqQ== dependencies: minipass "^7.0.3" +ssri@^13.0.0, ssri@^13.0.1: + version "13.0.1" + resolved "https://registry.yarnpkg.com/ssri/-/ssri-13.0.1.tgz#2d8946614d33f4d0c84946bb370dce7a9379fd18" + integrity sha512-QUiRf1+u9wPTL/76GTYlKttDEBWV1ga9ZXW8BG6kfdeyyM8LGPix9gROyg9V2+P0xNyF3X2Go526xKFdMZrHSQ== + dependencies: + minipass "^7.0.3" + ssri@^8.0.0, ssri@^8.0.1: version "8.0.1" resolved "https://registry.yarnpkg.com/ssri/-/ssri-8.0.1.tgz#638e4e439e2ffbd2cd289776d5ca457c4f51a2af" @@ -15764,13 +16168,6 @@ ssri@^8.0.0, ssri@^8.0.1: dependencies: minipass "^3.1.1" -ssri@^9.0.0: - version "9.0.1" - resolved "https://registry.yarnpkg.com/ssri/-/ssri-9.0.1.tgz#544d4c357a8d7b71a19700074b6883fcb4eae057" - integrity sha512-o57Wcn66jMQvfHG1FlYbWeZWW/dHZhJXjpIcTfXldXEk5nz5lStPo3mK0OJQfGR3RbZUlbISexbljkJzuEj/8Q== - dependencies: - minipass "^3.1.1" - stack-utils@^2.0.3: version "2.0.6" resolved "https://registry.yarnpkg.com/stack-utils/-/stack-utils-2.0.6.tgz#aaf0748169c02fc33c8232abccf933f54a1cc34f" @@ -15832,6 +16229,11 @@ streamsearch@^1.1.0: resolved 
"https://registry.yarnpkg.com/streamsearch/-/streamsearch-1.1.0.tgz#404dd1e2247ca94af554e841a8ef0eaa238da764" integrity sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg== +string-argv@~0.3.1: + version "0.3.2" + resolved "https://registry.yarnpkg.com/string-argv/-/string-argv-0.3.2.tgz#2b6d0ef24b656274d957d54e0a4bbf6153dc02b6" + integrity sha512-aqD2Q0144Z+/RqG52NeHEkZauTAUWJO8c6yTftGJKO3Tja5tUgIfmIl6kExvhtxSDP7fXB6DvzkfMpCd/F3G+Q== + string-length@^4.0.1: version "4.0.2" resolved "https://registry.yarnpkg.com/string-length/-/string-length-4.0.2.tgz#a8a8dc7bd5c1a82b9b3c8b87e125f66871b6e57a" @@ -15845,7 +16247,7 @@ string-similarity@^4.0.1: resolved "https://registry.yarnpkg.com/string-similarity/-/string-similarity-4.0.4.tgz#42d01ab0b34660ea8a018da8f56a3309bb8b2a5b" integrity sha512-/q/8Q4Bl4ZKAPjj8WerIBJWALKkaPRfrvhfF8k/B23i4nzrlRj2/go1m90In7nG/3XDSbOo0+pu6RvCTM9RGMQ== -"string-width-cjs@npm:string-width@^4.2.0", "string-width@^1.0.2 || 2 || 3 || 4", string-width@^4.0.0, string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.3: +"string-width@^1.0.2 || 2 || 3 || 4", string-width@^4.0.0, string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.3: version "4.2.3" resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== @@ -15862,14 +16264,14 @@ string-width@^2.1.0: is-fullwidth-code-point "^2.0.0" strip-ansi "^4.0.0" -string-width@^5.0.1, string-width@^5.1.2: - version "5.1.2" - resolved "https://registry.yarnpkg.com/string-width/-/string-width-5.1.2.tgz#14f8daec6d81e7221d2a357e668cab73bdbca794" - integrity sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA== +string-width@^7.0.0, string-width@^7.2.0: + version "7.2.0" + resolved 
"https://registry.yarnpkg.com/string-width/-/string-width-7.2.0.tgz#b5bb8e2165ce275d4d43476dd2700ad9091db6dc" + integrity sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ== dependencies: - eastasianwidth "^0.2.0" - emoji-regex "^9.2.2" - strip-ansi "^7.0.1" + emoji-regex "^10.3.0" + get-east-asian-width "^1.0.0" + strip-ansi "^7.1.0" string.prototype.trim@^1.2.10: version "1.2.10" @@ -15944,13 +16346,6 @@ string_decoder@~1.1.1: dependencies: safe-buffer "~5.1.0" -"strip-ansi-cjs@npm:strip-ansi@^6.0.1", strip-ansi@^6.0.0, strip-ansi@^6.0.1: - version "6.0.1" - resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" - integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== - dependencies: - ansi-regex "^5.0.1" - strip-ansi@^3.0.0: version "3.0.1" resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-3.0.1.tgz#6a385fb8853d952d5ff05d0e8aaf94278dc63dcf" @@ -15972,7 +16367,14 @@ strip-ansi@^5.2.0: dependencies: ansi-regex "^4.1.0" -strip-ansi@^7.0.1: +strip-ansi@^6.0.0, strip-ansi@^6.0.1: + version "6.0.1" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" + integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== + dependencies: + ansi-regex "^5.0.1" + +strip-ansi@^7.1.0: version "7.2.0" resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-7.2.0.tgz#d22a269522836a627af8d04b5c3fd2c7fa3e32e3" integrity sha512-yDPMNjp4WyfYBkHnjIRLfca1i6KMyGCtsVgoKe/z1+6vukgaENdgGBZt+ZmKPc4gavvEZ5OgHfHdrazhgNyG7w== @@ -16004,6 +16406,11 @@ strip-final-newline@^3.0.0: resolved "https://registry.yarnpkg.com/strip-final-newline/-/strip-final-newline-3.0.0.tgz#52894c313fbff318835280aed60ff71ebf12b8fd" integrity sha512-dOESqjYr96iWYylGObzd39EuNTa5VJxyvVAEm5Jnh7KGo75V43Hk1odPQkNDyXNmUR6k+gEiDVXnjB8HJ3crXw== 
+strip-final-newline@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/strip-final-newline/-/strip-final-newline-4.0.0.tgz#35a369ec2ac43df356e3edd5dcebb6429aa1fa5c" + integrity sha512-aulFJcD6YK8V1G7iRB5tigAP4TsHBZZrOV8pjV++zdUwmeV8uzbY7yn6h9MswN62adStNZFuCIx4haBnRuMDaw== + strip-indent@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/strip-indent/-/strip-indent-3.0.0.tgz#c32e1cee940b6b3432c771bc2c54bcce73cd3001" @@ -16021,10 +16428,10 @@ strip-json-comments@~2.0.1: resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-2.0.1.tgz#3c531942e908c2697c0ec344858c286c7ca0a60a" integrity sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ== -strnum@^2.1.2: - version "2.1.2" - resolved "https://registry.yarnpkg.com/strnum/-/strnum-2.1.2.tgz#a5e00ba66ab25f9cafa3726b567ce7a49170937a" - integrity sha512-l63NF9y/cLROq/yqKXSLtcMeeyOfnSQlfMSlzFt/K73oIaD8DGaQWd7Z34X9GPiKqP5rbSh84Hl4bOlLcjiSrQ== +strnum@^2.2.0: + version "2.2.2" + resolved "https://registry.yarnpkg.com/strnum/-/strnum-2.2.2.tgz#f11fd94ab62b536ba2ecc615858f3747c2881b3f" + integrity sha512-DnR90I+jtXNSTXWdwrEy9FakW7UX+qUZg28gj5fk2vxxl7uS/3bpI4fjFYVmdK9etptYBPNkpahuQnEwhwECqA== strtok3@^6.2.4: version "6.3.0" @@ -16045,6 +16452,15 @@ subscriptions-transport-ws@^0.9.19: symbol-observable "^1.0.4" ws "^5.2.0 || ^6.0.0 || ^7.0.0" +super-regex@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/super-regex/-/super-regex-1.1.0.tgz#14b69b6374f7b3338db52ecd511dae97c27acf75" + integrity sha512-WHkws2ZflZe41zj6AolvvmaTrWds/VuyeYr9iPVv/oQeaIoVxMKaushfFWpOGDT+GuBrM/sVqF8KUCYQlSSTdQ== + dependencies: + function-timeout "^1.0.1" + make-asynchronous "^1.0.1" + time-span "^5.1.0" + superagent@^10.2.3: version "10.2.3" resolved "https://registry.yarnpkg.com/superagent/-/superagent-10.2.3.tgz#d1e4986f2caac423c37e38077f9073ccfe73a59b" @@ -16083,6 +16499,11 @@ supertest@^7.1.3: methods "^1.1.2" superagent "^10.2.3" 
+supports-color@^10.2.2: + version "10.2.2" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-10.2.2.tgz#466c2978cc5cd0052d542a0b576461c2b802ebb4" + integrity sha512-SS+jx45GF1QjgEXQx4NJZV9ImqmO2NPz5FNsIHrsDjh2YsHnawpan7SNQ1o8NuhrbHZy9AZhIoCUiCeaW/C80g== + supports-color@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-2.0.0.tgz#535d045ce6b6363fa40117084629995e9df324c7" @@ -16102,19 +16523,14 @@ supports-color@^7.0.0, supports-color@^7.1.0: dependencies: has-flag "^4.0.0" -supports-color@^8, supports-color@^8.0.0, supports-color@^8.1.1: +supports-color@^8, supports-color@^8.0.0, supports-color@^8.1.1, supports-color@~8.1.1: version "8.1.1" resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-8.1.1.tgz#cd6fc17e28500cff56c1b86c0a7fd4a54a73005c" integrity sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q== dependencies: has-flag "^4.0.0" -supports-color@^9.4.0: - version "9.4.0" - resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-9.4.0.tgz#17bfcf686288f531db3dea3215510621ccb55954" - integrity sha512-VL+lNrEoIXww1coLPOmiEmK/0sGigko5COxI09KzHc2VJXJsQ37UaQ+8quuxjDeA7+KnLGTWRyOXSLLR2Wb4jw== - -supports-hyperlinks@^2.2.0, supports-hyperlinks@^2.3.0: +supports-hyperlinks@^2.2.0: version "2.3.0" resolved "https://registry.yarnpkg.com/supports-hyperlinks/-/supports-hyperlinks-2.3.0.tgz#3943544347c1ff90b15effb03fc14ae45ec10624" integrity sha512-RpsAZlpWcDwOPQA22aCH4J0t7L8JmAvsCxfOSEwm7cQs3LshN36QaTkwd70DnBOXDWGssw2eUoc8CaRWT0XunA== @@ -16122,6 +16538,14 @@ supports-hyperlinks@^2.2.0, supports-hyperlinks@^2.3.0: has-flag "^4.0.0" supports-color "^7.0.0" +supports-hyperlinks@^3.1.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/supports-hyperlinks/-/supports-hyperlinks-3.2.0.tgz#b8e485b179681dea496a1e7abdf8985bd3145461" + integrity 
sha512-zFObLMyZeEwzAoKCyu1B91U79K2t7ApXuQfo8OuxwXLDgcKxuwM+YvcbIhm6QWqz7mHUH1TVytR1PwVVjEuMig== + dependencies: + has-flag "^4.0.0" + supports-color "^7.0.0" + supports-preserve-symlinks-flag@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz#6eda4bd344a3c94aea376d4cc31bc77311039e09" @@ -16132,6 +16556,11 @@ symbol-observable@^1.0.2, symbol-observable@^1.0.4: resolved "https://registry.yarnpkg.com/symbol-observable/-/symbol-observable-1.2.0.tgz#c22688aed4eab3cdc2dfeacbb561660560a00804" integrity sha512-e900nM8RRtGhlV36KGEU9k65K3mPb1WV70OdjfxlG2EAuM1noi/E/BaW/uMhL7bPEssK8QV57vN3esixjUvcXQ== +tagged-tag@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/tagged-tag/-/tagged-tag-1.0.0.tgz#a0b5917c2864cba54841495abfa3f6b13edcf4d6" + integrity sha512-yEFYrVhod+hdNyx7g5Bnkkb0G6si8HJurOoOEgC8B/O0uXLHlaey/65KRv6cuWBNhBgHKAROVpc7QyYqE5gFng== + tar-fs@^2.0.0: version "2.1.4" resolved "https://registry.yarnpkg.com/tar-fs/-/tar-fs-2.1.4.tgz#800824dbf4ef06ded9afea4acafe71c67c76b930" @@ -16153,17 +16582,16 @@ tar-stream@^2.1.4, tar-stream@~2.2.0: inherits "^2.0.3" readable-stream "^3.1.1" -tar@6.2.1, tar@^6.0.2, tar@^6.1.11, tar@^6.1.13, tar@^6.1.2, tar@^6.2.1: - version "6.2.1" - resolved "https://registry.yarnpkg.com/tar/-/tar-6.2.1.tgz#717549c541bc3c2af15751bea94b1dd068d4b03a" - integrity sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A== +tar@6.2.1, tar@^6.0.2, tar@^6.1.11, tar@^6.1.2, tar@^6.2.1, tar@^7.4.3, tar@^7.5.1, tar@^7.5.10, tar@^7.5.11, tar@^7.5.4, tar@^7.5.8: + version "7.5.13" + resolved "https://registry.yarnpkg.com/tar/-/tar-7.5.13.tgz#0d214ed56781a26edc313581c0e2d929ceeb866d" + integrity sha512-tOG/7GyXpFevhXVh8jOPJrmtRpOTsYqUIkVdVooZYJS/z8WhfQUX8RJILmeuJNinGAMSu1veBr4asSHFt5/hng== dependencies: - chownr "^2.0.0" - fs-minipass "^2.0.0" - minipass "^5.0.0" - minizlib "^2.1.1" - mkdirp "^1.0.3" - yallist "^4.0.0" + 
"@isaacs/fs-minipass" "^4.0.0" + chownr "^3.0.0" + minipass "^7.1.2" + minizlib "^3.1.0" + yallist "^5.0.0" tedious@18.6.1, tedious@^18.6.1: version "18.6.1" @@ -16215,16 +16643,25 @@ text-extensions@^1.0.0: resolved "https://registry.yarnpkg.com/text-extensions/-/text-extensions-1.9.0.tgz#1853e45fee39c945ce6f6c36b2d659b5aabc2a26" integrity sha512-wiBrwC1EhBelW12Zy26JeOUkQ5mRu+5o8rpsJk5+2t+Y5vE7e842qtZDQ2g1NpX/29HdyFeJ4nSIhI47ENSxlQ== -text-extensions@^2.0.0: - version "2.4.0" - resolved "https://registry.yarnpkg.com/text-extensions/-/text-extensions-2.4.0.tgz#a1cfcc50cf34da41bfd047cc744f804d1680ea34" - integrity sha512-te/NtwBwfiNRLf9Ijqx3T0nlqZiQ2XrrtBvu+cLL8ZRrGkO0NHTug8MYFKyoSrv/sHTaSKfilUkizV6XhxMJ3g== - text-table@^0.2.0, text-table@~0.2.0: version "0.2.0" resolved "https://registry.yarnpkg.com/text-table/-/text-table-0.2.0.tgz#7f5ee823ae805207c00af2df4a84ec3fcfa570b4" integrity sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw== +thenify-all@^1.0.0: + version "1.6.0" + resolved "https://registry.yarnpkg.com/thenify-all/-/thenify-all-1.6.0.tgz#1a1918d402d8fc3f98fbf234db0bcc8cc10e9726" + integrity sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA== + dependencies: + thenify ">= 3.1.0 < 4" + +"thenify@>= 3.1.0 < 4": + version "3.3.1" + resolved "https://registry.yarnpkg.com/thenify/-/thenify-3.3.1.tgz#8932e686a4066038a016dd9e2ca46add9838a95f" + integrity sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw== + dependencies: + any-promise "^1.0.0" + thread-stream@^3.0.0: version "3.1.0" resolved "https://registry.yarnpkg.com/thread-stream/-/thread-stream-3.1.0.tgz#4b2ef252a7c215064507d4ef70c05a5e2d34c4f1" @@ -16252,6 +16689,13 @@ through@2, through@2.3.8, "through@>=2.2.7 <3", through@^2.3.6: resolved "https://registry.yarnpkg.com/through/-/through-2.3.8.tgz#0dd4c9ffaabc357960b1b724115d7e0e86a2e1f5" integrity 
sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg== +time-span@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/time-span/-/time-span-5.1.0.tgz#80c76cf5a0ca28e0842d3f10a4e99034ce94b90d" + integrity sha512-75voc/9G4rDIJleOo4jPvN4/YC4GRZrY8yy1uU4lwrB3XEQbWve8zXoO5No4eFrGcTAMYyoY67p8jRQdtA1HbA== + dependencies: + convert-hrtime "^5.0.0" + timers-ext@^0.1.7: version "0.1.7" resolved "https://registry.yarnpkg.com/timers-ext/-/timers-ext-0.1.7.tgz#6f57ad8578e07a3fb9f91d9387d65647555e25c6" @@ -16270,10 +16714,10 @@ tiny-lru@^8.0.1: resolved "https://registry.yarnpkg.com/tiny-lru/-/tiny-lru-8.0.2.tgz#812fccbe6e622ded552e3ff8a4c3b5ff34a85e4c" integrity sha512-ApGvZ6vVvTNdsmt676grvCkUCGwzG9IqXma5Z07xJgiC5L7akUMof5U8G2JTI9Rz/ovtVhJBlY6mNhEvtjzOIg== -tiny-relative-date@^1.3.0: - version "1.3.0" - resolved "https://registry.yarnpkg.com/tiny-relative-date/-/tiny-relative-date-1.3.0.tgz#fa08aad501ed730f31cc043181d995c39a935e07" - integrity sha512-MOQHpzllWxDCHHaDno30hhLfbouoYlOI8YlMNtvKe1zXbjEVhbcEovQxvZrPvtiYW630GQDoMMarCnjfyfHA+A== +tiny-relative-date@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/tiny-relative-date/-/tiny-relative-date-2.0.2.tgz#0c35c2a3ef87b80f311314918505aa86c2d44bc9" + integrity sha512-rGxAbeL9z3J4pI2GtBEoFaavHdO4RKAU54hEuOef5kfx5aPqiQtbhYktMOTL5OA33db8BjsDcLXuNp+/v19PHw== tinyglobby@0.2.12: version "0.2.12" @@ -16283,6 +16727,14 @@ tinyglobby@0.2.12: fdir "^6.4.3" picomatch "^4.0.2" +tinyglobby@^0.2.12, tinyglobby@^0.2.14: + version "0.2.15" + resolved "https://registry.yarnpkg.com/tinyglobby/-/tinyglobby-0.2.15.tgz#e228dd1e638cea993d2fdb4fcd2d4602a79951c2" + integrity sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ== + dependencies: + fdir "^6.5.0" + picomatch "^4.0.3" + tmp@^0.0.33: version "0.0.33" resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.0.33.tgz#6d34335889768d21b2bcda0aa277ced3b1bfadf9" @@ -16493,15 +16945,6 @@ 
tsutils@^3.21.0: dependencies: tslib "^1.8.1" -tuf-js@^1.1.7: - version "1.1.7" - resolved "https://registry.yarnpkg.com/tuf-js/-/tuf-js-1.1.7.tgz#21b7ae92a9373015be77dfe0cb282a80ec3bbe43" - integrity sha512-i3P9Kgw3ytjELUfpuKVDNBJvk4u5bXL6gskv572mcevPbSKCV3zt3djhmlEQ65yERjIbOSncy7U4cQJaB1CBCg== - dependencies: - "@tufjs/models" "1.0.4" - debug "^4.3.4" - make-fetch-happen "^11.1.1" - tuf-js@^2.2.1: version "2.2.1" resolved "https://registry.yarnpkg.com/tuf-js/-/tuf-js-2.2.1.tgz#fdd8794b644af1a75c7aaa2b197ddffeb2911b56" @@ -16511,6 +16954,15 @@ tuf-js@^2.2.1: debug "^4.3.4" make-fetch-happen "^13.0.1" +tuf-js@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/tuf-js/-/tuf-js-4.1.0.tgz#ae4ef9afa456fcb4af103dc50a43bc031f066603" + integrity sha512-50QV99kCKH5P/Vs4E2Gzp7BopNV+KzTXqWeaxrfu5IQJBOULRsTIS9seSsOVT8ZnGXzCyx55nYWAi4qJzpZKEQ== + dependencies: + "@tufjs/models" "4.1.0" + debug "^4.4.3" + make-fetch-happen "^15.0.1" + tunnel-agent@^0.6.0: version "0.6.0" resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.6.0.tgz#27a5dea06b36b04a0a9966774b290868f0fc40fd" @@ -16525,6 +16977,11 @@ tunnel-ssh@^5.2.0: dependencies: ssh2 "^1.15.0" +tunnel@^0.0.6: + version "0.0.6" + resolved "https://registry.yarnpkg.com/tunnel/-/tunnel-0.0.6.tgz#72f1314b34a5b192db012324df2cc587ca47f92c" + integrity sha512-1h/Lnq9yajKY2PEbBadPXj3VxsDDu844OnaAo52UVmIzIvwwtBPIuNvkjuzBlTWpfJyUbG3ez0KSBibQkj4ojg== + tweetnacl@^0.14.3: version "0.14.5" resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-0.14.5.tgz#5ae68177f192d4456269d108afa93ff8743f4f64" @@ -16587,16 +17044,18 @@ type-fest@^2.12.2: resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-2.19.0.tgz#88068015bb33036a598b952e55e9311a60fd3a9b" integrity sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA== -type-fest@^3.8.0: - version "3.13.1" - resolved 
"https://registry.yarnpkg.com/type-fest/-/type-fest-3.13.1.tgz#bb744c1f0678bea7543a2d1ec24e83e68e8c8706" - integrity sha512-tLq3bSNx+xSpwvAJnzrK0Ep5CLNWjvFTOp71URMaAEWBfRb9nnJiBoUe0tF8bI4ZFO3omgBR6NvnbzVUT3Ly4g== - -type-fest@^4.2.0: +type-fest@^4.0.0, type-fest@^4.39.1, type-fest@^4.6.0: version "4.41.0" resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-4.41.0.tgz#6ae1c8e5731273c2bf1f58ad39cbae2c91a46c58" integrity sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA== +type-fest@^5.2.0, type-fest@^5.4.4: + version "5.5.0" + resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-5.5.0.tgz#78fca72f3a1f9ec964e6ae260db492b070c56f3b" + integrity sha512-PlBfpQwiUvGViBNX84Yxwjsdhd1TUlXr6zjX7eoirtCPIr08NAmxwa+fcYBTeRQxHo9YC9wwF3m9i700sHma8g== + dependencies: + tagged-tag "^1.0.0" + type-is@^1.6.16, type-is@^1.6.18, type-is@~1.6.18: version "1.6.18" resolved "https://registry.yarnpkg.com/type-is/-/type-is-1.6.18.tgz#4e552cd05df09467dcbc4ef739de89f2cf37c131" @@ -16766,6 +17225,17 @@ uid@2.0.2: dependencies: "@lukeed/csprng" "^1.0.0" +umzug@^3.8.2: + version "3.8.2" + resolved "https://registry.yarnpkg.com/umzug/-/umzug-3.8.2.tgz#53c2189604d36956d7b75a89128108d0e3073a9f" + integrity sha512-BEWEF8OJjTYVC56GjELeHl/1XjFejrD7aHzn+HldRJTx+pL1siBrKHZC8n4K/xL3bEzVA9o++qD1tK2CpZu4KA== + dependencies: + "@rushstack/ts-command-line" "^4.12.2" + emittery "^0.13.0" + fast-glob "^3.3.2" + pony-cause "^2.1.4" + type-fest "^4.0.0" + unbox-primitive@^1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/unbox-primitive/-/unbox-primitive-1.0.2.tgz#29032021057d5e6cdbd08c5129c226dff8ed6f9e" @@ -16806,11 +17276,36 @@ undici-types@~6.21.0: resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-6.21.0.tgz#691d00af3909be93a7faa13be61b3a5b50ef12cb" integrity sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ== +undici@^6.23.0: + version "6.24.1" + resolved 
"https://registry.yarnpkg.com/undici/-/undici-6.24.1.tgz#9df1425cede20b836d95634347946f79578b7e71" + integrity sha512-sC+b0tB1whOCzbtlx20fx3WgCXwkW627p4EA9uM+/tNNPkSS+eSEld6pAs9nDv7WbY1UUljBMYPtu9BCOrCWKA== + +undici@^7.0.0: + version "7.24.5" + resolved "https://registry.yarnpkg.com/undici/-/undici-7.24.5.tgz#7debcf5623df2d1cb469b6face01645d9c852ae2" + integrity sha512-3IWdCpjgxp15CbJnsi/Y9TCDE7HWVN19j1hmzVhoAkY/+CJx449tVxT5wZc1Gwg8J+P0LWvzlBzxYRnHJ+1i7Q== + +unicode-emoji-modifier-base@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/unicode-emoji-modifier-base/-/unicode-emoji-modifier-base-1.0.0.tgz#dbbd5b54ba30f287e2a8d5a249da6c0cef369459" + integrity sha512-yLSH4py7oFH3oG/9K+XWrz1pSi3dfUrWEnInbxMfArOfc1+33BlGPQtLsOYwvdMy11AwUBetYuaRxSPqgkq+8g== + +unicorn-magic@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/unicorn-magic/-/unicorn-magic-0.1.0.tgz#1bb9a51c823aaf9d73a8bfcd3d1a23dde94b0ce4" + integrity sha512-lRfVq8fE8gz6QMBuDM6a+LO3IAzTi05H6gCVaUpir2E1Rwpo4ZUog45KpNXKC/Mn3Yb9UDuHumeFTo9iV/D9FQ== + unicorn-magic@^0.3.0: version "0.3.0" resolved "https://registry.yarnpkg.com/unicorn-magic/-/unicorn-magic-0.3.0.tgz#4efd45c85a69e0dd576d25532fbfa22aa5c8a104" integrity sha512-+QBBXBCvifc56fsbuxZQ6Sic3wqqc3WWaqxs58gvJrcOuN83HGTCwz3oS5phzU9LthRNE9VrJCFCLUgHeeFnfA== +unicorn-magic@^0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/unicorn-magic/-/unicorn-magic-0.4.0.tgz#78c6a090fd6d07abd2468b83b385603e00dfdb24" + integrity sha512-wH590V9VNgYH9g3lH9wWjTrUoKsjLF6sGLjhR4sH1LWpLmCOH0Zf7PukhDA8BiS7KHe4oPNkcTHqYkj7SOGUOw== + unified@^9.0.0: version "9.2.2" resolved "https://registry.yarnpkg.com/unified/-/unified-9.2.2.tgz#67649a1abfc3ab85d2969502902775eb03146975" @@ -16830,13 +17325,6 @@ unique-filename@^1.1.1: dependencies: unique-slug "^2.0.0" -unique-filename@^2.0.0: - version "2.0.1" - resolved "https://registry.yarnpkg.com/unique-filename/-/unique-filename-2.0.1.tgz#e785f8675a9a7589e0ac77e0b5c34d2eaeac6da2" - integrity 
sha512-ODWHtkkdx3IAR+veKxFV+VBkUMcN+FaqzUUd7IZzt+0zhDZFPFxhlqwPF3YQvMHx1TD0tdgYl+kuPnJ8E6ql7A== - dependencies: - unique-slug "^3.0.0" - unique-filename@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/unique-filename/-/unique-filename-3.0.0.tgz#48ba7a5a16849f5080d26c760c86cf5cf05770ea" @@ -16851,13 +17339,6 @@ unique-slug@^2.0.0: dependencies: imurmurhash "^0.1.4" -unique-slug@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/unique-slug/-/unique-slug-3.0.0.tgz#6d347cf57c8a7a7a6044aabd0e2d74e4d76dc7c9" - integrity sha512-8EyMynh679x/0gqE9fT9oilG+qEt+ibFyqjuVTsZn1+CMxH+XLlpvr2UZx4nVcCwTpx81nICr2JQFkM+HPLq4w== - dependencies: - imurmurhash "^0.1.4" - unique-slug@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/unique-slug/-/unique-slug-4.0.0.tgz#6bae6bb16be91351badd24cdce741f892a6532e3" @@ -16913,6 +17394,11 @@ universal-user-agent@^6.0.0: resolved "https://registry.yarnpkg.com/universal-user-agent/-/universal-user-agent-6.0.1.tgz#15f20f55da3c930c57bddbf1734c6654d5fd35aa" integrity sha512-yCzhz6FN2wU1NiiQRogkTQszlQSlpWaw8SvVegAc+bDxbzHgh1vX8uIe8OYyMH6DwH+sdTJsgMl36+mSMdRJIQ== +universal-user-agent@^7.0.0, universal-user-agent@^7.0.2: + version "7.0.3" + resolved "https://registry.yarnpkg.com/universal-user-agent/-/universal-user-agent-7.0.3.tgz#c05870a58125a2dc00431f2df815a77fe69736be" + integrity sha512-TmnEAEAsBJVZM/AADELsK76llnwcf9vMKuPz8JflO1frO8Lchitr0fNaN9d+Ap0BjKtqWqd/J17qeDnXh8CL2A== + universalify@^2.0.0: version "2.0.1" resolved "https://registry.yarnpkg.com/universalify/-/universalify-2.0.1.tgz#168efc2180964e6386d061e094df61afe239b18d" @@ -16944,7 +17430,7 @@ update-browserslist-db@^1.1.3: escalade "^3.2.0" picocolors "^1.1.1" -uri-js@^4.2.2: +uri-js@^4.2.2, uri-js@^4.4.1: version "4.4.1" resolved "https://registry.yarnpkg.com/uri-js/-/uri-js-4.4.1.tgz#9b1a52595225859e55f669d928f88c6c57f2a77e" integrity sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg== @@ -17037,6 
+17523,11 @@ validate-npm-package-name@^5.0.0: dependencies: builtins "^5.0.0" +validate-npm-package-name@^7.0.0, validate-npm-package-name@^7.0.2: + version "7.0.2" + resolved "https://registry.yarnpkg.com/validate-npm-package-name/-/validate-npm-package-name-7.0.2.tgz#e57c3d721a4c8bbff454a246e7f7da811559ea0d" + integrity sha512-hVDIBwsRruT73PbK7uP5ebUt+ezEtCmzZz3F59BSr2F6OVFnJ/6h8liuvdLrQ88Xmnk6/+xGGuq+pG9WwTuy3A== + validator@^13.9.0: version "13.15.26" resolved "https://registry.yarnpkg.com/validator/-/validator-13.15.26.tgz#36c3deeab30e97806a658728a155c66fcaa5b944" @@ -17083,6 +17574,11 @@ walk-up-path@^3.0.1: resolved "https://registry.yarnpkg.com/walk-up-path/-/walk-up-path-3.0.1.tgz#c8d78d5375b4966c717eb17ada73dbd41490e886" integrity sha512-9YlCL/ynK3CTlrSRrDxZvUauLzAswPCrsaCgilqFevUYpeEW0/3ScEjaa3kbW/T0ghhkEr7mv+fpjqn1Y1YuTA== +walk-up-path@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/walk-up-path/-/walk-up-path-4.0.0.tgz#590666dcf8146e2d72318164f1f2ac6ef51d4198" + integrity sha512-3hu+tD8YzSLGuFYtPRb48vdhKMi0KQV5sn+uWr8+7dMEq/2G/dtLrdDinkLjqq5TIbIBjYJ4Ax/n3YiaW7QM8A== + walker@^1.0.8: version "1.0.8" resolved "https://registry.yarnpkg.com/walker/-/walker-1.0.8.tgz#bd498db477afe573dc04185f011d3ab8a8d7653f" @@ -17097,6 +17593,11 @@ wcwidth@^1.0.0, wcwidth@^1.0.1: dependencies: defaults "^1.0.3" +web-worker@^1.5.0: + version "1.5.0" + resolved "https://registry.yarnpkg.com/web-worker/-/web-worker-1.5.0.tgz#71b2b0fbcc4293e8f0aa4f6b8a3ffebff733dcc5" + integrity sha512-RiMReJrTAiA+mBjGONMnjVDP2u3p9R1vkcGz6gDIrOMT3oGuYwX2WRMYI9ipkphSuE5XKEhydbhNEJh4NY9mlw== + webidl-conversions@^3.0.0: version "3.0.1" resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-3.0.1.tgz#24534275e2a7bc6be7bc86611cc16ae0a5654871" @@ -17231,13 +17732,6 @@ which@^2.0.1, which@^2.0.2: dependencies: isexe "^2.0.0" -which@^3.0.0, which@^3.0.1: - version "3.0.1" - resolved 
"https://registry.yarnpkg.com/which/-/which-3.0.1.tgz#89f1cd0c23f629a8105ffe69b8172791c87b4be1" - integrity sha512-XA1b62dzQzLfaEOSQFTCOd5KFf/1VSzZo7/7TUjnya6u0vGGKzU96UQBZTAThCb2j4/xjBAyii1OhRLJEivHvg== - dependencies: - isexe "^2.0.0" - which@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/which/-/which-4.0.0.tgz#cd60b5e74503a3fbcfbf6cd6b4138a8bae644c1a" @@ -17245,6 +17739,13 @@ which@^4.0.0: dependencies: isexe "^3.1.1" +which@^6.0.0, which@^6.0.1: + version "6.0.1" + resolved "https://registry.yarnpkg.com/which/-/which-6.0.1.tgz#021642443a198fb93b784a5606721cb18cfcbfce" + integrity sha512-oGLe46MIrCRqX7ytPUf66EAYvdeMIZYn3WaocqqKZAxrBpkqHfL/qvTyJ/bTk5+AqHCjXmrv3CEWgy368zhRUg== + dependencies: + isexe "^4.0.0" + wide-align@1.1.5, wide-align@^1.1.2, wide-align@^1.1.5: version "1.1.5" resolved "https://registry.yarnpkg.com/wide-align/-/wide-align-1.1.5.tgz#df1d4c206854369ecf3c9a4898f1b23fbd9d15d3" @@ -17271,39 +17772,39 @@ wordwrap@>=0.0.2, wordwrap@^1.0.0: resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-1.0.0.tgz#27584810891456a4171c8d0226441ade90cbcaeb" integrity sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q== -"wrap-ansi-cjs@npm:wrap-ansi@^7.0.0", wrap-ansi@^7.0.0: - version "7.0.0" - resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" - integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q== +wrap-ansi@^6.0.1, wrap-ansi@^6.2.0: + version "6.2.0" + resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-6.2.0.tgz#e9393ba07102e6c91a3b221478f0257cd2856e53" + integrity sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA== dependencies: ansi-styles "^4.0.0" string-width "^4.1.0" strip-ansi "^6.0.0" -wrap-ansi@^6.0.1, wrap-ansi@^6.2.0: - version "6.2.0" - resolved 
"https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-6.2.0.tgz#e9393ba07102e6c91a3b221478f0257cd2856e53" - integrity sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA== +wrap-ansi@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" + integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q== dependencies: ansi-styles "^4.0.0" string-width "^4.1.0" strip-ansi "^6.0.0" -wrap-ansi@^8.1.0: - version "8.1.0" - resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-8.1.0.tgz#56dc22368ee570face1b49819975d9b9a5ead214" - integrity sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ== +wrap-ansi@^9.0.0: + version "9.0.2" + resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-9.0.2.tgz#956832dea9494306e6d209eb871643bb873d7c98" + integrity sha512-42AtmgqjV+X1VpdOfyTGOYRi0/zsoLqtXQckTmqTeybT+BDIbM/Guxo7x3pE2vtpr1ok6xRqM9OpBe+Jyoqyww== dependencies: - ansi-styles "^6.1.0" - string-width "^5.0.1" - strip-ansi "^7.0.1" + ansi-styles "^6.2.1" + string-width "^7.0.0" + strip-ansi "^7.1.0" wrappy@1: version "1.0.2" resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f" integrity sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ== -write-file-atomic@5.0.1, write-file-atomic@^5.0.0, write-file-atomic@^5.0.1: +write-file-atomic@5.0.1, write-file-atomic@^5.0.0: version "5.0.1" resolved "https://registry.yarnpkg.com/write-file-atomic/-/write-file-atomic-5.0.1.tgz#68df4717c55c6fa4281a7860b4c2ba0a6d2b11e7" integrity sha512-+QU2zd6OTD8XWIJCbffaiQeH9U73qIqafo1x6V1snCWYGJf6cVE0cDR4D8xRzcEnfI21IFrUPzPGtcPf8AC+Rw== @@ -17328,6 +17829,13 @@ write-file-atomic@^4.0.2: imurmurhash "^0.1.4" signal-exit "^3.0.7" +write-file-atomic@^7.0.0: + version "7.0.1" + resolved 
"https://registry.yarnpkg.com/write-file-atomic/-/write-file-atomic-7.0.1.tgz#0e2a450ab5aa306bcfcd3aed61833b10cc4fb885" + integrity sha512-OTIk8iR8/aCRWBqvxrzxR0hgxWpnYBblY1S5hDWBQfk/VFmJwzmJgQFN3WsoUKHISv2eAwe+PpbUzyL1CKTLXg== + dependencies: + signal-exit "^4.0.1" + write-json-file@^3.2.0: version "3.2.0" resolved "https://registry.yarnpkg.com/write-json-file/-/write-json-file-3.2.0.tgz#65bbdc9ecd8a1458e15952770ccbadfcff5fe62a" @@ -17384,6 +17892,11 @@ yallist@^4.0.0: resolved "https://registry.yarnpkg.com/yallist/-/yallist-4.0.0.tgz#9bb92790d9c0effec63be73519e11a35019a3a72" integrity sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A== +yallist@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/yallist/-/yallist-5.0.0.tgz#00e2de443639ed0d78fd87de0d27469fbcffb533" + integrity sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw== + yaml@^2.2.1, yaml@^2.8.1: version "2.8.2" resolved "https://registry.yarnpkg.com/yaml/-/yaml-2.8.2.tgz#5694f25eca0ce9c3e7a9d9e00ce0ddabbd9e35c5" @@ -17404,7 +17917,12 @@ yargs-parser@^20.2.2, yargs-parser@^20.2.3: resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-20.2.9.tgz#2eb7dc3b0289718fc295f362753845c41a0c94ee" integrity sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w== -yargs@17.7.2, yargs@^17.0.0, yargs@^17.3.1, yargs@^17.5.1, yargs@^17.6.2: +yargs-parser@^22.0.0: + version "22.0.0" + resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-22.0.0.tgz#87b82094051b0567717346ecd00fd14804b357c8" + integrity sha512-rwu/ClNdSMpkSrUb+d6BRsSkLUq1fmfsY6TOpYzTwvwkg1/NRG85KBy3kq++A8LKQwX6lsu+aWad+2khvuXrqw== + +yargs@17.7.2, yargs@^17.0.0, yargs@^17.3.1, yargs@^17.6.2: version "17.7.2" resolved "https://registry.yarnpkg.com/yargs/-/yargs-17.7.2.tgz#991df39aca675a192b816e1e0363f9d75d2aa269" integrity 
sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w== @@ -17417,7 +17935,7 @@ yargs@17.7.2, yargs@^17.0.0, yargs@^17.3.1, yargs@^17.5.1, yargs@^17.6.2: y18n "^5.0.5" yargs-parser "^21.1.1" -yargs@^16.2.0: +yargs@^16.0.0, yargs@^16.2.0: version "16.2.0" resolved "https://registry.yarnpkg.com/yargs/-/yargs-16.2.0.tgz#1c82bf0f6b6a66eafce7ef30e376f49a12477f66" integrity sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw== @@ -17430,6 +17948,18 @@ yargs@^16.2.0: y18n "^5.0.5" yargs-parser "^20.2.2" +yargs@^18.0.0: + version "18.0.0" + resolved "https://registry.yarnpkg.com/yargs/-/yargs-18.0.0.tgz#6c84259806273a746b09f579087b68a3c2d25bd1" + integrity sha512-4UEqdc2RYGHZc7Doyqkrqiln3p9X2DZVxaGbwhn2pi7MrRagKaOcIKe8L3OxYcbhXLgLFUS3zAYuQjKBQgmuNg== + dependencies: + cliui "^9.0.1" + escalade "^3.1.1" + get-caller-file "^2.0.5" + string-width "^7.2.0" + y18n "^5.0.5" + yargs-parser "^22.0.0" + ylru@^1.2.0: version "1.4.0" resolved "https://registry.yarnpkg.com/ylru/-/ylru-1.4.0.tgz#0cf0aa57e9c24f8a2cbde0cc1ca2c9592ac4e0f6" @@ -17445,16 +17975,16 @@ yocto-queue@^0.1.0: resolved "https://registry.yarnpkg.com/yocto-queue/-/yocto-queue-0.1.0.tgz#0294eb3dee05028d31ee1a5fa2c556a6aaf10a1b" integrity sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q== -yocto-queue@^1.0.0: - version "1.2.2" - resolved "https://registry.yarnpkg.com/yocto-queue/-/yocto-queue-1.2.2.tgz#3e09c95d3f1aa89a58c114c99223edf639152c00" - integrity sha512-4LCcse/U2MHZ63HAJVE+v71o7yOdIe4cZ70Wpf8D/IyjDKYQLV5GD46B+hSTjJsvV5PztjvHoU580EftxjDZFQ== - yoctocolors-cjs@^2.1.2: version "2.1.2" resolved "https://registry.yarnpkg.com/yoctocolors-cjs/-/yoctocolors-cjs-2.1.2.tgz#f4b905a840a37506813a7acaa28febe97767a242" integrity sha512-cYVsTjKl8b+FrnidjibDWskAv7UKOfcwaVZdp/it9n1s9fU3IkgDbhdIRKCW4JDsAlECJY0ytoVPT3sK6kideA== +yoctocolors@^2.1.1: + version "2.1.2" + resolved 
"https://registry.yarnpkg.com/yoctocolors/-/yoctocolors-2.1.2.tgz#d795f54d173494e7d8db93150cec0ed7f678c83a" + integrity sha512-CzhO+pFNo8ajLM2d2IW/R93ipy99LWjtwblvC1RsoSUMZgyLbYFr221TnSNT7GjGdYui6P459mw9JH/g/zW2ug== + zen-observable-ts@^0.8.21: version "0.8.21" resolved "https://registry.yarnpkg.com/zen-observable-ts/-/zen-observable-ts-0.8.21.tgz#85d0031fbbde1eba3cd07d3ba90da241215f421d" From 6e7858a4df86730804387428ffab27c96e82df2f Mon Sep 17 00:00:00 2001 From: scra Date: Wed, 25 Mar 2026 15:55:54 +0100 Subject: [PATCH 15/18] feat(workflow-executor): type-specific PATCH body validation (#1508) --- .../load-related-record-step-executor.ts | 19 +- .../src/http/executor-http-server.ts | 33 +- .../src/http/pending-data-validators.ts | 37 +++ .../workflow-executor/src/types/record.ts | 2 + .../src/types/step-execution-data.ts | 4 +- .../load-related-record-step-executor.test.ts | 196 +++++++++-- .../step-execution-formatters.test.ts | 1 - .../executors/step-summary-builder.test.ts | 1 - .../test/http/executor-http-server.test.ts | 305 +++++++++++++++++- 9 files changed, 548 insertions(+), 50 deletions(-) create mode 100644 packages/workflow-executor/src/http/pending-data-validators.ts diff --git a/packages/workflow-executor/src/executors/load-related-record-step-executor.ts b/packages/workflow-executor/src/executors/load-related-record-step-executor.ts index 108655e27d..df58ac765a 100644 --- a/packages/workflow-executor/src/executors/load-related-record-step-executor.ts +++ b/packages/workflow-executor/src/executors/load-related-record-step-executor.ts @@ -101,13 +101,12 @@ export default class LoadRelatedRecordStepExecutor extends RecordTaskStepExecuto 50, ); - const relatedCollectionName = relatedData[0].collectionName; const selectedRecordId = relatedData[bestIndex].recordId; await this.context.runStore.saveStepExecution(this.context.runId, { type: 'load-related-record', stepIndex: this.context.stepIndex, - pendingData: { displayName, name, relatedCollectionName, 
suggestedFields, selectedRecordId }, + pendingData: { displayName, name, suggestedFields, selectedRecordId }, selectedRecordRef, }); @@ -126,6 +125,8 @@ export default class LoadRelatedRecordStepExecutor extends RecordTaskStepExecuto /** * Branch A: builds RecordRef from pendingData.selectedRecordId. + * Re-derives relatedCollectionName from FieldSchema using the (possibly updated) relation name, + * so a user-overridden relation name is handled correctly. * No additional getRelatedData call. */ private async resolveFromSelection( @@ -137,7 +138,19 @@ export default class LoadRelatedRecordStepExecutor extends RecordTaskStepExecuto throw new StepStateError(`Step at index ${this.context.stepIndex} has no pending data`); } - const { name, displayName, relatedCollectionName, selectedRecordId } = pendingData; + const { name, displayName, selectedRecordId } = pendingData; + + // Re-derive relatedCollectionName from schema using the (possibly updated) relation name. + // `name` is always a fieldName (set from field.fieldName in buildTarget) — search directly. 
+ const schema = await this.getCollectionSchema(selectedRecordRef.collectionName); + const field = schema.fields.find(f => f.fieldName === name); + const relatedCollectionName = field?.relatedCollectionName; + + if (!relatedCollectionName) { + throw new StepStateError( + `Step at index ${this.context.stepIndex} could not resolve relatedCollectionName for relation "${name}"`, + ); + } const record: RecordRef = { collectionName: relatedCollectionName, diff --git a/packages/workflow-executor/src/http/executor-http-server.ts b/packages/workflow-executor/src/http/executor-http-server.ts index 3bd6ca2153..02674ce1f4 100644 --- a/packages/workflow-executor/src/http/executor-http-server.ts +++ b/packages/workflow-executor/src/http/executor-http-server.ts @@ -2,6 +2,7 @@ import type { Logger } from '../ports/logger-port'; import type { RunStore } from '../ports/run-store'; import type { WorkflowPort } from '../ports/workflow-port'; import type Runner from '../runner'; +import type { StepExecutionData } from '../types/step-execution-data'; import type { Server } from 'http'; import bodyParser from '@koa/bodyparser'; @@ -11,6 +12,7 @@ import Koa from 'koa'; import koaJwt from 'koa-jwt'; import { RunNotFoundError } from '../errors'; +import patchBodySchemas from './pending-data-validators'; export interface ExecutorHttpServerOptions { port: number; @@ -177,30 +179,35 @@ export default class ExecutorHttpServer { return; } - const body = ctx.request.body as Record; - const { userConfirmed } = body; - - if (typeof userConfirmed !== 'boolean') { - ctx.status = 400; - ctx.body = { error: 'userConfirmed must be a boolean' }; + const stepExecutions = await this.options.runStore.getStepExecutions(runId); + const execution = stepExecutions.find(e => e.stepIndex === stepIndex); + const schema = execution ? 
patchBodySchemas[execution.type] : undefined; + + if ( + !execution || + !schema || + !('pendingData' in execution) || + execution.pendingData === undefined + ) { + ctx.status = 404; + ctx.body = { error: 'Step execution not found or has no pending data' }; return; } - const stepExecutions = await this.options.runStore.getStepExecutions(runId); - const execution = stepExecutions.find(e => e.stepIndex === stepIndex); + const parsed = schema.safeParse(ctx.request.body); - if (!execution || !('pendingData' in execution) || execution.pendingData === undefined) { - ctx.status = 404; - ctx.body = { error: 'Step execution not found or has no pending data' }; + if (!parsed.success) { + ctx.status = 400; + ctx.body = { error: 'Invalid request body', details: parsed.error.issues }; return; } await this.options.runStore.saveStepExecution(runId, { ...execution, - pendingData: { ...(execution.pendingData as object), userConfirmed }, - } as Parameters[1]); + pendingData: { ...(execution.pendingData as object), ...(parsed.data as object) }, + } as StepExecutionData); ctx.status = 204; } diff --git a/packages/workflow-executor/src/http/pending-data-validators.ts b/packages/workflow-executor/src/http/pending-data-validators.ts new file mode 100644 index 0000000000..9ed4cba0d0 --- /dev/null +++ b/packages/workflow-executor/src/http/pending-data-validators.ts @@ -0,0 +1,37 @@ +import type { StepExecutionData } from '../types/step-execution-data'; + +import { z } from 'zod'; + +// Per-step-type body schemas for PATCH /runs/:runId/steps/:stepIndex/pending-data. +// Only step types that support the confirmation flow are listed here — others return 404. +// Schemas use .strict() to reject unknown fields from the client. 
+const patchBodySchemas: Partial> = { + 'update-record': z + .object({ + userConfirmed: z.boolean(), + value: z.string().optional(), // user may override the AI-proposed value + }) + .strict(), + + 'trigger-action': z.object({ userConfirmed: z.boolean() }).strict(), + + 'mcp-task': z.object({ userConfirmed: z.boolean() }).strict(), + + 'load-related-record': z + .object({ + userConfirmed: z.boolean(), + // User may intentionally switch to a different relation than the one the AI selected. + // The executor re-derives relatedCollectionName from FieldSchema when processing the confirmation. + name: z.string().optional(), + displayName: z.string().optional(), + // User may override the AI-selected record; must be non-empty when provided. + selectedRecordId: z + .array(z.union([z.string(), z.number()])) + .min(1) + .optional(), + }) + .strict(), + // relatedCollectionName and suggestedFields are NOT accepted — internal executor data. +}; + +export default patchBodySchemas; diff --git a/packages/workflow-executor/src/types/record.ts b/packages/workflow-executor/src/types/record.ts index 2237600fb7..c0441b6e36 100644 --- a/packages/workflow-executor/src/types/record.ts +++ b/packages/workflow-executor/src/types/record.ts @@ -8,6 +8,8 @@ export interface FieldSchema { isRelationship: boolean; /** Cardinality of the relation. Absent for non-relationship fields. */ relationType?: 'BelongsTo' | 'HasMany' | 'HasOne'; + /** Target collection name; only meaningful for relationship fields. 
*/ + relatedCollectionName?: string; } export interface ActionSchema { diff --git a/packages/workflow-executor/src/types/step-execution-data.ts b/packages/workflow-executor/src/types/step-execution-data.ts index 6f58bd5491..01c1a979a7 100644 --- a/packages/workflow-executor/src/types/step-execution-data.ts +++ b/packages/workflow-executor/src/types/step-execution-data.ts @@ -111,13 +111,11 @@ export interface RecordTaskStepExecutionData extends BaseStepExecutionData { // -- Load Related Record -- export interface LoadRelatedRecordPendingData extends RelationRef { - /** Collection name of the related records — needed to build RecordRef in Branch A. */ - relatedCollectionName: string; /** AI-selected fields suggested for display on the frontend. undefined = not computed (no non-relation fields). */ suggestedFields?: string[]; /** * The record id to load. Initially set by the AI. Can be overridden by the frontend - * (future iteration — the current PATCH endpoint only accepts userConfirmed). + * via PATCH /runs/:runId/steps/:stepIndex/pending-data. */ selectedRecordId: Array; /** Set by the frontend via PATCH /runs/:runId/steps/:stepIndex/pending-data. 
*/ diff --git a/packages/workflow-executor/test/executors/load-related-record-step-executor.test.ts b/packages/workflow-executor/test/executors/load-related-record-step-executor.test.ts index 5e168b42dd..536056f8a7 100644 --- a/packages/workflow-executor/test/executors/load-related-record-step-executor.test.ts +++ b/packages/workflow-executor/test/executors/load-related-record-step-executor.test.ts @@ -52,12 +52,19 @@ function makeCollectionSchema(overrides: Partial = {}): Collec primaryKeyFields: ['id'], fields: [ { fieldName: 'email', displayName: 'Email', isRelationship: false }, - { fieldName: 'order', displayName: 'Order', isRelationship: true, relationType: 'BelongsTo' }, + { + fieldName: 'order', + displayName: 'Order', + isRelationship: true, + relationType: 'BelongsTo', + relatedCollectionName: 'orders', + }, { fieldName: 'address', displayName: 'Address', isRelationship: true, relationType: 'HasMany', + relatedCollectionName: 'addresses', }, ], actions: [], @@ -135,7 +142,6 @@ function makePendingExecution( pendingData: { displayName: 'Order', name: 'order', - relatedCollectionName: 'orders', selectedRecordId: [99], suggestedFields: ['status', 'amount'], }, @@ -591,7 +597,6 @@ describe('LoadRelatedRecordStepExecutor', () => { pendingData: { displayName: 'Order', name: 'order', - relatedCollectionName: 'orders', selectedRecordId: [99], suggestedFields: [], }, @@ -664,7 +669,6 @@ describe('LoadRelatedRecordStepExecutor', () => { pendingData: { displayName: 'Order', name: 'order', - relatedCollectionName: 'orders', selectedRecordId: [2], // record at index 1 suggestedFields: ['status'], }, @@ -744,9 +748,8 @@ describe('LoadRelatedRecordStepExecutor', () => { pendingData: { displayName: 'Order', name: 'order', - relatedCollectionName: 'orders', - selectedRecordId: [99], suggestedFields: ['status', 'amount'], + selectedRecordId: [99], userConfirmed: true, }, }); @@ -771,7 +774,6 @@ describe('LoadRelatedRecordStepExecutor', () => { pendingData: 
expect.objectContaining({ displayName: 'Order', name: 'order', - relatedCollectionName: 'orders', selectedRecordId: [99], }), }), @@ -784,7 +786,6 @@ describe('LoadRelatedRecordStepExecutor', () => { pendingData: { displayName: 'Order', name: 'order', - relatedCollectionName: 'orders', suggestedFields: ['status', 'amount'], selectedRecordId: [42], userConfirmed: true, @@ -811,6 +812,158 @@ describe('LoadRelatedRecordStepExecutor', () => { }); }); + describe('resolveFromSelection — relatedCollectionName resolution (Branch A)', () => { + it('derives relatedCollectionName from schema when confirmed', async () => { + const schema = makeCollectionSchema({ + fields: [ + { + fieldName: 'order', + displayName: 'Order', + isRelationship: true, + relationType: 'BelongsTo', + relatedCollectionName: 'orders', + }, + ], + }); + const execution = makePendingExecution({ + pendingData: { + displayName: 'Order', + name: 'order', + suggestedFields: [], + selectedRecordId: [99], + userConfirmed: true, + }, + }); + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([execution]), + }); + const workflowPort = makeMockWorkflowPort({ customers: schema }); + const context = makeContext({ runStore, workflowPort }); + const executor = new LoadRelatedRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('success'); + expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ + executionResult: expect.objectContaining({ + record: expect.objectContaining({ collectionName: 'orders', recordId: [99] }), + }), + }), + ); + }); + + it('returns error when schema has no relatedCollectionName for the relation', async () => { + const schema = makeCollectionSchema({ + fields: [ + { + fieldName: 'order', + displayName: 'Order', + isRelationship: true, + relationType: 'BelongsTo', + // relatedCollectionName intentionally absent + }, + ], + }); + const execution = 
makePendingExecution({ + pendingData: { + displayName: 'Order', + name: 'order', + suggestedFields: [], + selectedRecordId: [99], + userConfirmed: true, + }, + }); + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([execution]), + }); + const workflowPort = makeMockWorkflowPort({ customers: schema }); + const context = makeContext({ runStore, workflowPort }); + const executor = new LoadRelatedRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe( + 'An unexpected error occurred while processing this step.', + ); + expect(runStore.saveStepExecution).not.toHaveBeenCalled(); + }); + + it('uses overridden relation name from pendingData to derive relatedCollectionName', async () => { + const schema = makeCollectionSchema({ + fields: [ + { + fieldName: 'order', + displayName: 'Order', + isRelationship: true, + relationType: 'BelongsTo', + relatedCollectionName: 'orders', + }, + { + fieldName: 'address', + displayName: 'Address', + isRelationship: true, + relationType: 'HasMany', + relatedCollectionName: 'addresses', + }, + ], + }); + // User overrode AI's suggestion of 'order' to 'address' via PATCH + const execution = makePendingExecution({ + pendingData: { + displayName: 'Address', + name: 'address', + suggestedFields: [], + selectedRecordId: [77], + userConfirmed: true, + }, + }); + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([execution]), + }); + const workflowPort = makeMockWorkflowPort({ customers: schema }); + const context = makeContext({ runStore, workflowPort }); + const executor = new LoadRelatedRecordStepExecutor(context); + + const result = await executor.execute(); + + expect(result.stepOutcome.status).toBe('success'); + expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ + executionResult: expect.objectContaining({ + record: 
expect.objectContaining({ collectionName: 'addresses', recordId: [77] }), + }), + }), + ); + }); + + it('calls getCollectionSchema with selectedRecordRef.collectionName', async () => { + const execution = makePendingExecution({ + selectedRecordRef: { collectionName: 'customers', recordId: [42], stepIndex: 0 }, + pendingData: { + displayName: 'Order', + name: 'order', + suggestedFields: [], + selectedRecordId: [99], + userConfirmed: true, + }, + }); + const runStore = makeMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([execution]), + }); + const workflowPort = makeMockWorkflowPort({ customers: makeCollectionSchema() }); + const context = makeContext({ runStore, workflowPort }); + const executor = new LoadRelatedRecordStepExecutor(context); + + await executor.execute(); + + expect(workflowPort.getCollectionSchema).toHaveBeenCalledWith('customers'); + }); + }); + describe('confirmation rejected (Branch A)', () => { it('skips the load when user rejects', async () => { const agentPort = makeMockAgentPort(); @@ -818,9 +971,8 @@ describe('LoadRelatedRecordStepExecutor', () => { pendingData: { displayName: 'Order', name: 'order', - relatedCollectionName: 'orders', - selectedRecordId: [99], suggestedFields: ['status', 'amount'], + selectedRecordId: [99], userConfirmed: false, }, }); @@ -844,21 +996,25 @@ describe('LoadRelatedRecordStepExecutor', () => { }); }); - describe('no pending data in confirmation flow (Branch A)', () => { - it('falls through to first-call path when no execution record is found', async () => { + describe('trigger before PATCH (Branch A)', () => { + it('re-emits awaiting-input when userConfirmed is not yet set in pendingData', async () => { + const agentPort = makeMockAgentPort(); + const execution = makePendingExecution(); // pendingData has no userConfirmed const runStore = makeMockRunStore({ - init: jest.fn().mockResolvedValue(undefined), - close: jest.fn().mockResolvedValue(undefined), - getStepExecutions: 
jest.fn().mockResolvedValue([]), + getStepExecutions: jest.fn().mockResolvedValue([execution]), }); - const context = makeContext({ runStore }); + const context = makeContext({ agentPort, runStore }); const executor = new LoadRelatedRecordStepExecutor(context); const result = await executor.execute(); expect(result.stepOutcome.status).toBe('awaiting-input'); + expect(agentPort.getRelatedData).not.toHaveBeenCalled(); + expect(runStore.saveStepExecution).not.toHaveBeenCalled(); }); + }); + describe('no pending data in confirmation flow (Branch A)', () => { it('returns error outcome when execution exists but pendingData is absent', async () => { const runStore = makeMockRunStore({ getStepExecutions: jest.fn().mockResolvedValue([ @@ -1001,9 +1157,8 @@ describe('LoadRelatedRecordStepExecutor', () => { pendingData: { displayName: 'Order', name: 'order', - relatedCollectionName: 'orders', - selectedRecordId: [99], suggestedFields: ['status', 'amount'], + selectedRecordId: [99], userConfirmed: true, }, }); @@ -1247,7 +1402,6 @@ describe('LoadRelatedRecordStepExecutor', () => { pendingData: expect.objectContaining({ displayName: 'Invoice', name: 'invoice', - relatedCollectionName: 'invoices', selectedRecordId: [55], }), selectedRecordRef: expect.objectContaining({ recordId: [99], collectionName: 'orders' }), @@ -1366,9 +1520,8 @@ describe('LoadRelatedRecordStepExecutor', () => { pendingData: { displayName: 'Order', name: 'order', - relatedCollectionName: 'orders', - selectedRecordId: [99], suggestedFields: ['status', 'amount'], + selectedRecordId: [99], userConfirmed: false, }, }); @@ -1452,7 +1605,6 @@ describe('LoadRelatedRecordStepExecutor', () => { pendingData: { displayName: 'Invoice', name: 'invoice', - relatedCollectionName: 'invoices', selectedRecordId: [55], }, }; diff --git a/packages/workflow-executor/test/executors/step-execution-formatters.test.ts b/packages/workflow-executor/test/executors/step-execution-formatters.test.ts index dc4f65db29..7d71a6b259 100644 
--- a/packages/workflow-executor/test/executors/step-execution-formatters.test.ts +++ b/packages/workflow-executor/test/executors/step-execution-formatters.test.ts @@ -40,7 +40,6 @@ describe('StepExecutionFormatters', () => { pendingData: { displayName: 'Address', name: 'address', - relatedCollectionName: 'addresses', selectedRecordId: [1], }, }; diff --git a/packages/workflow-executor/test/executors/step-summary-builder.test.ts b/packages/workflow-executor/test/executors/step-summary-builder.test.ts index a6a5c743f4..24b07c1715 100644 --- a/packages/workflow-executor/test/executors/step-summary-builder.test.ts +++ b/packages/workflow-executor/test/executors/step-summary-builder.test.ts @@ -252,7 +252,6 @@ describe('StepSummaryBuilder', () => { pendingData: { displayName: 'Address', name: 'address', - relatedCollectionName: 'addresses', selectedRecordId: [1], }, }; diff --git a/packages/workflow-executor/test/http/executor-http-server.test.ts b/packages/workflow-executor/test/http/executor-http-server.test.ts index dca12e7c26..f6034e1c68 100644 --- a/packages/workflow-executor/test/http/executor-http-server.test.ts +++ b/packages/workflow-executor/test/http/executor-http-server.test.ts @@ -267,6 +267,22 @@ describe('ExecutorHttpServer', () => { expect(runner.triggerPoll).not.toHaveBeenCalled(); }); + + it('returns 403 when hasRunAccess returns false on PATCH /runs/:runId/steps/:stepIndex/pending-data', async () => { + const workflowPort = createMockWorkflowPort({ + hasRunAccess: jest.fn().mockResolvedValue(false), + }); + const server = createServer({ workflowPort }); + const token = signToken({ id: 'user-1' }); + + const response = await request(server.callback) + .patch('/runs/run-1/steps/0/pending-data') + .set('Authorization', `Bearer ${token}`) + .send({ userConfirmed: true }); + + expect(response.status).toBe(403); + expect(response.body).toEqual({ error: 'Forbidden' }); + }); }); describe('GET /runs/:runId', () => { @@ -365,7 +381,6 @@ 
describe('ExecutorHttpServer', () => { getStepExecutions: jest.fn().mockResolvedValue([existing]), saveStepExecution: jest.fn().mockResolvedValue(undefined), }); - const server = createServer({ runStore }); const token = signToken({ id: 'user-1' }); @@ -393,7 +408,6 @@ describe('ExecutorHttpServer', () => { getStepExecutions: jest.fn().mockResolvedValue([existing]), saveStepExecution: jest.fn().mockResolvedValue(undefined), }); - const server = createServer({ runStore }); const token = signToken({ id: 'user-1' }); @@ -415,7 +429,6 @@ describe('ExecutorHttpServer', () => { const runStore = createMockRunStore({ getStepExecutions: jest.fn().mockResolvedValue([]), }); - const server = createServer({ runStore }); const token = signToken({ id: 'user-1' }); @@ -428,12 +441,11 @@ describe('ExecutorHttpServer', () => { expect(response.body).toEqual({ error: 'Step execution not found or has no pending data' }); }); - it('returns 404 when step execution has no pendingData', async () => { + it('returns 404 when step type does not support pending-data confirmation', async () => { const existing = { type: 'condition' as const, stepIndex: 1 }; const runStore = createMockRunStore({ getStepExecutions: jest.fn().mockResolvedValue([existing]), }); - const server = createServer({ runStore }); const token = signToken({ id: 'user-1' }); @@ -459,7 +471,76 @@ describe('ExecutorHttpServer', () => { expect(response.body).toEqual({ error: 'Invalid stepIndex' }); }); + it('returns 400 when body contains unknown fields', async () => { + const existing = { + type: 'update-record' as const, + stepIndex: 0, + pendingData: { name: 'status', displayName: 'Status', value: 'active' }, + }; + const runStore = createMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([existing]), + }); + const server = createServer({ runStore }); + const token = signToken({ id: 'user-1' }); + + const response = await request(server.callback) + .patch('/runs/run-1/steps/0/pending-data') + 
.set('Authorization', `Bearer ${token}`) + .send({ userConfirmed: true, extra: 'injection' }); + + expect(response.status).toBe(400); + expect(response.body).toEqual(expect.objectContaining({ error: 'Invalid request body' })); + }); + it('returns 400 when userConfirmed is not a boolean', async () => { + const existing = { + type: 'update-record' as const, + stepIndex: 0, + pendingData: { name: 'status', displayName: 'Status', value: 'active' }, + }; + const runStore = createMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([existing]), + }); + const server = createServer({ runStore }); + const token = signToken({ id: 'user-1' }); + + const response = await request(server.callback) + .patch('/runs/run-1/steps/0/pending-data') + .set('Authorization', `Bearer ${token}`) + .send({ userConfirmed: 'yes' }); + + expect(response.status).toBe(400); + expect(response.body).toEqual(expect.objectContaining({ error: 'Invalid request body' })); + }); + + it('update-record: accepts value override', async () => { + const existing = { + type: 'update-record' as const, + stepIndex: 0, + pendingData: { fieldName: 'status', value: 'old_value' }, + }; + const runStore = createMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([existing]), + saveStepExecution: jest.fn().mockResolvedValue(undefined), + }); + const server = createServer({ runStore }); + const token = signToken({ id: 'user-1' }); + + const response = await request(server.callback) + .patch('/runs/run-1/steps/0/pending-data') + .set('Authorization', `Bearer ${token}`) + .send({ userConfirmed: true, value: 'new_value' }); + + expect(response.status).toBe(204); + expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ + pendingData: expect.objectContaining({ value: 'new_value', userConfirmed: true }), + }), + ); + }); + + it('update-record: rejects unknown field', async () => { const existing = { type: 'update-record' as const, stepIndex: 0, @@ -468,17 
+549,227 @@ describe('ExecutorHttpServer', () => { const runStore = createMockRunStore({ getStepExecutions: jest.fn().mockResolvedValue([existing]), }); + const server = createServer({ runStore }); + const token = signToken({ id: 'user-1' }); + + const response = await request(server.callback) + .patch('/runs/run-1/steps/0/pending-data') + .set('Authorization', `Bearer ${token}`) + .send({ userConfirmed: true, name: 'hack' }); + + expect(response.status).toBe(400); + expect(response.body).toEqual(expect.objectContaining({ error: 'Invalid request body' })); + }); + + it('load-related-record: accepts selectedRecordId override', async () => { + const existing = { + type: 'load-related-record' as const, + stepIndex: 1, + pendingData: { + name: 'order', + displayName: 'Order', + selectedRecordId: [99], + suggestedFields: [], + }, + selectedRecordRef: { collectionName: 'customers', recordId: [42], stepIndex: 0 }, + }; + const runStore = createMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([existing]), + saveStepExecution: jest.fn().mockResolvedValue(undefined), + }); + const server = createServer({ runStore }); + const token = signToken({ id: 'user-1' }); + + const response = await request(server.callback) + .patch('/runs/run-1/steps/1/pending-data') + .set('Authorization', `Bearer ${token}`) + .send({ userConfirmed: true, selectedRecordId: ['42'] }); + + expect(response.status).toBe(204); + expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ + pendingData: expect.objectContaining({ selectedRecordId: ['42'], userConfirmed: true }), + }), + ); + }); + + it('load-related-record: accepts relation override (name + displayName)', async () => { + const existing = { + type: 'load-related-record' as const, + stepIndex: 1, + pendingData: { + name: 'order', + displayName: 'Order', + selectedRecordId: [99], + suggestedFields: [], + }, + selectedRecordRef: { collectionName: 'customers', recordId: [42], stepIndex: 0 }, + }; 
+ const runStore = createMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([existing]), + saveStepExecution: jest.fn().mockResolvedValue(undefined), + }); + const server = createServer({ runStore }); + const token = signToken({ id: 'user-1' }); + + const response = await request(server.callback) + .patch('/runs/run-1/steps/1/pending-data') + .set('Authorization', `Bearer ${token}`) + .send({ userConfirmed: true, name: 'orders', displayName: 'Orders' }); + + expect(response.status).toBe(204); + expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ + pendingData: expect.objectContaining({ name: 'orders', displayName: 'Orders' }), + }), + ); + }); + it('load-related-record: rejects empty selectedRecordId', async () => { + const existing = { + type: 'load-related-record' as const, + stepIndex: 1, + pendingData: { + name: 'order', + displayName: 'Order', + selectedRecordId: [99], + suggestedFields: [], + }, + selectedRecordRef: { collectionName: 'customers', recordId: [42], stepIndex: 0 }, + }; + const runStore = createMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([existing]), + }); + const server = createServer({ runStore }); + const token = signToken({ id: 'user-1' }); + + const response = await request(server.callback) + .patch('/runs/run-1/steps/1/pending-data') + .set('Authorization', `Bearer ${token}`) + .send({ userConfirmed: true, selectedRecordId: [] }); + + expect(response.status).toBe(400); + expect(response.body).toEqual(expect.objectContaining({ error: 'Invalid request body' })); + }); + + it('load-related-record: rejects relatedCollectionName (internal field)', async () => { + const existing = { + type: 'load-related-record' as const, + stepIndex: 1, + pendingData: { + name: 'order', + displayName: 'Order', + selectedRecordId: [99], + suggestedFields: [], + }, + selectedRecordRef: { collectionName: 'customers', recordId: [42], stepIndex: 0 }, + }; + const runStore = 
createMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([existing]), + }); + const server = createServer({ runStore }); + const token = signToken({ id: 'user-1' }); + + const response = await request(server.callback) + .patch('/runs/run-1/steps/1/pending-data') + .set('Authorization', `Bearer ${token}`) + .send({ userConfirmed: true, relatedCollectionName: 'Order' }); + + expect(response.status).toBe(400); + expect(response.body).toEqual(expect.objectContaining({ error: 'Invalid request body' })); + }); + + it('trigger-action: rejects extra field', async () => { + const existing = { + type: 'trigger-action' as const, + stepIndex: 0, + pendingData: { name: 'send_email', displayName: 'Send Email' }, + }; + const runStore = createMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([existing]), + }); const server = createServer({ runStore }); const token = signToken({ id: 'user-1' }); const response = await request(server.callback) .patch('/runs/run-1/steps/0/pending-data') .set('Authorization', `Bearer ${token}`) - .send({ userConfirmed: 'yes' }); + .send({ userConfirmed: true, name: 'other_action' }); expect(response.status).toBe(400); - expect(response.body).toEqual({ error: 'userConfirmed must be a boolean' }); + expect(response.body).toEqual(expect.objectContaining({ error: 'Invalid request body' })); + }); + + it('mcp-task: accepts only userConfirmed and returns 204', async () => { + const existing = { + type: 'mcp-task' as const, + stepIndex: 0, + pendingData: { name: 'send_report', input: {} }, + }; + const runStore = createMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([existing]), + saveStepExecution: jest.fn().mockResolvedValue(undefined), + }); + const server = createServer({ runStore }); + const token = signToken({ id: 'user-1' }); + + const response = await request(server.callback) + .patch('/runs/run-1/steps/0/pending-data') + .set('Authorization', `Bearer ${token}`) + .send({ userConfirmed: true }); + + 
expect(response.status).toBe(204); + expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ + pendingData: expect.objectContaining({ userConfirmed: true }), + }), + ); + }); + + it('mcp-task: rejects extra field', async () => { + const existing = { + type: 'mcp-task' as const, + stepIndex: 0, + pendingData: { name: 'send_report', input: {} }, + }; + const runStore = createMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([existing]), + }); + const server = createServer({ runStore }); + const token = signToken({ id: 'user-1' }); + + const response = await request(server.callback) + .patch('/runs/run-1/steps/0/pending-data') + .set('Authorization', `Bearer ${token}`) + .send({ userConfirmed: true, name: 'override' }); + + expect(response.status).toBe(400); + expect(response.body).toEqual(expect.objectContaining({ error: 'Invalid request body' })); + }); + + it('returns 500 when saveStepExecution rejects', async () => { + const existing = { + type: 'update-record' as const, + stepIndex: 0, + pendingData: { fieldName: 'status', value: 'active' }, + }; + const runStore = createMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([existing]), + saveStepExecution: jest.fn().mockRejectedValue(new Error('disk full')), + }); + const server = createServer({ runStore }); + const token = signToken({ id: 'user-1' }); + + const response = await request(server.callback) + .patch('/runs/run-1/steps/0/pending-data') + .set('Authorization', `Bearer ${token}`) + .send({ userConfirmed: true }); + + expect(response.status).toBe(500); + expect(response.body).toEqual({ error: 'Internal server error' }); }); }); From a52c00a28e6a471cb06e8819ef64c071cdde1a7a Mon Sep 17 00:00:00 2001 From: scra Date: Wed, 25 Mar 2026 17:44:57 +0100 Subject: [PATCH 16/18] refactor(workflow-executor): encapsulate pending-data business logic in Runner (#1503) --- packages/workflow-executor/CLAUDE.md | 4 +- packages/workflow-executor/src/errors.ts 
| 23 + .../src/http/executor-http-server.ts | 48 +- .../src/{http => }/pending-data-validators.ts | 2 +- packages/workflow-executor/src/runner.ts | 45 +- .../test/http/executor-http-server.test.ts | 420 ++---------------- .../workflow-executor/test/runner.test.ts | 187 +++++++- 7 files changed, 318 insertions(+), 411 deletions(-) rename packages/workflow-executor/src/{http => }/pending-data-validators.ts (95%) diff --git a/packages/workflow-executor/CLAUDE.md b/packages/workflow-executor/CLAUDE.md index a54d599883..f3822a22d0 100644 --- a/packages/workflow-executor/CLAUDE.md +++ b/packages/workflow-executor/CLAUDE.md @@ -76,7 +76,9 @@ src/ - **Privacy** — Zero client data leaves the client's infrastructure. `StepOutcome` is sent to the orchestrator and must NEVER contain client data. Privacy-sensitive information (e.g. AI reasoning) must stay in `StepExecutionData` (persisted in the RunStore, client-side only). - **Ports (IO injection)** — All external IO goes through injected port interfaces, keeping the core pure and testable. - **AI integration** — Uses `@langchain/core` (`BaseChatModel`, `DynamicStructuredTool`) for AI-powered steps. `ExecutionContext.model` is a `BaseChatModel`. -- **Error hierarchy** — All domain errors must extend `WorkflowExecutorError` (defined in `src/errors.ts`). This ensures executors can distinguish domain errors (caught → error outcome) from infrastructure errors (uncaught → propagate to caller). Never throw a plain `Error` for a domain error case. +- **Error hierarchy** — Two families of errors coexist in `src/errors.ts`: + - **Domain errors** (`extends WorkflowExecutorError`) — Thrown during step execution (e.g. `RecordNotFoundError`, `MissingToolCallError`). Caught by `base-step-executor.ts` and converted into `stepOutcome.error` sent to the orchestrator. All domain errors must extend `WorkflowExecutorError`. + - **Boundary errors** (`extends Error`) — Thrown outside step execution, at the HTTP or Runner layer (e.g. 
`RunNotFoundError`, `PendingDataNotFoundError`, `ConfigurationError`). Caught by the HTTP server and translated into HTTP status codes (404, 400, etc.). These intentionally do NOT extend `WorkflowExecutorError` to prevent `base-step-executor` from catching them as step failures. - **Dual error messages** — `WorkflowExecutorError` carries two messages: `message` (technical, for dev logs) and `userMessage` (human-readable, surfaced to the Forest Admin UI via `stepOutcome.error`). The mapping happens in a single place: `base-step-executor.ts` uses `error.userMessage` when building the error outcome. When adding a new error subclass, always provide a distinct `userMessage` oriented toward end-users (no collection names, field names, or AI internals). If `userMessage` is omitted in the constructor call, it falls back to `message`. - **displayName in AI tools** — All `DynamicStructuredTool` schemas and system message prompts must use `displayName`, never `fieldName`. `displayName` is a Forest Admin frontend feature that replaces the technical field/relation/action name with a product-oriented label configured by the Forest Admin admin. End users write their workflow prompts using these display names, not the underlying technical names. After an AI tool call returns display names, map them back to `fieldName`/`name` before using them in datasource operations (e.g. filtering record values, calling `getRecord`). - **No recovery/retry** — Once the executor returns a step result to the orchestrator, the step is considered executed. There is no mechanism to re-dispatch a step, so executors must NOT include recovery checks (e.g. checking the RunStore for cached results before executing). Each step executes exactly once. 
diff --git a/packages/workflow-executor/src/errors.ts b/packages/workflow-executor/src/errors.ts index ae7df0b995..3c55df97e2 100644 --- a/packages/workflow-executor/src/errors.ts +++ b/packages/workflow-executor/src/errors.ts @@ -215,3 +215,26 @@ export class RunNotFoundError extends Error { if (cause !== undefined) this.cause = cause; } } + +export class PendingDataNotFoundError extends Error { + constructor(runId: string, stepIndex: number) { + super(`Step ${stepIndex} in run "${runId}" not found or has no pending data`); + this.name = 'PendingDataNotFoundError'; + } +} + +/** Minimal mirror of ZodIssue — avoids importing Zod types into errors.ts. */ +export interface ValidationIssue { + path: (string | number)[]; + message: string; + code: string; +} + +export class InvalidPendingDataError extends WorkflowExecutorError { + readonly issues: ValidationIssue[]; + + constructor(issues: ValidationIssue[]) { + super('Invalid pending data', 'The request body is invalid.'); + this.issues = issues; + } +} diff --git a/packages/workflow-executor/src/http/executor-http-server.ts b/packages/workflow-executor/src/http/executor-http-server.ts index 02674ce1f4..cae4be50cf 100644 --- a/packages/workflow-executor/src/http/executor-http-server.ts +++ b/packages/workflow-executor/src/http/executor-http-server.ts @@ -1,8 +1,6 @@ import type { Logger } from '../ports/logger-port'; -import type { RunStore } from '../ports/run-store'; import type { WorkflowPort } from '../ports/workflow-port'; import type Runner from '../runner'; -import type { StepExecutionData } from '../types/step-execution-data'; import type { Server } from 'http'; import bodyParser from '@koa/bodyparser'; @@ -11,12 +9,10 @@ import http from 'http'; import Koa from 'koa'; import koaJwt from 'koa-jwt'; -import { RunNotFoundError } from '../errors'; -import patchBodySchemas from './pending-data-validators'; +import { InvalidPendingDataError, PendingDataNotFoundError, RunNotFoundError } from '../errors'; export 
interface ExecutorHttpServerOptions { port: number; - runStore: RunStore; runner: Runner; authSecret: string; workflowPort: WorkflowPort; @@ -142,9 +138,7 @@ export default class ExecutorHttpServer { } private async handleGetRun(ctx: Koa.Context): Promise { - const { runId } = ctx.params; - const steps = await this.options.runStore.getStepExecutions(runId); - + const steps = await this.options.runner.getRunStepExecutions(ctx.params.runId); ctx.body = { steps }; } @@ -179,36 +173,26 @@ export default class ExecutorHttpServer { return; } - const stepExecutions = await this.options.runStore.getStepExecutions(runId); - const execution = stepExecutions.find(e => e.stepIndex === stepIndex); - const schema = execution ? patchBodySchemas[execution.type] : undefined; - - if ( - !execution || - !schema || - !('pendingData' in execution) || - execution.pendingData === undefined - ) { - ctx.status = 404; - ctx.body = { error: 'Step execution not found or has no pending data' }; + try { + await this.options.runner.patchPendingData(runId, stepIndex, ctx.request.body); + } catch (err) { + if (err instanceof PendingDataNotFoundError) { + ctx.status = 404; + ctx.body = { error: 'Step execution not found or has no pending data' }; - return; - } + return; + } - const parsed = schema.safeParse(ctx.request.body); + if (err instanceof InvalidPendingDataError) { + ctx.status = 400; + ctx.body = { error: 'Invalid request body', details: err.issues }; - if (!parsed.success) { - ctx.status = 400; - ctx.body = { error: 'Invalid request body', details: parsed.error.issues }; + return; + } - return; + throw err; } - await this.options.runStore.saveStepExecution(runId, { - ...execution, - pendingData: { ...(execution.pendingData as object), ...(parsed.data as object) }, - } as StepExecutionData); - ctx.status = 204; } } diff --git a/packages/workflow-executor/src/http/pending-data-validators.ts b/packages/workflow-executor/src/pending-data-validators.ts similarity index 95% rename from 
packages/workflow-executor/src/http/pending-data-validators.ts rename to packages/workflow-executor/src/pending-data-validators.ts index 9ed4cba0d0..188c766f1d 100644 --- a/packages/workflow-executor/src/http/pending-data-validators.ts +++ b/packages/workflow-executor/src/pending-data-validators.ts @@ -1,4 +1,4 @@ -import type { StepExecutionData } from '../types/step-execution-data'; +import type { StepExecutionData } from './types/step-execution-data'; import { z } from 'zod'; diff --git a/packages/workflow-executor/src/runner.ts b/packages/workflow-executor/src/runner.ts index ccb423ff50..69e01d283b 100644 --- a/packages/workflow-executor/src/runner.ts +++ b/packages/workflow-executor/src/runner.ts @@ -4,12 +4,19 @@ import type { Logger } from './ports/logger-port'; import type { RunStore } from './ports/run-store'; import type { McpConfiguration, WorkflowPort } from './ports/workflow-port'; import type { PendingStepExecution, StepExecutionResult } from './types/execution'; +import type { StepExecutionData } from './types/step-execution-data'; import type { AiClient, RemoteTool } from '@forestadmin/ai-proxy'; import ConsoleLogger from './adapters/console-logger'; -import { RunNotFoundError, causeMessage } from './errors'; +import { + InvalidPendingDataError, + PendingDataNotFoundError, + RunNotFoundError, + causeMessage, +} from './errors'; import StepExecutorFactory from './executors/step-executor-factory'; import ExecutorHttpServer from './http/executor-http-server'; +import patchBodySchemas from './pending-data-validators'; import validateSecrets from './validate-secrets'; export interface RunnerConfig { @@ -64,7 +71,6 @@ export default class Runner { if (this.config.httpPort !== undefined && !this.httpServer) { const server = new ExecutorHttpServer({ port: this.config.httpPort, - runStore: this.config.runStore, runner: this, authSecret: this.config.authSecret, workflowPort: this.config.workflowPort, @@ -102,6 +108,41 @@ export default class Runner { // TODO: 
graceful drain of in-flight steps (out of scope PRD-223) } + async getRunStepExecutions(runId: string): Promise { + return this.config.runStore.getStepExecutions(runId); + } + + async patchPendingData(runId: string, stepIndex: number, body: unknown): Promise { + const stepExecutions = await this.config.runStore.getStepExecutions(runId); + const execution = stepExecutions.find(e => e.stepIndex === stepIndex); + const schema = execution ? patchBodySchemas[execution.type] : undefined; + + // pendingData is typed as T | undefined; null is not expected (RunStore never persists null) + // but `== null` guards against both for safety. + if (!execution || !schema || !('pendingData' in execution) || execution.pendingData == null) { + throw new PendingDataNotFoundError(runId, stepIndex); + } + + const parsed = schema.safeParse(body); + + if (!parsed.success) { + throw new InvalidPendingDataError( + parsed.error.issues.map(({ path, message, code }) => ({ + path: path as (string | number)[], + message, + code, + })), + ); + } + + // Cast is safe: the type guard above ensures `execution` is the correct union branch, + // and patchBodySchemas[execution.type] only accepts keys valid for that branch. 
+ await this.config.runStore.saveStepExecution(runId, { + ...execution, + pendingData: { ...(execution.pendingData as object), ...(parsed.data as object) }, + } as StepExecutionData); + } + async triggerPoll(runId: string): Promise { const step = await this.config.workflowPort.getPendingStepExecutionsForRun(runId); diff --git a/packages/workflow-executor/test/http/executor-http-server.test.ts b/packages/workflow-executor/test/http/executor-http-server.test.ts index f6034e1c68..17aabf8813 100644 --- a/packages/workflow-executor/test/http/executor-http-server.test.ts +++ b/packages/workflow-executor/test/http/executor-http-server.test.ts @@ -1,11 +1,14 @@ -import type { RunStore } from '../../src/ports/run-store'; import type { WorkflowPort } from '../../src/ports/workflow-port'; import type Runner from '../../src/runner'; import jsonwebtoken from 'jsonwebtoken'; import request from 'supertest'; -import { RunNotFoundError } from '../../src/errors'; +import { + InvalidPendingDataError, + PendingDataNotFoundError, + RunNotFoundError, +} from '../../src/errors'; import ExecutorHttpServer from '../../src/http/executor-http-server'; const AUTH_SECRET = 'test-auth-secret'; @@ -14,21 +17,13 @@ function signToken(payload: object, secret = AUTH_SECRET, options?: jsonwebtoken return jsonwebtoken.sign(payload, secret, { expiresIn: '1h', ...options }); } -function createMockRunStore(overrides: Partial = {}): RunStore { - return { - init: jest.fn().mockResolvedValue(undefined), - close: jest.fn().mockResolvedValue(undefined), - getStepExecutions: jest.fn().mockResolvedValue([]), - saveStepExecution: jest.fn().mockResolvedValue(undefined), - ...overrides, - }; -} - function createMockRunner(overrides: Partial = {}): Runner { return { start: jest.fn().mockResolvedValue(undefined), stop: jest.fn().mockResolvedValue(undefined), triggerPoll: jest.fn().mockResolvedValue(undefined), + getRunStepExecutions: jest.fn().mockResolvedValue([]), + patchPendingData: 
jest.fn().mockResolvedValue(undefined), ...overrides, } as unknown as Runner; } @@ -47,7 +42,6 @@ function createMockWorkflowPort(overrides: Partial = {}): Workflow function createServer( overrides: { - runStore?: RunStore; runner?: Runner; workflowPort?: WorkflowPort; logger?: { error: jest.Mock }; @@ -55,7 +49,6 @@ function createServer( ) { return new ExecutorHttpServer({ port: 0, - runStore: overrides.runStore ?? createMockRunStore(), runner: overrides.runner ?? createMockRunner(), authSecret: AUTH_SECRET, workflowPort: overrides.workflowPort ?? createMockWorkflowPort(), @@ -240,17 +233,17 @@ describe('ExecutorHttpServer', () => { ); }); - it('does not call getStepExecutions when hasRunAccess returns false', async () => { - const runStore = createMockRunStore(); + it('does not call getRunStepExecutions when hasRunAccess returns false', async () => { + const runner = createMockRunner(); const workflowPort = createMockWorkflowPort({ hasRunAccess: jest.fn().mockResolvedValue(false), }); - const server = createServer({ runStore, workflowPort }); + const server = createServer({ runner, workflowPort }); const token = signToken({ id: 'user-1' }); await request(server.callback).get('/runs/run-1').set('Authorization', `Bearer ${token}`); - expect(runStore.getStepExecutions).not.toHaveBeenCalled(); + expect(runner.getRunStepExecutions).not.toHaveBeenCalled(); }); it('does not call triggerPoll when hasRunAccess returns false', async () => { @@ -286,14 +279,14 @@ describe('ExecutorHttpServer', () => { }); describe('GET /runs/:runId', () => { - it('should return steps from the run store', async () => { + it('should return steps from the runner', async () => { const steps = [{ type: 'condition' as const, stepIndex: 0 }]; - const runStore = createMockRunStore({ - getStepExecutions: jest.fn().mockResolvedValue(steps), + const runner = createMockRunner({ + getRunStepExecutions: jest.fn().mockResolvedValue(steps), }); - const server = createServer({ runStore }); + const server = 
createServer({ runner }); const token = signToken({ id: 'user-1' }); const response = await request(server.callback) @@ -302,15 +295,15 @@ describe('ExecutorHttpServer', () => { expect(response.status).toBe(200); expect(response.body).toEqual({ steps }); - expect(runStore.getStepExecutions).toHaveBeenCalledWith('run-1'); + expect(runner.getRunStepExecutions).toHaveBeenCalledWith('run-1'); }); - it('should return 500 when getStepExecutions rejects', async () => { - const runStore = createMockRunStore({ - getStepExecutions: jest.fn().mockRejectedValue(new Error('db error')), + it('should return 500 when getRunStepExecutions rejects', async () => { + const runner = createMockRunner({ + getRunStepExecutions: jest.fn().mockRejectedValue(new Error('db error')), }); - const server = createServer({ runStore }); + const server = createServer({ runner }); const token = signToken({ id: 'user-1' }); const response = await request(server.callback) @@ -371,17 +364,11 @@ describe('ExecutorHttpServer', () => { }); describe('PATCH /runs/:runId/steps/:stepIndex/pending-data', () => { - it('returns 204 and merges userConfirmed:true into pendingData', async () => { - const existing = { - type: 'update-record' as const, - stepIndex: 2, - pendingData: { fieldName: 'status', value: 'active' }, - }; - const runStore = createMockRunStore({ - getStepExecutions: jest.fn().mockResolvedValue([existing]), - saveStepExecution: jest.fn().mockResolvedValue(undefined), + it('returns 204 when patchPendingData succeeds', async () => { + const runner = createMockRunner({ + patchPendingData: jest.fn().mockResolvedValue(undefined), }); - const server = createServer({ runStore }); + const server = createServer({ runner }); const token = signToken({ id: 'user-1' }); const response = await request(server.callback) @@ -390,46 +377,14 @@ describe('ExecutorHttpServer', () => { .send({ userConfirmed: true }); expect(response.status).toBe(204); - expect(runStore.saveStepExecution).toHaveBeenCalledWith( - 
'run-1', - expect.objectContaining({ - pendingData: { fieldName: 'status', value: 'active', userConfirmed: true }, - }), - ); - }); - - it('returns 204 and merges userConfirmed:false into pendingData', async () => { - const existing = { - type: 'trigger-action' as const, - stepIndex: 0, - pendingData: { name: 'send_email', displayName: 'Send Email' }, - }; - const runStore = createMockRunStore({ - getStepExecutions: jest.fn().mockResolvedValue([existing]), - saveStepExecution: jest.fn().mockResolvedValue(undefined), - }); - const server = createServer({ runStore }); - const token = signToken({ id: 'user-1' }); - - const response = await request(server.callback) - .patch('/runs/run-1/steps/0/pending-data') - .set('Authorization', `Bearer ${token}`) - .send({ userConfirmed: false }); - - expect(response.status).toBe(204); - expect(runStore.saveStepExecution).toHaveBeenCalledWith( - 'run-1', - expect.objectContaining({ - pendingData: { name: 'send_email', displayName: 'Send Email', userConfirmed: false }, - }), - ); + expect(runner.patchPendingData).toHaveBeenCalledWith('run-1', 2, { userConfirmed: true }); }); - it('returns 404 when step execution does not exist', async () => { - const runStore = createMockRunStore({ - getStepExecutions: jest.fn().mockResolvedValue([]), + it('returns 404 when patchPendingData throws PendingDataNotFoundError', async () => { + const runner = createMockRunner({ + patchPendingData: jest.fn().mockRejectedValue(new PendingDataNotFoundError('run-1', 0)), }); - const server = createServer({ runStore }); + const server = createServer({ runner }); const token = signToken({ id: 'user-1' }); const response = await request(server.callback) @@ -441,67 +396,14 @@ describe('ExecutorHttpServer', () => { expect(response.body).toEqual({ error: 'Step execution not found or has no pending data' }); }); - it('returns 404 when step type does not support pending-data confirmation', async () => { - const existing = { type: 'condition' as const, stepIndex: 1 
}; - const runStore = createMockRunStore({ - getStepExecutions: jest.fn().mockResolvedValue([existing]), - }); - const server = createServer({ runStore }); - const token = signToken({ id: 'user-1' }); - - const response = await request(server.callback) - .patch('/runs/run-1/steps/1/pending-data') - .set('Authorization', `Bearer ${token}`) - .send({ userConfirmed: true }); - - expect(response.status).toBe(404); - expect(response.body).toEqual({ error: 'Step execution not found or has no pending data' }); - }); - - it('returns 400 when stepIndex is not a valid integer', async () => { - const server = createServer(); - const token = signToken({ id: 'user-1' }); - - const response = await request(server.callback) - .patch('/runs/run-1/steps/abc/pending-data') - .set('Authorization', `Bearer ${token}`) - .send({ userConfirmed: true }); - - expect(response.status).toBe(400); - expect(response.body).toEqual({ error: 'Invalid stepIndex' }); - }); - - it('returns 400 when body contains unknown fields', async () => { - const existing = { - type: 'update-record' as const, - stepIndex: 0, - pendingData: { name: 'status', displayName: 'Status', value: 'active' }, - }; - const runStore = createMockRunStore({ - getStepExecutions: jest.fn().mockResolvedValue([existing]), - }); - const server = createServer({ runStore }); - const token = signToken({ id: 'user-1' }); - - const response = await request(server.callback) - .patch('/runs/run-1/steps/0/pending-data') - .set('Authorization', `Bearer ${token}`) - .send({ userConfirmed: true, extra: 'injection' }); - - expect(response.status).toBe(400); - expect(response.body).toEqual(expect.objectContaining({ error: 'Invalid request body' })); - }); - - it('returns 400 when userConfirmed is not a boolean', async () => { - const existing = { - type: 'update-record' as const, - stepIndex: 0, - pendingData: { name: 'status', displayName: 'Status', value: 'active' }, - }; - const runStore = createMockRunStore({ - getStepExecutions: 
jest.fn().mockResolvedValue([existing]), + it('returns 400 with details when patchPendingData throws InvalidPendingDataError', async () => { + const issues = [ + { path: ['userConfirmed'], message: 'Expected boolean', code: 'invalid_type' }, + ]; + const runner = createMockRunner({ + patchPendingData: jest.fn().mockRejectedValue(new InvalidPendingDataError(issues)), }); - const server = createServer({ runStore }); + const server = createServer({ runner }); const token = signToken({ id: 'user-1' }); const response = await request(server.callback) @@ -510,257 +412,29 @@ describe('ExecutorHttpServer', () => { .send({ userConfirmed: 'yes' }); expect(response.status).toBe(400); - expect(response.body).toEqual(expect.objectContaining({ error: 'Invalid request body' })); + expect(response.body).toEqual({ error: 'Invalid request body', details: issues }); }); - it('update-record: accepts value override', async () => { - const existing = { - type: 'update-record' as const, - stepIndex: 0, - pendingData: { fieldName: 'status', value: 'old_value' }, - }; - const runStore = createMockRunStore({ - getStepExecutions: jest.fn().mockResolvedValue([existing]), - saveStepExecution: jest.fn().mockResolvedValue(undefined), - }); - const server = createServer({ runStore }); - const token = signToken({ id: 'user-1' }); - - const response = await request(server.callback) - .patch('/runs/run-1/steps/0/pending-data') - .set('Authorization', `Bearer ${token}`) - .send({ userConfirmed: true, value: 'new_value' }); - - expect(response.status).toBe(204); - expect(runStore.saveStepExecution).toHaveBeenCalledWith( - 'run-1', - expect.objectContaining({ - pendingData: expect.objectContaining({ value: 'new_value', userConfirmed: true }), - }), - ); - }); - - it('update-record: rejects unknown field', async () => { - const existing = { - type: 'update-record' as const, - stepIndex: 0, - pendingData: { fieldName: 'status', value: 'active' }, - }; - const runStore = createMockRunStore({ - 
getStepExecutions: jest.fn().mockResolvedValue([existing]), - }); - const server = createServer({ runStore }); - const token = signToken({ id: 'user-1' }); - - const response = await request(server.callback) - .patch('/runs/run-1/steps/0/pending-data') - .set('Authorization', `Bearer ${token}`) - .send({ userConfirmed: true, name: 'hack' }); - - expect(response.status).toBe(400); - expect(response.body).toEqual(expect.objectContaining({ error: 'Invalid request body' })); - }); - - it('load-related-record: accepts selectedRecordId override', async () => { - const existing = { - type: 'load-related-record' as const, - stepIndex: 1, - pendingData: { - name: 'order', - displayName: 'Order', - selectedRecordId: [99], - suggestedFields: [], - }, - selectedRecordRef: { collectionName: 'customers', recordId: [42], stepIndex: 0 }, - }; - const runStore = createMockRunStore({ - getStepExecutions: jest.fn().mockResolvedValue([existing]), - saveStepExecution: jest.fn().mockResolvedValue(undefined), - }); - const server = createServer({ runStore }); - const token = signToken({ id: 'user-1' }); - - const response = await request(server.callback) - .patch('/runs/run-1/steps/1/pending-data') - .set('Authorization', `Bearer ${token}`) - .send({ userConfirmed: true, selectedRecordId: ['42'] }); - - expect(response.status).toBe(204); - expect(runStore.saveStepExecution).toHaveBeenCalledWith( - 'run-1', - expect.objectContaining({ - pendingData: expect.objectContaining({ selectedRecordId: ['42'], userConfirmed: true }), - }), - ); - }); - - it('load-related-record: accepts relation override (name + displayName)', async () => { - const existing = { - type: 'load-related-record' as const, - stepIndex: 1, - pendingData: { - name: 'order', - displayName: 'Order', - selectedRecordId: [99], - suggestedFields: [], - }, - selectedRecordRef: { collectionName: 'customers', recordId: [42], stepIndex: 0 }, - }; - const runStore = createMockRunStore({ - getStepExecutions: 
jest.fn().mockResolvedValue([existing]), - saveStepExecution: jest.fn().mockResolvedValue(undefined), - }); - const server = createServer({ runStore }); - const token = signToken({ id: 'user-1' }); - - const response = await request(server.callback) - .patch('/runs/run-1/steps/1/pending-data') - .set('Authorization', `Bearer ${token}`) - .send({ userConfirmed: true, name: 'orders', displayName: 'Orders' }); - - expect(response.status).toBe(204); - expect(runStore.saveStepExecution).toHaveBeenCalledWith( - 'run-1', - expect.objectContaining({ - pendingData: expect.objectContaining({ name: 'orders', displayName: 'Orders' }), - }), - ); - }); - - it('load-related-record: rejects empty selectedRecordId', async () => { - const existing = { - type: 'load-related-record' as const, - stepIndex: 1, - pendingData: { - name: 'order', - displayName: 'Order', - selectedRecordId: [99], - suggestedFields: [], - }, - selectedRecordRef: { collectionName: 'customers', recordId: [42], stepIndex: 0 }, - }; - const runStore = createMockRunStore({ - getStepExecutions: jest.fn().mockResolvedValue([existing]), - }); - const server = createServer({ runStore }); - const token = signToken({ id: 'user-1' }); - - const response = await request(server.callback) - .patch('/runs/run-1/steps/1/pending-data') - .set('Authorization', `Bearer ${token}`) - .send({ userConfirmed: true, selectedRecordId: [] }); - - expect(response.status).toBe(400); - expect(response.body).toEqual(expect.objectContaining({ error: 'Invalid request body' })); - }); - - it('load-related-record: rejects relatedCollectionName (internal field)', async () => { - const existing = { - type: 'load-related-record' as const, - stepIndex: 1, - pendingData: { - name: 'order', - displayName: 'Order', - selectedRecordId: [99], - suggestedFields: [], - }, - selectedRecordRef: { collectionName: 'customers', recordId: [42], stepIndex: 0 }, - }; - const runStore = createMockRunStore({ - getStepExecutions: 
jest.fn().mockResolvedValue([existing]), - }); - const server = createServer({ runStore }); - const token = signToken({ id: 'user-1' }); - - const response = await request(server.callback) - .patch('/runs/run-1/steps/1/pending-data') - .set('Authorization', `Bearer ${token}`) - .send({ userConfirmed: true, relatedCollectionName: 'Order' }); - - expect(response.status).toBe(400); - expect(response.body).toEqual(expect.objectContaining({ error: 'Invalid request body' })); - }); - - it('trigger-action: rejects extra field', async () => { - const existing = { - type: 'trigger-action' as const, - stepIndex: 0, - pendingData: { name: 'send_email', displayName: 'Send Email' }, - }; - const runStore = createMockRunStore({ - getStepExecutions: jest.fn().mockResolvedValue([existing]), - }); - const server = createServer({ runStore }); - const token = signToken({ id: 'user-1' }); - - const response = await request(server.callback) - .patch('/runs/run-1/steps/0/pending-data') - .set('Authorization', `Bearer ${token}`) - .send({ userConfirmed: true, name: 'other_action' }); - - expect(response.status).toBe(400); - expect(response.body).toEqual(expect.objectContaining({ error: 'Invalid request body' })); - }); - - it('mcp-task: accepts only userConfirmed and returns 204', async () => { - const existing = { - type: 'mcp-task' as const, - stepIndex: 0, - pendingData: { name: 'send_report', input: {} }, - }; - const runStore = createMockRunStore({ - getStepExecutions: jest.fn().mockResolvedValue([existing]), - saveStepExecution: jest.fn().mockResolvedValue(undefined), - }); - const server = createServer({ runStore }); + it('returns 400 when stepIndex is not a valid integer', async () => { + const runner = createMockRunner(); + const server = createServer({ runner }); const token = signToken({ id: 'user-1' }); const response = await request(server.callback) - .patch('/runs/run-1/steps/0/pending-data') + .patch('/runs/run-1/steps/abc/pending-data') .set('Authorization', `Bearer 
${token}`) .send({ userConfirmed: true }); - expect(response.status).toBe(204); - expect(runStore.saveStepExecution).toHaveBeenCalledWith( - 'run-1', - expect.objectContaining({ - pendingData: expect.objectContaining({ userConfirmed: true }), - }), - ); - }); - - it('mcp-task: rejects extra field', async () => { - const existing = { - type: 'mcp-task' as const, - stepIndex: 0, - pendingData: { name: 'send_report', input: {} }, - }; - const runStore = createMockRunStore({ - getStepExecutions: jest.fn().mockResolvedValue([existing]), - }); - const server = createServer({ runStore }); - const token = signToken({ id: 'user-1' }); - - const response = await request(server.callback) - .patch('/runs/run-1/steps/0/pending-data') - .set('Authorization', `Bearer ${token}`) - .send({ userConfirmed: true, name: 'override' }); - expect(response.status).toBe(400); - expect(response.body).toEqual(expect.objectContaining({ error: 'Invalid request body' })); + expect(response.body).toEqual({ error: 'Invalid stepIndex' }); + expect(runner.patchPendingData).not.toHaveBeenCalled(); }); - it('returns 500 when saveStepExecution rejects', async () => { - const existing = { - type: 'update-record' as const, - stepIndex: 0, - pendingData: { fieldName: 'status', value: 'active' }, - }; - const runStore = createMockRunStore({ - getStepExecutions: jest.fn().mockResolvedValue([existing]), - saveStepExecution: jest.fn().mockRejectedValue(new Error('disk full')), + it('returns 500 when patchPendingData throws an unexpected error', async () => { + const runner = createMockRunner({ + patchPendingData: jest.fn().mockRejectedValue(new Error('disk full')), }); - const server = createServer({ runStore }); + const server = createServer({ runner }); const token = signToken({ id: 'user-1' }); const response = await request(server.callback) diff --git a/packages/workflow-executor/test/runner.test.ts b/packages/workflow-executor/test/runner.test.ts index 92c595768b..462bdf09d0 100644 --- 
a/packages/workflow-executor/test/runner.test.ts +++ b/packages/workflow-executor/test/runner.test.ts @@ -7,7 +7,12 @@ import type { PendingStepExecution } from '../src/types/execution'; import type { StepDefinition } from '../src/types/step-definition'; import type { AiClient, BaseChatModel } from '@forestadmin/ai-proxy'; -import { ConfigurationError, RunNotFoundError } from '../src/errors'; +import { + ConfigurationError, + InvalidPendingDataError, + PendingDataNotFoundError, + RunNotFoundError, +} from '../src/errors'; import BaseStepExecutor from '../src/executors/base-step-executor'; import ConditionStepExecutor from '../src/executors/condition-step-executor'; import LoadRelatedRecordStepExecutor from '../src/executors/load-related-record-step-executor'; @@ -62,9 +67,20 @@ function createMockLogger(): jest.Mocked { const VALID_ENV_SECRET = 'a'.repeat(64); const VALID_AUTH_SECRET = 'test-auth-secret'; +function createMockRunStore(overrides: Partial = {}): jest.Mocked { + return { + init: jest.fn().mockResolvedValue(undefined), + close: jest.fn().mockResolvedValue(undefined), + getStepExecutions: jest.fn().mockResolvedValue([]), + saveStepExecution: jest.fn().mockResolvedValue(undefined), + ...overrides, + } as jest.Mocked; +} + function createRunnerConfig( overrides: Partial<{ workflowPort: WorkflowPort; + runStore: RunStore; aiClient: AiClient; logger: Logger; httpPort: number; @@ -168,7 +184,6 @@ describe('start', () => { expect(MockedExecutorHttpServer).toHaveBeenCalledWith({ port: 3100, - runStore: config.runStore, runner, authSecret: VALID_AUTH_SECRET, workflowPort: config.workflowPort, @@ -841,3 +856,171 @@ describe('error handling', () => { expect(workflowPort.getPendingStepExecutions).toHaveBeenCalledTimes(2); }); }); + +// --------------------------------------------------------------------------- +// getRunStepExecutions +// --------------------------------------------------------------------------- + +describe('getRunStepExecutions', () => { + 
it('delegates to runStore.getStepExecutions and returns the result', async () => { + const steps = [{ type: 'condition' as const, stepIndex: 0 }]; + const runStore = createMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue(steps), + }); + runner = new Runner(createRunnerConfig({ runStore })); + + const result = await runner.getRunStepExecutions('run-1'); + + expect(result).toEqual(steps); + expect(runStore.getStepExecutions).toHaveBeenCalledWith('run-1'); + }); +}); + +// --------------------------------------------------------------------------- +// patchPendingData +// --------------------------------------------------------------------------- + +describe('patchPendingData', () => { + it('throws PendingDataNotFoundError when step is not found', async () => { + const runStore = createMockRunStore({ getStepExecutions: jest.fn().mockResolvedValue([]) }); + runner = new Runner(createRunnerConfig({ runStore })); + + await expect(runner.patchPendingData('run-1', 0, { userConfirmed: true })).rejects.toThrow( + PendingDataNotFoundError, + ); + }); + + it('throws PendingDataNotFoundError when step has no pendingData', async () => { + const runStore = createMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([{ type: 'update-record', stepIndex: 0 }]), + }); + runner = new Runner(createRunnerConfig({ runStore })); + + await expect(runner.patchPendingData('run-1', 0, { userConfirmed: true })).rejects.toThrow( + PendingDataNotFoundError, + ); + }); + + it('throws PendingDataNotFoundError when step type has no schema (e.g. 
condition)', async () => { + const runStore = createMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([{ type: 'condition', stepIndex: 0 }]), + }); + runner = new Runner(createRunnerConfig({ runStore })); + + await expect(runner.patchPendingData('run-1', 0, { userConfirmed: true })).rejects.toThrow( + PendingDataNotFoundError, + ); + }); + + it('throws InvalidPendingDataError with mapped issues when body fails Zod validation', async () => { + const runStore = createMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([ + { + type: 'update-record', + stepIndex: 0, + pendingData: { fieldName: 'status', value: 'active' }, + }, + ]), + }); + runner = new Runner(createRunnerConfig({ runStore })); + + const error = await runner.patchPendingData('run-1', 0, { userConfirmed: 'yes' }).catch(e => e); + + expect(error).toBeInstanceOf(InvalidPendingDataError); + expect(error.issues).toEqual( + expect.arrayContaining([ + expect.objectContaining({ path: ['userConfirmed'], code: expect.any(String) }), + ]), + ); + }); + + it('throws InvalidPendingDataError when body contains unknown fields', async () => { + const runStore = createMockRunStore({ + getStepExecutions: jest + .fn() + .mockResolvedValue([ + { type: 'trigger-action', stepIndex: 0, pendingData: { name: 'send_email' } }, + ]), + }); + runner = new Runner(createRunnerConfig({ runStore })); + + await expect( + runner.patchPendingData('run-1', 0, { userConfirmed: true, extra: 'field' }), + ).rejects.toThrow(InvalidPendingDataError); + }); + + it('update-record: merges value override into pendingData and calls saveStepExecution', async () => { + const existing = { + type: 'update-record' as const, + stepIndex: 0, + pendingData: { fieldName: 'status', value: 'old_value' }, + }; + const runStore = createMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([existing]), + }); + runner = new Runner(createRunnerConfig({ runStore })); + + await runner.patchPendingData('run-1', 0, { userConfirmed: 
true, value: 'new_value' }); + + expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ + type: 'update-record', + stepIndex: 0, + pendingData: { fieldName: 'status', value: 'new_value', userConfirmed: true }, + }), + ); + }); + + it('load-related-record: merges selectedRecordId override correctly', async () => { + const existing = { + type: 'load-related-record' as const, + stepIndex: 1, + pendingData: { + name: 'order', + displayName: 'Order', + selectedRecordId: [99], + suggestedFields: [], + }, + selectedRecordRef: { collectionName: 'customers', recordId: [42], stepIndex: 0 }, + }; + const runStore = createMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([existing]), + }); + runner = new Runner(createRunnerConfig({ runStore })); + + await runner.patchPendingData('run-1', 1, { userConfirmed: true, selectedRecordId: ['42'] }); + + expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ + pendingData: expect.objectContaining({ selectedRecordId: ['42'], userConfirmed: true }), + }), + ); + }); + + it('trigger-action: merges userConfirmed:true only, rejects extra field', async () => { + const existing = { + type: 'trigger-action' as const, + stepIndex: 0, + pendingData: { name: 'send_email', displayName: 'Send Email' }, + }; + const runStore = createMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([existing]), + }); + runner = new Runner(createRunnerConfig({ runStore })); + + await runner.patchPendingData('run-1', 0, { userConfirmed: true }); + + expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ + pendingData: expect.objectContaining({ userConfirmed: true }), + }), + ); + + await expect( + runner.patchPendingData('run-1', 0, { userConfirmed: true, name: 'override' }), + ).rejects.toThrow(InvalidPendingDataError); + }); +}); From cf7d069b10d7e2a8cfd1281715f12b679d324f57 Mon Sep 17 00:00:00 2001 From: scra Date: 
Wed, 25 Mar 2026 17:46:18 +0100 Subject: [PATCH 17/18] docs(workflow-executor): update contract to match implementation (#1511) --- WORKFLOW-EXECUTOR-CONTRACT.md | 110 +++++++++++++++++++--------------- 1 file changed, 61 insertions(+), 49 deletions(-) diff --git a/WORKFLOW-EXECUTOR-CONTRACT.md b/WORKFLOW-EXECUTOR-CONTRACT.md index 1313a0b13f..ee68b81ae3 100644 --- a/WORKFLOW-EXECUTOR-CONTRACT.md +++ b/WORKFLOW-EXECUTOR-CONTRACT.md @@ -1,7 +1,7 @@ # Workflow Executor — Contract Types > Types exchanged between the **orchestrator (server)**, the **executor (agent-nodejs)**, and the **frontend**. -> Last updated: 2026-03-24 +> Last updated: 2026-03-25 --- @@ -19,7 +19,6 @@ interface PendingStepExecution { baseRecordRef: RecordRef; stepDefinition: StepDefinition; previousSteps: Step[]; - userConfirmed?: boolean; // true = user confirmed a pending action on this step } ``` @@ -128,85 +127,90 @@ interface McpTaskStepOutcome { After executing a step, the executor posts the outcome back to the server. The body is one of the `StepOutcome` shapes above. -> ⚠️ **NEVER contains client data** (field values, AI reasoning, etc.) — those stay in the `RunStore` on the client side. +> **NEVER contains client data** (field values, AI reasoning, etc.) — those stay in the `RunStore` on the client side. --- ## 3. Pending Data -Steps that require user input pause with `status: "awaiting-input"`. The frontend writes `pendingData` to unblock them via a dedicated endpoint on the executor HTTP server. +Steps that require user input pause with `status: "awaiting-input"`. The executor writes its AI-selected data to `pendingData` in the RunStore. The frontend can then override fields and confirm via the pending-data endpoint. -> **TODO** — The pending-data write endpoint is not yet implemented. Route, method, and per-step-type body shapes are TBD (PRD-240). 
+**`PATCH /runs/:runId/steps/:stepIndex/pending-data`** -Once written, the frontend calls `POST /runs/:runId/trigger` and the executor resumes with `userConfirmed: true`. +The frontend writes user overrides + confirmation to the executor HTTP server. Request bodies are validated per step type with strict Zod schemas — unknown fields are rejected with `400`. + +Once written, the frontend calls `POST /runs/:runId/trigger`. On the next execution, the executor reads `pendingData` from the RunStore and checks `userConfirmed`: +- `undefined` → returns `awaiting-input` again (the step is not yet actionable) +- `true` → execute the confirmed action +- `false` → skip the step (mark as success) ### update-record — user picks a field + value to write -> **TODO** — Pending-data write endpoint TBD (PRD-240). +The executor writes the AI's field selection to `pendingData`. The frontend can override `value` and confirm. +Stored in RunStore: ```typescript interface UpdateRecordPendingData { - name: string; // technical field name - displayName: string; // label shown in the UI - value: string; // value chosen by the user + name: string; // technical field name (set by executor) + displayName: string; // label shown in the UI (set by executor) + value: string; // AI-proposed value; overridable by frontend + userConfirmed?: boolean; // set by frontend via PATCH } ``` -### trigger-action — user confirmation only +PATCH request body: +```typescript +{ + userConfirmed: boolean; + value?: string; // optional override of AI-proposed value +} +``` -No payload required from the frontend. The executor selects the action and writes `pendingData` itself (action name + displayName) to the RunStore. The frontend just confirms: +### trigger-action & mcp-task — user confirmation only -``` -POST /runs/:runId/trigger +The executor selects the action (or MCP tool) and writes `pendingData` to the RunStore. The frontend cannot override any executor-selected data — it only confirms or rejects. 
+ +PATCH request body (same for both types): +```typescript +{ + userConfirmed: boolean; +} ``` ### load-related-record — user picks the relation and/or the record -The frontend can override **both** the relation (field) and the selected record. - -> **Current status** — The frontend cannot yet override the AI selection. The executor HTTP server does not yet expose the pending-data write endpoint. Until it is implemented, the executor writes the AI's pick directly into `selectedRecordId`. +The executor writes the AI's relation selection to `pendingData`. The frontend can override the relation, the selected record, or both. +Stored in RunStore: ```typescript -// Written by the executor; overwritable by the frontend via the pending-data endpoint (TBD) interface LoadRelatedRecordPendingData { - name: string; // technical relation name - displayName: string; // label shown in the UI - relatedCollectionName: string; // collection of the related record - suggestedFields?: string[]; // fields suggested for display - selectedRecordId: Array; // AI's pick; overwritten by the frontend via the pending-data endpoint + name: string; // technical relation name + displayName: string; // label shown in the UI + suggestedFields?: string[]; // fields suggested for display (set by executor) + selectedRecordId: Array; // AI's pick; overridable by frontend + userConfirmed?: boolean; // set by frontend via PATCH } ``` -The executor initially writes the AI's pick into `selectedRecordId`. The pending-data endpoint overwrites it (and optionally `name`, `displayName`, `relatedCollectionName`) when the user changes the selection. - -#### Future endpoint — pending-data write (not yet implemented) - -> **TODO** — Route and method TBD (PRD-240). - -Request body: +> `relatedCollectionName` is **not** stored in `pendingData` — the executor re-derives it from the `FieldSchema` at execution time using the (possibly overridden) relation `name`. 
+PATCH request body: ```typescript { - selectedRecordId?: Array; // record chosen by the user - name?: string; // relation changed - displayName?: string; // relation changed - relatedCollectionName?: string; // required if name is provided + userConfirmed: boolean; + name?: string; // override relation + displayName?: string; // override relation label + selectedRecordId?: Array; // override selected record (min 1 element) } ``` -Response: `204 No Content`. - -The frontend calls this endpoint **before** `POST /runs/:runId/trigger`. On the next poll, `userConfirmed: true` and the executor reads `selectedRecordId` from the RunStore. +### Responses -### mcp-task — user confirmation only - -No payload required from the frontend. The executor selects the tool and writes `pendingData` itself (tool name + input) to the RunStore. The frontend just confirms: - -``` -POST /runs/:runId/trigger -``` - -The executor resumes with `userConfirmed: true` and executes the pre-selected tool. +| Status | Meaning | +|---|---| +| `204 No Content` | Pending data updated successfully | +| `400` | Invalid body — type mismatch, unknown fields, or empty `selectedRecordId` | +| `404` | Step not found, no `pendingData`, or step type does not support confirmation | --- @@ -222,11 +226,19 @@ Orchestrator ──► GET pending?runId=X ──► Executor │ │ status: awaiting-input POST /complete │ (StepOutcome) - Frontend writes pendingData - to executor HTTP server TODO: route TBD + │ + Executor writes pendingData + to RunStore (AI selection) + │ + Frontend reads pendingData + via GET /runs/:runId + │ + Frontend overrides + confirms + PATCH /runs/:runId/steps/:stepIndex/pending-data + { userConfirmed: true/false } → 204 │ POST /runs/:runId/trigger - (next poll: userConfirmed = true) │ Executor resumes + (reads userConfirmed from pendingData) ``` From cf8e699b1184084cdd712926de21cbdd928d39b4 Mon Sep 17 00:00:00 2001 From: scra Date: Thu, 26 Mar 2026 15:38:16 +0100 Subject: [PATCH 18/18] 
feat(workflow-executor): add buildInMemoryExecutor and buildDatabaseExecutor factories (#1510) --- WORKFLOW-EXECUTOR-CONTRACT.md | 45 +- .../src/adapters/agent-client-agent-port.ts | 96 ++- .../adapters/forest-server-workflow-port.ts | 6 +- .../src/build-workflow-executor.ts | 85 ++ packages/workflow-executor/src/errors.ts | 7 + .../src/executors/base-step-executor.ts | 8 +- .../src/executors/condition-step-executor.ts | 9 +- .../load-related-record-step-executor.ts | 30 +- .../executors/read-record-step-executor.ts | 13 +- .../src/executors/safe-agent-port.ts | 17 +- .../src/executors/step-executor-factory.ts | 3 + .../trigger-record-action-step-executor.ts | 13 +- .../executors/update-record-step-executor.ts | 13 +- .../src/http/executor-http-server.ts | 130 +-- packages/workflow-executor/src/index.ts | 8 + .../workflow-executor/src/ports/agent-port.ts | 9 +- .../src/ports/workflow-port.ts | 4 +- packages/workflow-executor/src/runner.ts | 44 +- .../workflow-executor/src/schema-cache.ts | 45 + .../workflow-executor/src/types/execution.ts | 16 + .../workflow-executor/src/types/record.ts | 1 + .../src/types/step-outcome.ts | 7 +- .../adapters/agent-client-agent-port.test.ts | 234 ++++-- .../forest-server-workflow-port.test.ts | 12 +- .../test/build-workflow-executor.test.ts | 206 +++++ .../test/executors/base-step-executor.test.ts | 13 + .../executors/condition-step-executor.test.ts | 21 +- .../load-related-record-step-executor.test.ts | 63 +- .../executors/mcp-task-step-executor.test.ts | 13 + .../read-record-step-executor.test.ts | 31 +- .../test/executors/safe-agent-port.test.ts | 69 +- ...rigger-record-action-step-executor.test.ts | 73 +- .../update-record-step-executor.test.ts | 40 +- .../test/http/executor-http-server.test.ts | 203 ++--- .../integration/workflow-execution.test.ts | 783 ++++++++++++++++++ .../workflow-executor/test/runner.test.ts | 164 +++- .../test/schema-cache.test.ts | 131 +++ 37 files changed, 2156 insertions(+), 509 deletions(-) create 
mode 100644 packages/workflow-executor/src/build-workflow-executor.ts create mode 100644 packages/workflow-executor/src/schema-cache.ts create mode 100644 packages/workflow-executor/test/build-workflow-executor.test.ts create mode 100644 packages/workflow-executor/test/integration/workflow-execution.test.ts create mode 100644 packages/workflow-executor/test/schema-cache.test.ts diff --git a/WORKFLOW-EXECUTOR-CONTRACT.md b/WORKFLOW-EXECUTOR-CONTRACT.md index ee68b81ae3..f9378428f6 100644 --- a/WORKFLOW-EXECUTOR-CONTRACT.md +++ b/WORKFLOW-EXECUTOR-CONTRACT.md @@ -1,7 +1,7 @@ # Workflow Executor — Contract Types > Types exchanged between the **orchestrator (server)**, the **executor (agent-nodejs)**, and the **frontend**. -> Last updated: 2026-03-25 +> Last updated: 2026-03-26 --- @@ -12,6 +12,18 @@ The executor polls for the current pending step of a run. The server must return **one object** (not an array), or `null` if the run is not found. ```typescript +interface StepUser { + id: number; + email: string; + firstName: string; + lastName: string; + team: string; + renderingId: number; + role: string; + permissionLevel: string; + tags: Record; +} + interface PendingStepExecution { runId: string; stepId: string; @@ -19,11 +31,40 @@ interface PendingStepExecution { baseRecordRef: RecordRef; stepDefinition: StepDefinition; previousSteps: Step[]; + user: StepUser; // identity of the user who initiated the step } ``` > **`null` response** → executor throws `RunNotFoundError` → HTTP 404 returned to caller. +### CollectionSchema + +Schema of a collection, returned by the orchestrator via `GET /liana/v1/collections/:collectionName`. Used by the executor to resolve primary keys and action endpoints. 
+ +```typescript +interface CollectionSchema { + collectionName: string; + collectionDisplayName: string; + primaryKeyFields: string[]; + fields: FieldSchema[]; + actions: ActionSchema[]; +} + +interface FieldSchema { + fieldName: string; + displayName: string; + isRelationship: boolean; + relationType?: "BelongsTo" | "HasMany" | "HasOne"; + relatedCollectionName?: string; +} + +interface ActionSchema { + name: string; + displayName: string; + endpoint: string; // route path used by the agent to execute the action +} +``` + ### RecordRef Lightweight pointer to a specific record. @@ -97,7 +138,7 @@ interface ConditionStepOutcome { type: "condition"; stepId: string; stepIndex: number; - status: "success" | "error" | "manual-decision"; + status: "success" | "error"; selectedOption?: string; // present when status = "success" error?: string; // present when status = "error" } diff --git a/packages/workflow-executor/src/adapters/agent-client-agent-port.ts b/packages/workflow-executor/src/adapters/agent-client-agent-port.ts index 963e7de4be..52bf31be84 100644 --- a/packages/workflow-executor/src/adapters/agent-client-agent-port.ts +++ b/packages/workflow-executor/src/adapters/agent-client-agent-port.ts @@ -5,8 +5,13 @@ import type { GetRelatedDataQuery, UpdateRecordQuery, } from '../ports/agent-port'; -import type { CollectionSchema } from '../types/record'; -import type { RemoteAgentClient, SelectOptions } from '@forestadmin/agent-client'; +import type SchemaCache from '../schema-cache'; +import type { StepUser } from '../types/execution'; +import type { CollectionSchema, RecordData } from '../types/record'; +import type { SelectOptions } from '@forestadmin/agent-client'; + +import { createRemoteAgentClient } from '@forestadmin/agent-client'; +import jsonwebtoken from 'jsonwebtoken'; import { RecordNotFoundError } from '../errors'; @@ -41,20 +46,20 @@ function extractRecordId( } export default class AgentClientAgentPort implements AgentPort { - private readonly client: 
RemoteAgentClient; - private readonly collectionSchemas: Record; - - constructor(params: { - client: RemoteAgentClient; - collectionSchemas: Record; - }) { - this.client = params.client; - this.collectionSchemas = params.collectionSchemas; + private readonly agentUrl: string; + private readonly authSecret: string; + private readonly schemaCache: SchemaCache; + + constructor(params: { agentUrl: string; authSecret: string; schemaCache: SchemaCache }) { + this.agentUrl = params.agentUrl; + this.authSecret = params.authSecret; + this.schemaCache = params.schemaCache; } - async getRecord({ collection, id, fields }: GetRecordQuery) { + async getRecord({ collection, id, fields }: GetRecordQuery, user: StepUser): Promise { + const client = this.createClient(user); const schema = this.resolveSchema(collection); - const records = await this.client.collection(collection).list>({ + const records = await client.collection(collection).list>({ filters: buildPkFilter(schema.primaryKeyFields, id), pagination: { size: 1, number: 1 }, ...(fields?.length && { fields }), @@ -67,18 +72,26 @@ export default class AgentClientAgentPort implements AgentPort { return { collectionName: collection, recordId: id, values: records[0] }; } - async updateRecord({ collection, id, values }: UpdateRecordQuery) { - const updatedRecord = await this.client + async updateRecord( + { collection, id, values }: UpdateRecordQuery, + user: StepUser, + ): Promise { + const client = this.createClient(user); + const updatedRecord = await client .collection(collection) .update>(encodePk(id), values); return { collectionName: collection, recordId: id, values: updatedRecord }; } - async getRelatedData({ collection, id, relation, limit, fields }: GetRelatedDataQuery) { + async getRelatedData( + { collection, id, relation, limit, fields }: GetRelatedDataQuery, + user: StepUser, + ): Promise { + const client = this.createClient(user); const relatedSchema = this.resolveSchema(relation); - const records = await 
this.client + const records = await client .collection(collection) .relation(relation, encodePk(id)) .list>({ @@ -93,26 +106,55 @@ export default class AgentClientAgentPort implements AgentPort { })); } - async executeAction({ collection, action, id }: ExecuteActionQuery): Promise { + async executeAction( + { collection, action, id }: ExecuteActionQuery, + user: StepUser, + ): Promise { + const client = this.createClient(user); const encodedId = id?.length ? [encodePk(id)] : []; - const act = await this.client.collection(collection).action(action, { recordIds: encodedId }); + const act = await client.collection(collection).action(action, { recordIds: encodedId }); return act.execute(); } - private resolveSchema(collectionName: string): CollectionSchema { - const schema = this.collectionSchemas[collectionName]; + private createClient(user: StepUser) { + const token = jsonwebtoken.sign({ ...user, scope: 'step-execution' }, this.authSecret, { + expiresIn: '5m', + }); + + return createRemoteAgentClient({ + url: this.agentUrl, + token, + actionEndpoints: this.buildActionEndpoints(), + }); + } + + private buildActionEndpoints() { + const endpoints: Record> = {}; + + for (const [collectionName, schema] of this.schemaCache) { + endpoints[collectionName] = {}; - if (!schema) { - return { + for (const action of schema.actions) { + endpoints[collectionName][action.name] = { + name: action.name, + endpoint: action.endpoint, + }; + } + } + + return endpoints; + } + + private resolveSchema(collectionName: string): CollectionSchema { + return ( + this.schemaCache.get(collectionName) ?? 
{ collectionName, collectionDisplayName: collectionName, primaryKeyFields: ['id'], fields: [], actions: [], - }; - } - - return schema; + } + ); } } diff --git a/packages/workflow-executor/src/adapters/forest-server-workflow-port.ts b/packages/workflow-executor/src/adapters/forest-server-workflow-port.ts index 8233e9b705..d3983db844 100644 --- a/packages/workflow-executor/src/adapters/forest-server-workflow-port.ts +++ b/packages/workflow-executor/src/adapters/forest-server-workflow-port.ts @@ -1,5 +1,5 @@ import type { McpConfiguration, WorkflowPort } from '../ports/workflow-port'; -import type { PendingStepExecution } from '../types/execution'; +import type { PendingStepExecution, StepUser } from '../types/execution'; import type { CollectionSchema } from '../types/record'; import type { StepOutcome } from '../types/step-outcome'; import type { HttpOptions } from '@forestadmin/forestadmin-client'; @@ -62,10 +62,8 @@ export default class ForestServerWorkflowPort implements WorkflowPort { } // eslint-disable-next-line @typescript-eslint/no-unused-vars - async hasRunAccess(_runId: string, _userToken: string): Promise { + async hasRunAccess(_runId: string, _user: StepUser): Promise { // TODO: implement once GET /liana/v1/workflow-runs/:runId/access is available. - // When live: call ServerUtils.query with extra header 'forest-user-token': userToken - // to let the orchestrator verify ownership. 
return true; } } diff --git a/packages/workflow-executor/src/build-workflow-executor.ts b/packages/workflow-executor/src/build-workflow-executor.ts new file mode 100644 index 0000000000..e789d54499 --- /dev/null +++ b/packages/workflow-executor/src/build-workflow-executor.ts @@ -0,0 +1,85 @@ +import type { Logger } from './ports/logger-port'; +import type { AiConfiguration } from '@forestadmin/ai-proxy'; +import type { Options as SequelizeOptions } from 'sequelize'; + +import { AiClient } from '@forestadmin/ai-proxy'; +import { Sequelize } from 'sequelize'; + +import AgentClientAgentPort from './adapters/agent-client-agent-port'; +import ForestServerWorkflowPort from './adapters/forest-server-workflow-port'; +import Runner from './runner'; +import SchemaCache from './schema-cache'; +import DatabaseStore from './stores/database-store'; +import InMemoryStore from './stores/in-memory-store'; + +const DEFAULT_FOREST_SERVER_URL = 'https://api.forestadmin.com'; +const DEFAULT_POLLING_INTERVAL_MS = 5000; + +export interface WorkflowExecutor { + start(): Promise; + stop(): Promise; +} + +export interface ExecutorOptions { + envSecret: string; + authSecret: string; + agentUrl: string; + forestServerUrl?: string; + aiConfigurations: AiConfiguration[]; + pollingIntervalMs?: number; + httpPort?: number; + logger?: Logger; +} + +export type DatabaseExecutorOptions = ExecutorOptions & + ({ database: SequelizeOptions & { uri: string } } | { database: SequelizeOptions }); + +function buildCommonDependencies(options: ExecutorOptions) { + const forestServerUrl = options.forestServerUrl ?? 
DEFAULT_FOREST_SERVER_URL; + + const workflowPort = new ForestServerWorkflowPort({ + envSecret: options.envSecret, + forestServerUrl, + }); + + const aiClient = new AiClient({ + aiConfigurations: options.aiConfigurations, + }); + + const schemaCache = new SchemaCache(); + + const agentPort = new AgentClientAgentPort({ + agentUrl: options.agentUrl, + authSecret: options.authSecret, + schemaCache, + }); + + return { + agentPort, + schemaCache, + workflowPort, + aiClient, + pollingIntervalMs: options.pollingIntervalMs ?? DEFAULT_POLLING_INTERVAL_MS, + envSecret: options.envSecret, + authSecret: options.authSecret, + httpPort: options.httpPort, + logger: options.logger, + }; +} + +export function buildInMemoryExecutor(options: ExecutorOptions): WorkflowExecutor { + return new Runner({ + ...buildCommonDependencies(options), + runStore: new InMemoryStore(), + }); +} + +export function buildDatabaseExecutor(options: DatabaseExecutorOptions): WorkflowExecutor { + const { uri, ...sequelizeOptions } = options.database as SequelizeOptions & { uri?: string }; + const sequelize = uri ? 
new Sequelize(uri, sequelizeOptions) : new Sequelize(sequelizeOptions); + + return new Runner({ + ...buildCommonDependencies(options), + runStore: new DatabaseStore({ sequelize }), + }); +} diff --git a/packages/workflow-executor/src/errors.ts b/packages/workflow-executor/src/errors.ts index 3c55df97e2..df43bd98c9 100644 --- a/packages/workflow-executor/src/errors.ts +++ b/packages/workflow-executor/src/errors.ts @@ -216,6 +216,13 @@ export class RunNotFoundError extends Error { } } +export class UserMismatchError extends Error { + constructor(runId: string) { + super(`User not authorized for run "${runId}"`); + this.name = 'UserMismatchError'; + } +} + export class PendingDataNotFoundError extends Error { constructor(runId: string, stepIndex: number) { super(`Step ${stepIndex} in run "${runId}" not found or has no pending data`); diff --git a/packages/workflow-executor/src/executors/base-step-executor.ts b/packages/workflow-executor/src/executors/base-step-executor.ts index fb1ba2da2f..59ba338f25 100644 --- a/packages/workflow-executor/src/executors/base-step-executor.ts +++ b/packages/workflow-executor/src/executors/base-step-executor.ts @@ -29,8 +29,6 @@ export default abstract class BaseStepExecutor(); - constructor(context: ExecutionContext) { this.context = context; this.agentPort = new SafeAgentPort(context.agentPort); @@ -260,13 +258,13 @@ export default abstract class BaseStepExecutor { - const cached = this.schemaCache.get(collectionName); + const cached = this.context.schemaCache.get(collectionName); if (cached) return cached; const schema = await this.context.workflowPort.getCollectionSchema(collectionName); - this.schemaCache.set(collectionName, schema); + this.context.schemaCache.set(collectionName, schema); return schema; } diff --git a/packages/workflow-executor/src/executors/condition-step-executor.ts b/packages/workflow-executor/src/executors/condition-step-executor.ts index 43fd995e3c..59b333c147 100644 --- 
a/packages/workflow-executor/src/executors/condition-step-executor.ts +++ b/packages/workflow-executor/src/executors/condition-step-executor.ts @@ -1,6 +1,6 @@ import type { StepExecutionResult } from '../types/execution'; import type { ConditionStepDefinition } from '../types/step-definition'; -import type { ConditionStepStatus } from '../types/step-outcome'; +import type { BaseStepStatus } from '../types/step-outcome'; import { DynamicStructuredTool, HumanMessage, SystemMessage } from '@forestadmin/ai-proxy'; import { z } from 'zod'; @@ -38,7 +38,7 @@ const GATEWAY_SYSTEM_PROMPT = `You are an AI agent selecting the correct option export default class ConditionStepExecutor extends BaseStepExecutor { protected buildOutcomeResult(outcome: { - status: ConditionStepStatus; + status: BaseStepStatus; error?: string; selectedOption?: string; }): StepExecutionResult { @@ -97,7 +97,10 @@ export default class ConditionStepExecutor extends BaseStepExecutor { const { selectedRecordRef, name } = target; - const relatedData = await this.agentPort.getRelatedData({ - collection: selectedRecordRef.collectionName, - id: selectedRecordRef.recordId, - relation: name, - limit, - }); + const relatedData = await this.agentPort.getRelatedData( + { + collection: selectedRecordRef.collectionName, + id: selectedRecordRef.recordId, + relation: name, + limit, + }, + this.context.user, + ); if (relatedData.length === 0) { throw new RelatedRecordNotFoundError(selectedRecordRef.collectionName, name); @@ -223,12 +226,15 @@ export default class LoadRelatedRecordStepExecutor extends RecordTaskStepExecuto limit: number, ): Promise { const { selectedRecordRef, name } = target; - const relatedData = await this.agentPort.getRelatedData({ - collection: selectedRecordRef.collectionName, - id: selectedRecordRef.recordId, - relation: name, - limit, - }); + const relatedData = await this.agentPort.getRelatedData( + { + collection: selectedRecordRef.collectionName, + id: selectedRecordRef.recordId, + 
relation: name, + limit, + }, + this.context.user, + ); if (relatedData.length === 0) { throw new RelatedRecordNotFoundError(selectedRecordRef.collectionName, name); diff --git a/packages/workflow-executor/src/executors/read-record-step-executor.ts b/packages/workflow-executor/src/executors/read-record-step-executor.ts index 4b8622014a..0a7529c66f 100644 --- a/packages/workflow-executor/src/executors/read-record-step-executor.ts +++ b/packages/workflow-executor/src/executors/read-record-step-executor.ts @@ -33,11 +33,14 @@ export default class ReadRecordStepExecutor extends RecordTaskStepExecutor { - return this.call('getRecord', () => this.port.getRecord(query)); + async getRecord(query: GetRecordQuery, user: StepUser): Promise { + return this.call('getRecord', () => this.port.getRecord(query, user)); } - async updateRecord(query: UpdateRecordQuery): Promise { - return this.call('updateRecord', () => this.port.updateRecord(query)); + async updateRecord(query: UpdateRecordQuery, user: StepUser): Promise { + return this.call('updateRecord', () => this.port.updateRecord(query, user)); } - async getRelatedData(query: GetRelatedDataQuery): Promise { - return this.call('getRelatedData', () => this.port.getRelatedData(query)); + async getRelatedData(query: GetRelatedDataQuery, user: StepUser): Promise { + return this.call('getRelatedData', () => this.port.getRelatedData(query, user)); } - async executeAction(query: ExecuteActionQuery): Promise { - return this.call('executeAction', () => this.port.executeAction(query)); + async executeAction(query: ExecuteActionQuery, user: StepUser): Promise { + return this.call('executeAction', () => this.port.executeAction(query, user)); } private async call(operation: string, fn: () => Promise): Promise { diff --git a/packages/workflow-executor/src/executors/step-executor-factory.ts b/packages/workflow-executor/src/executors/step-executor-factory.ts index 2f321dd638..4eee25f3f6 100644 --- 
a/packages/workflow-executor/src/executors/step-executor-factory.ts +++ b/packages/workflow-executor/src/executors/step-executor-factory.ts @@ -2,6 +2,7 @@ import type { AgentPort } from '../ports/agent-port'; import type { Logger } from '../ports/logger-port'; import type { RunStore } from '../ports/run-store'; import type { WorkflowPort } from '../ports/workflow-port'; +import type SchemaCache from '../schema-cache'; import type { ExecutionContext, IStepExecutor, @@ -30,6 +31,7 @@ export interface StepContextConfig { agentPort: AgentPort; workflowPort: WorkflowPort; runStore: RunStore; + schemaCache: SchemaCache; logger: Logger; } @@ -103,6 +105,7 @@ export default class StepExecutorFactory { agentPort: cfg.agentPort, workflowPort: cfg.workflowPort, runStore: cfg.runStore, + schemaCache: cfg.schemaCache, logger: cfg.logger, }; } diff --git a/packages/workflow-executor/src/executors/trigger-record-action-step-executor.ts b/packages/workflow-executor/src/executors/trigger-record-action-step-executor.ts index 03cbbf685b..9d2169915e 100644 --- a/packages/workflow-executor/src/executors/trigger-record-action-step-executor.ts +++ b/packages/workflow-executor/src/executors/trigger-record-action-step-executor.ts @@ -84,11 +84,14 @@ export default class TriggerRecordActionStepExecutor extends RecordTaskStepExecu ): Promise { const { selectedRecordRef, displayName, name } = target; - const actionResult = await this.agentPort.executeAction({ - collection: selectedRecordRef.collectionName, - action: name, - id: selectedRecordRef.recordId, - }); + const actionResult = await this.agentPort.executeAction( + { + collection: selectedRecordRef.collectionName, + action: name, + id: selectedRecordRef.recordId, + }, + this.context.user, + ); try { await this.context.runStore.saveStepExecution(this.context.runId, { diff --git a/packages/workflow-executor/src/executors/update-record-step-executor.ts b/packages/workflow-executor/src/executors/update-record-step-executor.ts index 
f1263e54a4..7a0f0a2622 100644 --- a/packages/workflow-executor/src/executors/update-record-step-executor.ts +++ b/packages/workflow-executor/src/executors/update-record-step-executor.ts @@ -89,11 +89,14 @@ export default class UpdateRecordStepExecutor extends RecordTaskStepExecutor { const { selectedRecordRef, displayName, name, value } = target; - const updated = await this.agentPort.updateRecord({ - collection: selectedRecordRef.collectionName, - id: selectedRecordRef.recordId, - values: { [name]: value }, - }); + const updated = await this.agentPort.updateRecord( + { + collection: selectedRecordRef.collectionName, + id: selectedRecordRef.recordId, + values: { [name]: value }, + }, + this.context.user, + ); try { await this.context.runStore.saveStepExecution(this.context.runId, { diff --git a/packages/workflow-executor/src/http/executor-http-server.ts b/packages/workflow-executor/src/http/executor-http-server.ts index cae4be50cf..1cc287060b 100644 --- a/packages/workflow-executor/src/http/executor-http-server.ts +++ b/packages/workflow-executor/src/http/executor-http-server.ts @@ -1,6 +1,7 @@ import type { Logger } from '../ports/logger-port'; import type { WorkflowPort } from '../ports/workflow-port'; import type Runner from '../runner'; +import type { StepUser } from '../types/execution'; import type { Server } from 'http'; import bodyParser from '@koa/bodyparser'; @@ -9,7 +10,13 @@ import http from 'http'; import Koa from 'koa'; import koaJwt from 'koa-jwt'; -import { InvalidPendingDataError, PendingDataNotFoundError, RunNotFoundError } from '../errors'; +import ConsoleLogger from '../adapters/console-logger'; +import { + InvalidPendingDataError, + PendingDataNotFoundError, + RunNotFoundError, + UserMismatchError, +} from '../errors'; export interface ExecutorHttpServerOptions { port: number; @@ -22,10 +29,12 @@ export interface ExecutorHttpServerOptions { export default class ExecutorHttpServer { private readonly app: Koa; private readonly options: 
ExecutorHttpServerOptions; + private readonly logger: Logger; private server: Server | null = null; constructor(options: ExecutorHttpServerOptions) { this.options = options; + this.logger = options.logger ?? new ConsoleLogger(); this.app = new Koa(); // Error middleware — catches all errors (including JWT 401) and returns structured JSON @@ -42,7 +51,7 @@ export default class ExecutorHttpServer { return; } - this.options.logger?.error('Unhandled HTTP error', { + this.logger.error('Unhandled HTTP error', { method: ctx.method, path: ctx.path, error: err instanceof Error ? err.message : String(err), @@ -63,44 +72,14 @@ export default class ExecutorHttpServer { const router = new Router(); - // Authorization middleware — verifies that the authenticated user owns the requested run. - // Applied to all /runs/:runId routes so future routes are automatically protected. - router.use('/runs/:runId', async (ctx, next) => { - // Raw token is always present here: koa-jwt already rejected the request if missing. - const userToken = ctx.state.rawToken as string; - - try { - const allowed = await this.options.workflowPort.hasRunAccess(ctx.params.runId, userToken); - - if (!allowed) { - ctx.status = 403; - ctx.body = { error: 'Forbidden' }; - - return; - } - } catch (err) { - this.options.logger?.error('Failed to check run access', { - runId: ctx.params.runId, - method: ctx.method, - path: ctx.path, - error: err instanceof Error ? err.message : String(err), - stack: err instanceof Error ? err.stack : undefined, - }); - ctx.status = 503; - ctx.body = { error: 'Service unavailable' }; - - return; - } - - await next(); - }); - - router.get('/runs/:runId', this.handleGetRun.bind(this)); - router.post('/runs/:runId/trigger', this.handleTrigger.bind(this)); - router.patch( - '/runs/:runId/steps/:stepIndex/pending-data', - this.handlePatchPendingData.bind(this), + // hasRunAccess authorization — only on GET (read-only route). 
+ // Trigger handles its own authz by comparing bearer user with step.user. + router.get( + '/runs/:runId', + this.hasRunAccessMiddleware.bind(this), + this.handleGetRun.bind(this), ); + router.post('/runs/:runId/trigger', this.handleTrigger.bind(this)); this.app.use(router.routes()); this.app.use(router.allowedMethods()); @@ -137,6 +116,35 @@ export default class ExecutorHttpServer { return this.app.callback(); } + private async hasRunAccessMiddleware(ctx: Koa.Context, next: Koa.Next): Promise { + const user = ctx.state.user as StepUser; + + try { + const allowed = await this.options.workflowPort.hasRunAccess(ctx.params.runId, user); + + if (!allowed) { + ctx.status = 403; + ctx.body = { error: 'Forbidden' }; + + return; + } + } catch (err) { + this.logger.error('Failed to check run access', { + runId: ctx.params.runId, + method: ctx.method, + path: ctx.path, + error: err instanceof Error ? err.message : String(err), + stack: err instanceof Error ? err.stack : undefined, + }); + ctx.status = 503; + ctx.body = { error: 'Service unavailable' }; + + return; + } + + await next(); + } + private async handleGetRun(ctx: Koa.Context): Promise { const steps = await this.options.runner.getRunStepExecutions(ctx.params.runId); ctx.body = { steps }; @@ -144,9 +152,23 @@ export default class ExecutorHttpServer { private async handleTrigger(ctx: Koa.Context): Promise { const { runId } = ctx.params; + const rawId = (ctx.state.user as { id?: unknown })?.id; + const bearerUserId = typeof rawId === 'number' ? 
rawId : Number(rawId); + + if (!Number.isFinite(bearerUserId)) { + ctx.status = 400; + ctx.body = { error: 'Missing or invalid user id in token' }; + + return; + } + + const pendingData = (ctx.request.body as { pendingData?: unknown })?.pendingData; try { - await this.options.runner.triggerPoll(runId); + await this.options.runner.triggerPoll(runId, { + pendingData, + bearerUserId, + }); } catch (err) { if (err instanceof RunNotFoundError) { ctx.status = 404; @@ -155,27 +177,14 @@ export default class ExecutorHttpServer { return; } - throw err; - } - - ctx.status = 200; - ctx.body = { triggered: true }; - } - - private async handlePatchPendingData(ctx: Koa.Context): Promise { - const { runId, stepIndex: stepIndexStr } = ctx.params; - const stepIndex = parseInt(stepIndexStr, 10); + if (err instanceof UserMismatchError) { + this.logger.error('User mismatch on trigger', { runId, bearerUserId }); + ctx.status = 403; + ctx.body = { error: 'Forbidden' }; - if (Number.isNaN(stepIndex)) { - ctx.status = 400; - ctx.body = { error: 'Invalid stepIndex' }; - - return; - } + return; + } - try { - await this.options.runner.patchPendingData(runId, stepIndex, ctx.request.body); - } catch (err) { if (err instanceof PendingDataNotFoundError) { ctx.status = 404; ctx.body = { error: 'Step execution not found or has no pending data' }; @@ -193,6 +202,7 @@ export default class ExecutorHttpServer { throw err; } - ctx.status = 204; + ctx.status = 200; + ctx.body = { triggered: true }; } } diff --git a/packages/workflow-executor/src/index.ts b/packages/workflow-executor/src/index.ts index e359dc4ac9..189f0a3b47 100644 --- a/packages/workflow-executor/src/index.ts +++ b/packages/workflow-executor/src/index.ts @@ -44,6 +44,7 @@ export type { } from './types/record'; export type { + StepUser, Step, PendingStepExecution, StepExecutionResult, @@ -102,7 +103,14 @@ export type { ExecutorHttpServerOptions } from './http/executor-http-server'; export { default as Runner } from './runner'; export 
type { RunnerConfig } from './runner'; export { default as validateSecrets } from './validate-secrets'; +export { default as SchemaCache } from './schema-cache'; export { default as InMemoryStore } from './stores/in-memory-store'; export { default as DatabaseStore } from './stores/database-store'; export type { DatabaseStoreOptions } from './stores/database-store'; export { buildDatabaseRunStore, buildInMemoryRunStore } from './stores/build-run-store'; +export { buildInMemoryExecutor, buildDatabaseExecutor } from './build-workflow-executor'; +export type { + WorkflowExecutor, + ExecutorOptions, + DatabaseExecutorOptions, +} from './build-workflow-executor'; diff --git a/packages/workflow-executor/src/ports/agent-port.ts b/packages/workflow-executor/src/ports/agent-port.ts index 4a95c92cdf..9b809cc6db 100644 --- a/packages/workflow-executor/src/ports/agent-port.ts +++ b/packages/workflow-executor/src/ports/agent-port.ts @@ -1,5 +1,6 @@ /** @draft Types derived from the workflow-executor spec -- subject to change. 
*/ +import type { StepUser } from '../types/execution'; import type { RecordData } from '../types/record'; export type Id = string | number; @@ -20,8 +21,8 @@ export type GetRelatedDataQuery = { export type ExecuteActionQuery = { collection: string; action: string; id?: Id[] }; export interface AgentPort { - getRecord(query: GetRecordQuery): Promise; - updateRecord(query: UpdateRecordQuery): Promise; - getRelatedData(query: GetRelatedDataQuery): Promise; - executeAction(query: ExecuteActionQuery): Promise; + getRecord(query: GetRecordQuery, user: StepUser): Promise; + updateRecord(query: UpdateRecordQuery, user: StepUser): Promise; + getRelatedData(query: GetRelatedDataQuery, user: StepUser): Promise; + executeAction(query: ExecuteActionQuery, user: StepUser): Promise; } diff --git a/packages/workflow-executor/src/ports/workflow-port.ts b/packages/workflow-executor/src/ports/workflow-port.ts index 9373a0c7f6..74274e7288 100644 --- a/packages/workflow-executor/src/ports/workflow-port.ts +++ b/packages/workflow-executor/src/ports/workflow-port.ts @@ -1,6 +1,6 @@ /** @draft Types derived from the workflow-executor spec -- subject to change. 
*/ -import type { PendingStepExecution } from '../types/execution'; +import type { PendingStepExecution, StepUser } from '../types/execution'; import type { CollectionSchema } from '../types/record'; import type { StepOutcome } from '../types/step-outcome'; import type { McpConfiguration } from '@forestadmin/ai-proxy'; @@ -13,5 +13,5 @@ export interface WorkflowPort { updateStepExecution(runId: string, stepOutcome: StepOutcome): Promise; getCollectionSchema(collectionName: string): Promise; getMcpServerConfigs(): Promise; - hasRunAccess(runId: string, userToken: string): Promise; + hasRunAccess(runId: string, user: StepUser): Promise; } diff --git a/packages/workflow-executor/src/runner.ts b/packages/workflow-executor/src/runner.ts index 69e01d283b..2b6bb42320 100644 --- a/packages/workflow-executor/src/runner.ts +++ b/packages/workflow-executor/src/runner.ts @@ -3,6 +3,7 @@ import type { AgentPort } from './ports/agent-port'; import type { Logger } from './ports/logger-port'; import type { RunStore } from './ports/run-store'; import type { McpConfiguration, WorkflowPort } from './ports/workflow-port'; +import type SchemaCache from './schema-cache'; import type { PendingStepExecution, StepExecutionResult } from './types/execution'; import type { StepExecutionData } from './types/step-execution-data'; import type { AiClient, RemoteTool } from '@forestadmin/ai-proxy'; @@ -12,6 +13,7 @@ import { InvalidPendingDataError, PendingDataNotFoundError, RunNotFoundError, + UserMismatchError, causeMessage, } from './errors'; import StepExecutorFactory from './executors/step-executor-factory'; @@ -23,6 +25,7 @@ export interface RunnerConfig { agentPort: AgentPort; workflowPort: WorkflowPort; runStore: RunStore; + schemaCache: SchemaCache; pollingIntervalMs: number; aiClient: AiClient; envSecret: string; @@ -39,16 +42,6 @@ export default class Runner { private isRunning = false; private readonly logger: Logger; - private static once(fn: () => Promise): () => Promise { - let 
cached: Promise | undefined; - - return () => { - cached ??= fn(); - - return cached; - }; - } - private static stepKey(step: PendingStepExecution): string { return `${step.runId}:${step.stepId}`; } @@ -112,7 +105,7 @@ export default class Runner { return this.config.runStore.getStepExecutions(runId); } - async patchPendingData(runId: string, stepIndex: number, body: unknown): Promise { + private async patchPendingData(runId: string, stepIndex: number, body: unknown): Promise { const stepExecutions = await this.config.runStore.getStepExecutions(runId); const execution = stepExecutions.find(e => e.stepIndex === stepIndex); const schema = execution ? patchBodySchemas[execution.type] : undefined; @@ -143,15 +136,25 @@ export default class Runner { } as StepExecutionData); } - async triggerPoll(runId: string): Promise { + async triggerPoll( + runId: string, + options?: { pendingData?: unknown; bearerUserId?: number }, + ): Promise { const step = await this.config.workflowPort.getPendingStepExecutionsForRun(runId); if (!step) throw new RunNotFoundError(runId); + if (options?.bearerUserId !== undefined && step.user.id !== options.bearerUserId) { + throw new UserMismatchError(runId); + } + + if (options?.pendingData !== undefined) { + await this.patchPendingData(runId, step.stepIndex, options.pendingData); + } + if (this.inFlightSteps.has(Runner.stepKey(step))) return; - const loadTools = Runner.once(() => this.fetchRemoteTools()); - await this.executeStep(step, loadTools); + await this.executeStep(step); } private schedulePoll(): void { @@ -163,8 +166,7 @@ export default class Runner { try { const steps = await this.config.workflowPort.getPendingStepExecutions(); const pending = steps.filter(s => !this.inFlightSteps.has(Runner.stepKey(s))); - const loadTools = Runner.once(() => this.fetchRemoteTools()); - await Promise.allSettled(pending.map(s => this.executeStep(s, loadTools))); + await Promise.allSettled(pending.map(s => this.executeStep(s))); } catch (error) { 
this.logger.error('Poll cycle failed', { error: error instanceof Error ? error.message : String(error), @@ -187,17 +189,16 @@ export default class Runner { return this.config.aiClient.loadRemoteTools(mergedConfig); } - private async executeStep( - step: PendingStepExecution, - loadTools: () => Promise, - ): Promise { + private async executeStep(step: PendingStepExecution): Promise { const key = Runner.stepKey(step); this.inFlightSteps.add(key); let result: StepExecutionResult; try { - const executor = await StepExecutorFactory.create(step, this.contextConfig, loadTools); + const executor = await StepExecutorFactory.create(step, this.contextConfig, () => + this.fetchRemoteTools(), + ); result = await executor.execute(); } catch (error) { // This block should never execute: the factory and executor contracts guarantee no rejection. @@ -233,6 +234,7 @@ export default class Runner { agentPort: this.config.agentPort, workflowPort: this.config.workflowPort, runStore: this.config.runStore, + schemaCache: this.config.schemaCache, logger: this.logger, }; } diff --git a/packages/workflow-executor/src/schema-cache.ts b/packages/workflow-executor/src/schema-cache.ts new file mode 100644 index 0000000000..bba308a4ab --- /dev/null +++ b/packages/workflow-executor/src/schema-cache.ts @@ -0,0 +1,45 @@ +import type { CollectionSchema } from './types/record'; + +const DEFAULT_TTL_MS = 10 * 60 * 1000; // 10 minutes + +export default class SchemaCache { + private readonly store = new Map(); + private readonly ttlMs: number; + private readonly now: () => number; + + constructor(ttlMs: number = DEFAULT_TTL_MS, now: () => number = Date.now) { + this.ttlMs = ttlMs; + this.now = now; + } + + get(collectionName: string): CollectionSchema | undefined { + const entry = this.store.get(collectionName); + + if (!entry) return undefined; + + if (this.now() - entry.fetchedAt > this.ttlMs) { + this.store.delete(collectionName); + + return undefined; + } + + return entry.schema; + } + + 
set(collectionName: string, schema: CollectionSchema): void { + this.store.set(collectionName, { schema, fetchedAt: this.now() }); + } + + /** Iterates over non-expired entries, removing stale ones. */ + *[Symbol.iterator](): IterableIterator<[string, CollectionSchema]> { + const now = this.now(); + + for (const [key, entry] of this.store) { + if (now - entry.fetchedAt <= this.ttlMs) { + yield [key, entry.schema]; + } else { + this.store.delete(key); + } + } + } +} diff --git a/packages/workflow-executor/src/types/execution.ts b/packages/workflow-executor/src/types/execution.ts index 2da261f519..a3cc2d775e 100644 --- a/packages/workflow-executor/src/types/execution.ts +++ b/packages/workflow-executor/src/types/execution.ts @@ -1,6 +1,7 @@ /** @draft Types derived from the workflow-executor spec -- subject to change. */ import type { RecordRef } from './record'; +import type SchemaCache from '../schema-cache'; import type { StepDefinition } from './step-definition'; import type { StepOutcome } from './step-outcome'; import type { AgentPort } from '../ports/agent-port'; @@ -9,6 +10,18 @@ import type { RunStore } from '../ports/run-store'; import type { WorkflowPort } from '../ports/workflow-port'; import type { BaseChatModel } from '@forestadmin/ai-proxy'; +export interface StepUser { + id: number; + email: string; + firstName: string; + lastName: string; + team: string; + renderingId: number; + role: string; + permissionLevel: string; + tags: Record; +} + export interface Step { stepDefinition: StepDefinition; stepOutcome: StepOutcome; @@ -21,6 +34,7 @@ export interface PendingStepExecution { readonly baseRecordRef: RecordRef; readonly stepDefinition: StepDefinition; readonly previousSteps: ReadonlyArray; + readonly user: StepUser; } export interface StepExecutionResult { @@ -41,6 +55,8 @@ export interface ExecutionContext readonly agentPort: AgentPort; readonly workflowPort: WorkflowPort; readonly runStore: RunStore; + readonly user: StepUser; + readonly 
schemaCache: SchemaCache; readonly previousSteps: ReadonlyArray>; readonly logger: Logger; } diff --git a/packages/workflow-executor/src/types/record.ts b/packages/workflow-executor/src/types/record.ts index c0441b6e36..c79cac76a5 100644 --- a/packages/workflow-executor/src/types/record.ts +++ b/packages/workflow-executor/src/types/record.ts @@ -15,6 +15,7 @@ export interface FieldSchema { export interface ActionSchema { name: string; displayName: string; + endpoint: string; } export interface CollectionSchema { diff --git a/packages/workflow-executor/src/types/step-outcome.ts b/packages/workflow-executor/src/types/step-outcome.ts index 3421b60176..1bd933db10 100644 --- a/packages/workflow-executor/src/types/step-outcome.ts +++ b/packages/workflow-executor/src/types/step-outcome.ts @@ -4,14 +4,11 @@ import { StepType } from './step-definition'; export type BaseStepStatus = 'success' | 'error'; -/** Condition steps can fall back to human decision when the AI is uncertain. */ -export type ConditionStepStatus = BaseStepStatus | 'manual-decision'; - /** AI task steps can pause mid-execution to await user input (e.g. awaiting-input). */ export type RecordTaskStepStatus = BaseStepStatus | 'awaiting-input'; /** Union of all step statuses. */ -export type StepStatus = ConditionStepStatus | RecordTaskStepStatus; +export type StepStatus = BaseStepStatus | RecordTaskStepStatus; /** * StepOutcome is sent to the orchestrator — it must NEVER contain client data. @@ -27,7 +24,7 @@ interface BaseStepOutcome { export interface ConditionStepOutcome extends BaseStepOutcome { type: 'condition'; - status: ConditionStepStatus; + status: BaseStepStatus; /** Present when status is 'success'. 
*/ selectedOption?: string; } diff --git a/packages/workflow-executor/test/adapters/agent-client-agent-port.test.ts b/packages/workflow-executor/test/adapters/agent-client-agent-port.test.ts index cca7c3b4f9..38ed081d41 100644 --- a/packages/workflow-executor/test/adapters/agent-client-agent-port.test.ts +++ b/packages/workflow-executor/test/adapters/agent-client-agent-port.test.ts @@ -1,8 +1,18 @@ -import type { CollectionSchema } from '../../src/types/record'; -import type { RemoteAgentClient } from '@forestadmin/agent-client'; +import type { StepUser } from '../../src/types/execution'; + +import { createRemoteAgentClient } from '@forestadmin/agent-client'; import AgentClientAgentPort from '../../src/adapters/agent-client-agent-port'; import { RecordNotFoundError } from '../../src/errors'; +import SchemaCache from '../../src/schema-cache'; + +jest.mock('@forestadmin/agent-client', () => ({ + createRemoteAgentClient: jest.fn(), +})); + +const mockedCreateRemoteAgentClient = createRemoteAgentClient as jest.MockedFunction< + typeof createRemoteAgentClient +>; function createMockClient() { const mockAction = { execute: jest.fn() }; @@ -16,68 +26,84 @@ function createMockClient() { const client = { collection: jest.fn().mockReturnValue(mockCollection), - } as unknown as jest.Mocked; + }; return { client, mockCollection, mockRelation, mockAction }; } describe('AgentClientAgentPort', () => { - let client: jest.Mocked; let mockCollection: ReturnType['mockCollection']; let mockRelation: ReturnType['mockRelation']; let mockAction: ReturnType['mockAction']; - let collectionSchemas: Record; + let user: StepUser; let port: AgentClientAgentPort; beforeEach(() => { jest.clearAllMocks(); - ({ client, mockCollection, mockRelation, mockAction } = createMockClient()); + const mocks = createMockClient(); + ({ mockCollection, mockRelation, mockAction } = mocks); + mockedCreateRemoteAgentClient.mockReturnValue(mocks.client as any); + + const schemaCache = new SchemaCache(); + 
schemaCache.set('users', { + collectionName: 'users', + collectionDisplayName: 'Users', + primaryKeyFields: ['id'], + fields: [ + { fieldName: 'id', displayName: 'id', isRelationship: false }, + { fieldName: 'name', displayName: 'name', isRelationship: false }, + ], + actions: [ + { name: 'sendEmail', displayName: 'Send Email', endpoint: '/forest/actions/sendEmail' }, + { name: 'archive', displayName: 'Archive', endpoint: '/forest/actions/archive' }, + ], + }); + schemaCache.set('orders', { + collectionName: 'orders', + collectionDisplayName: 'Orders', + primaryKeyFields: ['tenantId', 'orderId'], + fields: [ + { fieldName: 'tenantId', displayName: 'Tenant', isRelationship: false }, + { fieldName: 'orderId', displayName: 'Order', isRelationship: false }, + ], + actions: [], + }); + schemaCache.set('posts', { + collectionName: 'posts', + collectionDisplayName: 'Posts', + primaryKeyFields: ['id'], + fields: [ + { fieldName: 'id', displayName: 'id', isRelationship: false }, + { fieldName: 'title', displayName: 'title', isRelationship: false }, + ], + actions: [], + }); - collectionSchemas = { - users: { - collectionName: 'users', - collectionDisplayName: 'Users', - primaryKeyFields: ['id'], - fields: [ - { fieldName: 'id', displayName: 'id', isRelationship: false }, - { fieldName: 'name', displayName: 'name', isRelationship: false }, - ], - actions: [ - { name: 'sendEmail', displayName: 'Send Email' }, - { name: 'archive', displayName: 'Archive' }, - ], - }, - orders: { - collectionName: 'orders', - collectionDisplayName: 'Orders', - primaryKeyFields: ['tenantId', 'orderId'], - fields: [ - { fieldName: 'tenantId', displayName: 'Tenant', isRelationship: false }, - { fieldName: 'orderId', displayName: 'Order', isRelationship: false }, - ], - actions: [], - }, - posts: { - collectionName: 'posts', - collectionDisplayName: 'Posts', - primaryKeyFields: ['id'], - fields: [ - { fieldName: 'id', displayName: 'id', isRelationship: false }, - { fieldName: 'title', displayName: 
'title', isRelationship: false }, - ], - actions: [], - }, + user = { + id: 1, + email: 'test@example.com', + firstName: 'Test', + lastName: 'User', + team: 'admin', + renderingId: 1, + role: 'admin', + permissionLevel: 'admin', + tags: {}, }; - port = new AgentClientAgentPort({ client, collectionSchemas }); + port = new AgentClientAgentPort({ + agentUrl: 'http://localhost:3310', + authSecret: 'test-secret', + schemaCache, + }); }); describe('getRecord', () => { it('should return a RecordData for a simple PK', async () => { mockCollection.list.mockResolvedValue([{ id: 42, name: 'Alice' }]); - const result = await port.getRecord({ collection: 'users', id: [42] }); + const result = await port.getRecord({ collection: 'users', id: [42] }, user); expect(mockCollection.list).toHaveBeenCalledWith({ filters: { field: 'id', operator: 'Equal', value: 42 }, @@ -93,7 +119,7 @@ describe('AgentClientAgentPort', () => { it('should build a composite And filter for composite PKs', async () => { mockCollection.list.mockResolvedValue([{ tenantId: 1, orderId: 2 }]); - await port.getRecord({ collection: 'orders', id: [1, 2] }); + await port.getRecord({ collection: 'orders', id: [1, 2] }, user); expect(mockCollection.list).toHaveBeenCalledWith({ filters: { @@ -110,7 +136,7 @@ describe('AgentClientAgentPort', () => { it('should throw a RecordNotFoundError when no record is found', async () => { mockCollection.list.mockResolvedValue([]); - await expect(port.getRecord({ collection: 'users', id: [999] })).rejects.toThrow( + await expect(port.getRecord({ collection: 'users', id: [999] }, user)).rejects.toThrow( RecordNotFoundError, ); }); @@ -118,7 +144,7 @@ describe('AgentClientAgentPort', () => { it('should pass fields to list when fields is provided', async () => { mockCollection.list.mockResolvedValue([{ id: 42, name: 'Alice' }]); - await port.getRecord({ collection: 'users', id: [42], fields: ['id', 'name'] }); + await port.getRecord({ collection: 'users', id: [42], fields: ['id', 
'name'] }, user); expect(mockCollection.list).toHaveBeenCalledWith({ filters: { field: 'id', operator: 'Equal', value: 42 }, @@ -130,7 +156,7 @@ describe('AgentClientAgentPort', () => { it('should not pass fields to list when fields is an empty array', async () => { mockCollection.list.mockResolvedValue([{ id: 42, name: 'Alice' }]); - await port.getRecord({ collection: 'users', id: [42], fields: [] }); + await port.getRecord({ collection: 'users', id: [42], fields: [] }, user); expect(mockCollection.list).toHaveBeenCalledWith({ filters: { field: 'id', operator: 'Equal', value: 42 }, @@ -141,7 +167,7 @@ describe('AgentClientAgentPort', () => { it('should not pass fields to list when fields is undefined', async () => { mockCollection.list.mockResolvedValue([{ id: 42, name: 'Alice' }]); - await port.getRecord({ collection: 'users', id: [42] }); + await port.getRecord({ collection: 'users', id: [42] }, user); expect(mockCollection.list).toHaveBeenCalledWith({ filters: { field: 'id', operator: 'Equal', value: 42 }, @@ -152,7 +178,7 @@ describe('AgentClientAgentPort', () => { it('should fallback to pk field "id" when collection is unknown', async () => { mockCollection.list.mockResolvedValue([{ id: 1 }]); - const result = await port.getRecord({ collection: 'unknown', id: [1] }); + const result = await port.getRecord({ collection: 'unknown', id: [1] }, user); expect(mockCollection.list).toHaveBeenCalledWith( expect.objectContaining({ @@ -167,11 +193,14 @@ describe('AgentClientAgentPort', () => { it('should call update with pipe-encoded id and return a RecordData', async () => { mockCollection.update.mockResolvedValue({ id: 42, name: 'Bob' }); - const result = await port.updateRecord({ - collection: 'users', - id: [42], - values: { name: 'Bob' }, - }); + const result = await port.updateRecord( + { + collection: 'users', + id: [42], + values: { name: 'Bob' }, + }, + user, + ); expect(mockCollection.update).toHaveBeenCalledWith('42', { name: 'Bob' }); 
expect(result).toEqual({ @@ -184,7 +213,10 @@ describe('AgentClientAgentPort', () => { it('should encode composite PK to pipe format for update', async () => { mockCollection.update.mockResolvedValue({ tenantId: 1, orderId: 2 }); - await port.updateRecord({ collection: 'orders', id: [1, 2], values: { status: 'done' } }); + await port.updateRecord( + { collection: 'orders', id: [1, 2], values: { status: 'done' } }, + user, + ); expect(mockCollection.update).toHaveBeenCalledWith('1|2', { status: 'done' }); }); @@ -197,12 +229,15 @@ describe('AgentClientAgentPort', () => { { id: 11, title: 'Post B' }, ]); - const result = await port.getRelatedData({ - collection: 'users', - id: [42], - relation: 'posts', - limit: null, - }); + const result = await port.getRelatedData( + { + collection: 'users', + id: [42], + relation: 'posts', + limit: null, + }, + user, + ); expect(mockCollection.relation).toHaveBeenCalledWith('posts', '42'); expect(result).toEqual([ @@ -222,7 +257,10 @@ describe('AgentClientAgentPort', () => { it('should apply pagination when limit is a number', async () => { mockRelation.list.mockResolvedValue([{ id: 10, title: 'Post A' }]); - await port.getRelatedData({ collection: 'users', id: [42], relation: 'posts', limit: 5 }); + await port.getRelatedData( + { collection: 'users', id: [42], relation: 'posts', limit: 5 }, + user, + ); expect(mockRelation.list).toHaveBeenCalledWith( expect.objectContaining({ pagination: { size: 5, number: 1 } }), @@ -232,7 +270,10 @@ describe('AgentClientAgentPort', () => { it('should not apply pagination when limit is null', async () => { mockRelation.list.mockResolvedValue([]); - await port.getRelatedData({ collection: 'users', id: [42], relation: 'posts', limit: null }); + await port.getRelatedData( + { collection: 'users', id: [42], relation: 'posts', limit: null }, + user, + ); expect(mockRelation.list).toHaveBeenCalledWith({}); }); @@ -240,12 +281,15 @@ describe('AgentClientAgentPort', () => { it('should fallback to 
relationName when no CollectionSchema exists', async () => { mockRelation.list.mockResolvedValue([{ id: 1 }]); - const result = await port.getRelatedData({ - collection: 'users', - id: [42], - relation: 'unknownRelation', - limit: null, - }); + const result = await port.getRelatedData( + { + collection: 'users', + id: [42], + relation: 'unknownRelation', + limit: null, + }, + user, + ); expect(result[0].collectionName).toBe('unknownRelation'); expect(result[0].recordId).toEqual([1]); @@ -255,25 +299,31 @@ describe('AgentClientAgentPort', () => { mockRelation.list.mockResolvedValue([]); expect( - await port.getRelatedData({ - collection: 'users', - id: [42], - relation: 'posts', - limit: null, - }), + await port.getRelatedData( + { + collection: 'users', + id: [42], + relation: 'posts', + limit: null, + }, + user, + ), ).toEqual([]); }); it('should forward fields to the list call when provided', async () => { mockRelation.list.mockResolvedValue([{ id: 10, title: 'Post A' }]); - await port.getRelatedData({ - collection: 'users', - id: [42], - relation: 'posts', - limit: null, - fields: ['title'], - }); + await port.getRelatedData( + { + collection: 'users', + id: [42], + relation: 'posts', + limit: null, + fields: ['title'], + }, + user, + ); expect(mockRelation.list).toHaveBeenCalledWith( expect.objectContaining({ fields: ['title'] }), @@ -283,7 +333,10 @@ describe('AgentClientAgentPort', () => { it('should omit fields from the list call when not provided', async () => { mockRelation.list.mockResolvedValue([{ id: 10 }]); - await port.getRelatedData({ collection: 'users', id: [42], relation: 'posts', limit: null }); + await port.getRelatedData( + { collection: 'users', id: [42], relation: 'posts', limit: null }, + user, + ); expect(mockRelation.list).toHaveBeenCalledWith( expect.not.objectContaining({ fields: expect.anything() }), @@ -295,11 +348,14 @@ describe('AgentClientAgentPort', () => { it('should encode ids to pipe format and call execute', async () => { 
mockAction.execute.mockResolvedValue({ success: 'done' }); - const result = await port.executeAction({ - collection: 'users', - action: 'sendEmail', - id: [1], - }); + const result = await port.executeAction( + { + collection: 'users', + action: 'sendEmail', + id: [1], + }, + user, + ); expect(mockCollection.action).toHaveBeenCalledWith('sendEmail', { recordIds: ['1'] }); expect(result).toEqual({ success: 'done' }); @@ -308,7 +364,7 @@ describe('AgentClientAgentPort', () => { it('should call execute with empty recordIds when ids is not provided', async () => { mockAction.execute.mockResolvedValue(undefined); - await port.executeAction({ collection: 'users', action: 'archive' }); + await port.executeAction({ collection: 'users', action: 'archive' }, user); expect(mockCollection.action).toHaveBeenCalledWith('archive', { recordIds: [] }); expect(mockAction.execute).toHaveBeenCalled(); @@ -318,7 +374,7 @@ describe('AgentClientAgentPort', () => { mockAction.execute.mockRejectedValue(new Error('Action failed')); await expect( - port.executeAction({ collection: 'users', action: 'sendEmail', id: [1] }), + port.executeAction({ collection: 'users', action: 'sendEmail', id: [1] }, user), ).rejects.toThrow('Action failed'); }); }); diff --git a/packages/workflow-executor/test/adapters/forest-server-workflow-port.test.ts b/packages/workflow-executor/test/adapters/forest-server-workflow-port.test.ts index 2bc9172293..725a98807d 100644 --- a/packages/workflow-executor/test/adapters/forest-server-workflow-port.test.ts +++ b/packages/workflow-executor/test/adapters/forest-server-workflow-port.test.ts @@ -125,7 +125,17 @@ describe('ForestServerWorkflowPort', () => { describe('hasRunAccess', () => { it('always returns true (stub until orchestrator endpoint is available)', async () => { - const result = await port.hasRunAccess('run-42', 'some-token'); + const result = await port.hasRunAccess('run-42', { + id: 1, + email: 'test@example.com', + firstName: 'Test', + lastName: 'User', + 
team: 'admin', + renderingId: 1, + role: 'admin', + permissionLevel: 'admin', + tags: {}, + }); expect(result).toBe(true); expect(mockQuery).not.toHaveBeenCalled(); diff --git a/packages/workflow-executor/test/build-workflow-executor.test.ts b/packages/workflow-executor/test/build-workflow-executor.test.ts new file mode 100644 index 0000000000..6ed51b9325 --- /dev/null +++ b/packages/workflow-executor/test/build-workflow-executor.test.ts @@ -0,0 +1,206 @@ +import ForestServerWorkflowPort from '../src/adapters/forest-server-workflow-port'; +import { buildDatabaseExecutor, buildInMemoryExecutor } from '../src/build-workflow-executor'; +import Runner from '../src/runner'; +import SchemaCache from '../src/schema-cache'; +import DatabaseStore from '../src/stores/database-store'; +import InMemoryStore from '../src/stores/in-memory-store'; + +jest.mock('../src/runner'); +jest.mock('../src/stores/in-memory-store'); +jest.mock('../src/stores/database-store'); +jest.mock('../src/adapters/agent-client-agent-port'); +jest.mock('../src/adapters/forest-server-workflow-port'); +jest.mock('@forestadmin/ai-proxy', () => ({ + AiClient: jest.fn(), +})); +jest.mock('sequelize', () => ({ + Sequelize: jest.fn(), +})); + +const MockedRunner = Runner as jest.MockedClass; + +const BASE_OPTIONS = { + envSecret: 'a'.repeat(64), + authSecret: 'test-secret', + agentUrl: 'http://localhost:3310', + aiConfigurations: [ + { name: 'default', provider: 'openai' as const, model: 'gpt-4o', apiKey: 'sk-test' }, + ], +}; + +beforeEach(() => { + jest.clearAllMocks(); +}); + +describe('buildInMemoryExecutor', () => { + it('returns a WorkflowExecutor backed by a Runner', () => { + const executor = buildInMemoryExecutor(BASE_OPTIONS); + + expect(executor).toBeInstanceOf(Runner); + }); + + it('creates an InMemoryStore as runStore', () => { + buildInMemoryExecutor(BASE_OPTIONS); + + expect(InMemoryStore).toHaveBeenCalledTimes(1); + expect(MockedRunner).toHaveBeenCalledWith( + expect.objectContaining({ 
runStore: expect.any(InMemoryStore) }), + ); + }); + + it('creates ForestServerWorkflowPort with default forestServerUrl', () => { + buildInMemoryExecutor(BASE_OPTIONS); + + expect(ForestServerWorkflowPort).toHaveBeenCalledWith({ + envSecret: BASE_OPTIONS.envSecret, + forestServerUrl: 'https://api.forestadmin.com', + }); + }); + + it('creates ForestServerWorkflowPort with custom forestServerUrl', () => { + buildInMemoryExecutor({ ...BASE_OPTIONS, forestServerUrl: 'https://custom.example.com' }); + + expect(ForestServerWorkflowPort).toHaveBeenCalledWith({ + envSecret: BASE_OPTIONS.envSecret, + forestServerUrl: 'https://custom.example.com', + }); + }); + + it('creates AgentClientAgentPort with agentUrl and authSecret as agentPort singleton', () => { + // eslint-disable-next-line @typescript-eslint/no-var-requires, global-require + const AgentClientAgentPort = require('../src/adapters/agent-client-agent-port').default; + + buildInMemoryExecutor(BASE_OPTIONS); + + expect(AgentClientAgentPort).toHaveBeenCalledWith({ + agentUrl: 'http://localhost:3310', + authSecret: 'test-secret', + schemaCache: expect.any(SchemaCache), + }); + }); + + it('passes an agentPort singleton to Runner', () => { + buildInMemoryExecutor(BASE_OPTIONS); + + expect(MockedRunner).toHaveBeenCalledWith( + expect.objectContaining({ agentPort: expect.any(Object) }), + ); + }); + + it('creates AiClient with the provided aiConfigurations', () => { + // eslint-disable-next-line @typescript-eslint/no-var-requires, global-require + const { AiClient } = require('@forestadmin/ai-proxy'); + + buildInMemoryExecutor(BASE_OPTIONS); + + expect(AiClient).toHaveBeenCalledWith({ + aiConfigurations: BASE_OPTIONS.aiConfigurations, + }); + }); + + it('passes pollingIntervalMs with default value of 5000', () => { + buildInMemoryExecutor(BASE_OPTIONS); + + expect(MockedRunner).toHaveBeenCalledWith(expect.objectContaining({ pollingIntervalMs: 5000 })); + }); + + it('passes custom pollingIntervalMs', () => { + 
buildInMemoryExecutor({ ...BASE_OPTIONS, pollingIntervalMs: 1000 }); + + expect(MockedRunner).toHaveBeenCalledWith(expect.objectContaining({ pollingIntervalMs: 1000 })); + }); + + it('passes secrets to Runner config', () => { + buildInMemoryExecutor(BASE_OPTIONS); + + expect(MockedRunner).toHaveBeenCalledWith( + expect.objectContaining({ + envSecret: BASE_OPTIONS.envSecret, + authSecret: BASE_OPTIONS.authSecret, + }), + ); + }); + + it('passes optional httpPort', () => { + buildInMemoryExecutor({ ...BASE_OPTIONS, httpPort: 3000 }); + + expect(MockedRunner).toHaveBeenCalledWith(expect.objectContaining({ httpPort: 3000 })); + }); +}); + +describe('buildDatabaseExecutor', () => { + // eslint-disable-next-line @typescript-eslint/no-var-requires, global-require + const { Sequelize: MockedSequelize } = require('sequelize'); + + const DB_OPTIONS = { + ...BASE_OPTIONS, + database: { uri: 'postgres://localhost/mydb', dialect: 'postgres' as const }, + }; + + it('returns a WorkflowExecutor backed by a Runner', () => { + const executor = buildDatabaseExecutor(DB_OPTIONS); + + expect(executor).toBeInstanceOf(Runner); + }); + + it('creates a DatabaseStore as runStore', () => { + buildDatabaseExecutor(DB_OPTIONS); + + expect(DatabaseStore).toHaveBeenCalledTimes(1); + expect(MockedRunner).toHaveBeenCalledWith( + expect.objectContaining({ runStore: expect.any(DatabaseStore) }), + ); + }); + + it('creates Sequelize with uri and passes remaining options through', () => { + buildDatabaseExecutor(DB_OPTIONS); + + expect(MockedSequelize).toHaveBeenCalledWith('postgres://localhost/mydb', { + dialect: 'postgres', + }); + }); + + it('passes extra Sequelize options (logging, pool, ssl, etc.)', () => { + buildDatabaseExecutor({ + ...BASE_OPTIONS, + database: { + uri: 'postgres://localhost/mydb', + dialect: 'postgres', + logging: false, + pool: { max: 10, min: 2 }, + }, + }); + + expect(MockedSequelize).toHaveBeenCalledWith('postgres://localhost/mydb', { + dialect: 'postgres', + logging: 
false, + pool: { max: 10, min: 2 }, + }); + }); + + it('creates Sequelize with options only when no uri is provided', () => { + buildDatabaseExecutor({ + ...BASE_OPTIONS, + database: { dialect: 'postgres', host: 'db.example.com', port: 5432, database: 'mydb' }, + }); + + expect(MockedSequelize).toHaveBeenCalledWith({ + dialect: 'postgres', + host: 'db.example.com', + port: 5432, + database: 'mydb', + }); + }); + + it('shares the same common dependencies as buildInMemoryExecutor', () => { + buildDatabaseExecutor(DB_OPTIONS); + + expect(ForestServerWorkflowPort).toHaveBeenCalledWith({ + envSecret: BASE_OPTIONS.envSecret, + forestServerUrl: 'https://api.forestadmin.com', + }); + expect(MockedRunner).toHaveBeenCalledWith( + expect.objectContaining({ agentPort: expect.any(Object) }), + ); + }); +}); diff --git a/packages/workflow-executor/test/executors/base-step-executor.test.ts b/packages/workflow-executor/test/executors/base-step-executor.test.ts index caabca5fed..45a4582c40 100644 --- a/packages/workflow-executor/test/executors/base-step-executor.test.ts +++ b/packages/workflow-executor/test/executors/base-step-executor.test.ts @@ -17,6 +17,7 @@ import { StepPersistenceError, } from '../../src/errors'; import BaseStepExecutor from '../../src/executors/base-step-executor'; +import SchemaCache from '../../src/schema-cache'; import { StepType } from '../../src/types/step-definition'; /** Concrete subclass that exposes protected methods for testing. 
*/ @@ -108,6 +109,18 @@ function makeContext(overrides: Partial = {}): ExecutionContex agentPort: {} as ExecutionContext['agentPort'], workflowPort: {} as ExecutionContext['workflowPort'], runStore: makeMockRunStore(), + user: { + id: 1, + email: 'test@example.com', + firstName: 'Test', + lastName: 'User', + team: 'admin', + renderingId: 1, + role: 'admin', + permissionLevel: 'admin', + tags: {}, + }, + schemaCache: new SchemaCache(), previousSteps: [], logger: makeMockLogger(), ...overrides, diff --git a/packages/workflow-executor/test/executors/condition-step-executor.test.ts b/packages/workflow-executor/test/executors/condition-step-executor.test.ts index 22520661ed..a8ecbabd60 100644 --- a/packages/workflow-executor/test/executors/condition-step-executor.test.ts +++ b/packages/workflow-executor/test/executors/condition-step-executor.test.ts @@ -5,6 +5,7 @@ import type { ConditionStepDefinition } from '../../src/types/step-definition'; import type { ConditionStepOutcome } from '../../src/types/step-outcome'; import ConditionStepExecutor from '../../src/executors/condition-step-executor'; +import SchemaCache from '../../src/schema-cache'; import { StepType } from '../../src/types/step-definition'; function makeStep(overrides: Partial = {}): ConditionStepDefinition { @@ -55,6 +56,18 @@ function makeContext( agentPort: {} as ExecutionContext['agentPort'], workflowPort: {} as ExecutionContext['workflowPort'], runStore: makeMockRunStore(), + user: { + id: 1, + email: 'test@example.com', + firstName: 'Test', + lastName: 'User', + team: 'admin', + renderingId: 1, + role: 'admin', + permissionLevel: 'admin', + tags: {}, + }, + schemaCache: new SchemaCache(), previousSteps: [], logger: { error: jest.fn() }, ...overrides, @@ -211,7 +224,7 @@ describe('ConditionStepExecutor', () => { }); describe('no-match fallback', () => { - it('returns manual-decision when AI selects null', async () => { + it('returns error when AI selects null', async () => { const mockModel = 
makeMockModel({ option: null, reasoning: 'None apply', @@ -226,8 +239,10 @@ describe('ConditionStepExecutor', () => { const result = await executor.execute(); - expect(result.stepOutcome.status).toBe('manual-decision'); - expect(result.stepOutcome.error).toBeUndefined(); + expect(result.stepOutcome.status).toBe('error'); + expect(result.stepOutcome.error).toBe( + "The AI couldn't decide. Try rephrasing the step's prompt.", + ); expect((result.stepOutcome as ConditionStepOutcome).selectedOption).toBeUndefined(); expect(runStore.saveStepExecution).toHaveBeenCalledWith('run-1', { type: 'condition', diff --git a/packages/workflow-executor/test/executors/load-related-record-step-executor.test.ts b/packages/workflow-executor/test/executors/load-related-record-step-executor.test.ts index 536056f8a7..7d2f7ec461 100644 --- a/packages/workflow-executor/test/executors/load-related-record-step-executor.test.ts +++ b/packages/workflow-executor/test/executors/load-related-record-step-executor.test.ts @@ -7,6 +7,7 @@ import type { RecordTaskStepDefinition } from '../../src/types/step-definition'; import type { LoadRelatedRecordStepExecutionData } from '../../src/types/step-execution-data'; import LoadRelatedRecordStepExecutor from '../../src/executors/load-related-record-step-executor'; +import SchemaCache from '../../src/schema-cache'; import { StepType } from '../../src/types/step-definition'; function makeStep(overrides: Partial = {}): RecordTaskStepDefinition { @@ -126,6 +127,18 @@ function makeContext( agentPort: makeMockAgentPort(), workflowPort: makeMockWorkflowPort(), runStore: makeMockRunStore(), + user: { + id: 1, + email: 'test@example.com', + firstName: 'Test', + lastName: 'User', + team: 'admin', + renderingId: 1, + role: 'admin', + permissionLevel: 'admin', + tags: {}, + }, + schemaCache: new SchemaCache(), previousSteps: [], logger: { error: jest.fn() }, ...overrides, @@ -167,12 +180,10 @@ describe('LoadRelatedRecordStepExecutor', () => { const result = await 
executor.execute(); expect(result.stepOutcome.status).toBe('success'); - expect(agentPort.getRelatedData).toHaveBeenCalledWith({ - collection: 'customers', - id: [42], - relation: 'order', - limit: 1, - }); + expect(agentPort.getRelatedData).toHaveBeenCalledWith( + { collection: 'customers', id: [42], relation: 'order', limit: 1 }, + expect.objectContaining({ id: 1 }), + ); expect(runStore.saveStepExecution).toHaveBeenCalledWith( 'run-1', expect.objectContaining({ @@ -274,12 +285,10 @@ describe('LoadRelatedRecordStepExecutor', () => { expect(bindTools.mock.calls[2][0][0].name).toBe('select-record-by-content'); // Fetches 50 candidates (HasMany) - expect(agentPort.getRelatedData).toHaveBeenCalledWith({ - collection: 'customers', - id: [42], - relation: 'address', - limit: 50, - }); + expect(agentPort.getRelatedData).toHaveBeenCalledWith( + { collection: 'customers', id: [42], relation: 'address', limit: 50 }, + expect.objectContaining({ id: 1 }), + ); expect(runStore.saveStepExecution).toHaveBeenCalledWith( 'run-1', @@ -553,12 +562,10 @@ describe('LoadRelatedRecordStepExecutor', () => { expect(result.stepOutcome.status).toBe('success'); // HasOne uses the same fetchFirstCandidate path as BelongsTo — limit: 1 - expect(agentPort.getRelatedData).toHaveBeenCalledWith({ - collection: 'customers', - id: [42], - relation: 'profile', - limit: 1, - }); + expect(agentPort.getRelatedData).toHaveBeenCalledWith( + { collection: 'customers', id: [42], relation: 'profile', limit: 1 }, + expect.objectContaining({ id: 1 }), + ); expect(runStore.saveStepExecution).toHaveBeenCalledWith( 'run-1', expect.objectContaining({ @@ -581,12 +588,10 @@ describe('LoadRelatedRecordStepExecutor', () => { const result = await executor.execute(); expect(result.stepOutcome.status).toBe('awaiting-input'); - expect(agentPort.getRelatedData).toHaveBeenCalledWith({ - collection: 'customers', - id: [42], - relation: 'order', - limit: 50, - }); + expect(agentPort.getRelatedData).toHaveBeenCalledWith( + { 
collection: 'customers', id: [42], relation: 'order', limit: 50 }, + expect.objectContaining({ id: 1 }), + ); // Single record → only select-relation AI call expect(mockModel.bindTools).toHaveBeenCalledTimes(1); expect(runStore.saveStepExecution).toHaveBeenCalledWith( @@ -1564,12 +1569,10 @@ describe('LoadRelatedRecordStepExecutor', () => { const result = await executor.execute(); expect(result.stepOutcome.status).toBe('success'); - expect(agentPort.getRelatedData).toHaveBeenCalledWith({ - collection: 'customers', - id: [42], - relation: 'order', - limit: 1, - }); + expect(agentPort.getRelatedData).toHaveBeenCalledWith( + { collection: 'customers', id: [42], relation: 'order', limit: 1 }, + expect.objectContaining({ id: 1 }), + ); }); }); diff --git a/packages/workflow-executor/test/executors/mcp-task-step-executor.test.ts b/packages/workflow-executor/test/executors/mcp-task-step-executor.test.ts index 81b74c5749..f23b45bd48 100644 --- a/packages/workflow-executor/test/executors/mcp-task-step-executor.test.ts +++ b/packages/workflow-executor/test/executors/mcp-task-step-executor.test.ts @@ -8,6 +8,7 @@ import RemoteTool from '@forestadmin/ai-proxy/src/remote-tool'; import { StepStateError } from '../../src/errors'; import McpTaskStepExecutor from '../../src/executors/mcp-task-step-executor'; +import SchemaCache from '../../src/schema-cache'; import { StepType } from '../../src/types/step-definition'; // --------------------------------------------------------------------------- @@ -93,6 +94,18 @@ function makeContext( } as unknown as ExecutionContext['agentPort'], workflowPort: makeMockWorkflowPort(), runStore: makeMockRunStore(), + user: { + id: 1, + email: 'test@example.com', + firstName: 'Test', + lastName: 'User', + team: 'admin', + renderingId: 1, + role: 'admin', + permissionLevel: 'admin', + tags: {}, + }, + schemaCache: new SchemaCache(), previousSteps: [], logger: { error: jest.fn() }, ...overrides, diff --git 
a/packages/workflow-executor/test/executors/read-record-step-executor.test.ts b/packages/workflow-executor/test/executors/read-record-step-executor.test.ts index 7acf003c1a..2578b856d1 100644 --- a/packages/workflow-executor/test/executors/read-record-step-executor.test.ts +++ b/packages/workflow-executor/test/executors/read-record-step-executor.test.ts @@ -7,6 +7,7 @@ import type { RecordTaskStepDefinition } from '../../src/types/step-definition'; import { NoRecordsError, RecordNotFoundError } from '../../src/errors'; import ReadRecordStepExecutor from '../../src/executors/read-record-step-executor'; +import SchemaCache from '../../src/schema-cache'; import { StepType } from '../../src/types/step-definition'; function makeStep(overrides: Partial = {}): RecordTaskStepDefinition { @@ -115,6 +116,18 @@ function makeContext( agentPort: makeMockAgentPort(), workflowPort: makeMockWorkflowPort(), runStore: makeMockRunStore(), + user: { + id: 1, + email: 'test@example.com', + firstName: 'Test', + lastName: 'User', + team: 'admin', + renderingId: 1, + role: 'admin', + permissionLevel: 'admin', + tags: {}, + }, + schemaCache: new SchemaCache(), previousSteps: [], logger: { error: jest.fn() }, ...overrides, @@ -208,11 +221,10 @@ describe('ReadRecordStepExecutor', () => { await executor.execute(); - expect(agentPort.getRecord).toHaveBeenCalledWith({ - collection: 'customers', - id: [42], - fields: ['name', 'email'], - }); + expect(agentPort.getRecord).toHaveBeenCalledWith( + { collection: 'customers', id: [42], fields: ['name', 'email'] }, + expect.objectContaining({ id: 1 }), + ); }); it('passes only resolved field names when some fields are unresolved', async () => { @@ -224,11 +236,10 @@ describe('ReadRecordStepExecutor', () => { await executor.execute(); - expect(agentPort.getRecord).toHaveBeenCalledWith({ - collection: 'customers', - id: [42], - fields: ['email'], - }); + expect(agentPort.getRecord).toHaveBeenCalledWith( + { collection: 'customers', id: [42], fields: 
['email'] }, + expect.objectContaining({ id: 1 }), + ); }); it('returns error when no fields can be resolved', async () => { diff --git a/packages/workflow-executor/test/executors/safe-agent-port.test.ts b/packages/workflow-executor/test/executors/safe-agent-port.test.ts index 33ab53d072..2458801304 100644 --- a/packages/workflow-executor/test/executors/safe-agent-port.test.ts +++ b/packages/workflow-executor/test/executors/safe-agent-port.test.ts @@ -1,8 +1,21 @@ import type { AgentPort } from '../../src/ports/agent-port'; +import type { StepUser } from '../../src/types/execution'; import { AgentPortError, StepStateError, WorkflowExecutorError } from '../../src/errors'; import SafeAgentPort from '../../src/executors/safe-agent-port'; +const dummyUser: StepUser = { + id: 1, + email: 'test@example.com', + firstName: 'Test', + lastName: 'User', + team: 'admin', + renderingId: 1, + role: 'admin', + permissionLevel: 'admin', + tags: {}, +}; + function makeMockPort(overrides: Partial = {}): AgentPort { return { getRecord: jest @@ -24,7 +37,7 @@ describe('SafeAgentPort', () => { const port = makeMockPort({ getRecord: jest.fn().mockResolvedValue(expected) }); const safe = new SafeAgentPort(port); - const result = await safe.getRecord({ collection: 'customers', id: [1] }); + const result = await safe.getRecord({ collection: 'customers', id: [1] }, dummyUser); expect(result).toBe(expected); }); @@ -34,11 +47,14 @@ describe('SafeAgentPort', () => { const port = makeMockPort({ updateRecord: jest.fn().mockResolvedValue(expected) }); const safe = new SafeAgentPort(port); - const result = await safe.updateRecord({ - collection: 'customers', - id: [1], - values: { status: 'active' }, - }); + const result = await safe.updateRecord( + { + collection: 'customers', + id: [1], + values: { status: 'active' }, + }, + dummyUser, + ); expect(result).toBe(expected); }); @@ -48,12 +64,15 @@ describe('SafeAgentPort', () => { const port = makeMockPort({ getRelatedData: 
jest.fn().mockResolvedValue(expected) }); const safe = new SafeAgentPort(port); - const result = await safe.getRelatedData({ - collection: 'customers', - id: [1], - relation: 'orders', - limit: 10, - }); + const result = await safe.getRelatedData( + { + collection: 'customers', + id: [1], + relation: 'orders', + limit: 10, + }, + dummyUser, + ); expect(result).toBe(expected); }); @@ -63,7 +82,10 @@ describe('SafeAgentPort', () => { const port = makeMockPort({ executeAction: jest.fn().mockResolvedValue(expected) }); const safe = new SafeAgentPort(port); - const result = await safe.executeAction({ collection: 'customers', action: 'send-email' }); + const result = await safe.executeAction( + { collection: 'customers', action: 'send-email' }, + dummyUser, + ); expect(result).toBe(expected); }); @@ -76,7 +98,7 @@ describe('SafeAgentPort', () => { }); const safe = new SafeAgentPort(port); - await expect(safe.getRecord({ collection: 'customers', id: [1] })).rejects.toThrow( + await expect(safe.getRecord({ collection: 'customers', id: [1] }, dummyUser)).rejects.toThrow( AgentPortError, ); }); @@ -87,7 +109,7 @@ describe('SafeAgentPort', () => { }); const safe = new SafeAgentPort(port); - await expect(safe.getRecord({ collection: 'customers', id: [1] })).rejects.toThrow( + await expect(safe.getRecord({ collection: 'customers', id: [1] }, dummyUser)).rejects.toThrow( 'Agent port "getRecord" failed: DB connection lost', ); }); @@ -99,7 +121,7 @@ describe('SafeAgentPort', () => { const safe = new SafeAgentPort(port); await expect( - safe.updateRecord({ collection: 'customers', id: [1], values: {} }), + safe.updateRecord({ collection: 'customers', id: [1], values: {} }, dummyUser), ).rejects.toThrow('Agent port "updateRecord" failed: Timeout'); }); @@ -110,7 +132,10 @@ describe('SafeAgentPort', () => { const safe = new SafeAgentPort(port); await expect( - safe.getRelatedData({ collection: 'customers', id: [1], relation: 'orders', limit: 10 }), + safe.getRelatedData( + { 
collection: 'customers', id: [1], relation: 'orders', limit: 10 }, + dummyUser, + ), ).rejects.toThrow('Agent port "getRelatedData" failed: Network error'); }); @@ -121,7 +146,7 @@ describe('SafeAgentPort', () => { const safe = new SafeAgentPort(port); await expect( - safe.executeAction({ collection: 'customers', action: 'send-email' }), + safe.executeAction({ collection: 'customers', action: 'send-email' }, dummyUser), ).rejects.toThrow('Agent port "executeAction" failed: Action failed'); }); @@ -133,7 +158,7 @@ describe('SafeAgentPort', () => { let thrown: unknown; try { - await safe.getRecord({ collection: 'customers', id: [1] }); + await safe.getRecord({ collection: 'customers', id: [1] }, dummyUser); } catch (e) { thrown = e; } @@ -149,7 +174,9 @@ describe('SafeAgentPort', () => { const port = makeMockPort({ getRecord: jest.fn().mockRejectedValue(domainError) }); const safe = new SafeAgentPort(port); - await expect(safe.getRecord({ collection: 'customers', id: [1] })).rejects.toBe(domainError); + await expect(safe.getRecord({ collection: 'customers', id: [1] }, dummyUser)).rejects.toBe( + domainError, + ); }); it('rethrows WorkflowExecutorError subclass without wrapping in AgentPortError', async () => { @@ -160,7 +187,7 @@ describe('SafeAgentPort', () => { let thrown: unknown; try { - await safe.executeAction({ collection: 'customers', action: 'send-email' }); + await safe.executeAction({ collection: 'customers', action: 'send-email' }, dummyUser); } catch (e) { thrown = e; } diff --git a/packages/workflow-executor/test/executors/trigger-record-action-step-executor.test.ts b/packages/workflow-executor/test/executors/trigger-record-action-step-executor.test.ts index 6adc26d1ed..39ee40a146 100644 --- a/packages/workflow-executor/test/executors/trigger-record-action-step-executor.test.ts +++ b/packages/workflow-executor/test/executors/trigger-record-action-step-executor.test.ts @@ -8,6 +8,7 @@ import type { TriggerRecordActionStepExecutionData } from 
'../../src/types/step- import { StepStateError } from '../../src/errors'; import TriggerRecordActionStepExecutor from '../../src/executors/trigger-record-action-step-executor'; +import SchemaCache from '../../src/schema-cache'; import { StepType } from '../../src/types/step-definition'; function makeStep(overrides: Partial = {}): RecordTaskStepDefinition { @@ -46,8 +47,12 @@ function makeCollectionSchema(overrides: Partial = {}): Collec { fieldName: 'status', displayName: 'Status', isRelationship: false }, ], actions: [ - { name: 'send-welcome-email', displayName: 'Send Welcome Email' }, - { name: 'archive', displayName: 'Archive Customer' }, + { + name: 'send-welcome-email', + displayName: 'Send Welcome Email', + endpoint: '/forest/actions/send-welcome-email', + }, + { name: 'archive', displayName: 'Archive Customer', endpoint: '/forest/actions/archive' }, ], ...overrides, }; @@ -110,6 +115,18 @@ function makeContext( agentPort: makeMockAgentPort(), workflowPort: makeMockWorkflowPort(), runStore: makeMockRunStore(), + user: { + id: 1, + email: 'test@example.com', + firstName: 'Test', + lastName: 'User', + team: 'admin', + renderingId: 1, + role: 'admin', + permissionLevel: 'admin', + tags: {}, + }, + schemaCache: new SchemaCache(), previousSteps: [], logger: { error: jest.fn() }, ...overrides, @@ -137,11 +154,10 @@ describe('TriggerRecordActionStepExecutor', () => { const result = await executor.execute(); expect(result.stepOutcome.status).toBe('success'); - expect(agentPort.executeAction).toHaveBeenCalledWith({ - collection: 'customers', - action: 'send-welcome-email', - id: [42], - }); + expect(agentPort.executeAction).toHaveBeenCalledWith( + { collection: 'customers', action: 'send-welcome-email', id: [42] }, + expect.objectContaining({ id: 1 }), + ); expect(runStore.saveStepExecution).toHaveBeenCalledWith( 'run-1', expect.objectContaining({ @@ -218,11 +234,10 @@ describe('TriggerRecordActionStepExecutor', () => { const result = await executor.execute(); 
expect(result.stepOutcome.status).toBe('success'); - expect(agentPort.executeAction).toHaveBeenCalledWith({ - collection: 'customers', - action: 'send-welcome-email', - id: [42], - }); + expect(agentPort.executeAction).toHaveBeenCalledWith( + { collection: 'customers', action: 'send-welcome-email', id: [42] }, + expect.objectContaining({ id: 1 }), + ); expect(runStore.saveStepExecution).toHaveBeenCalledWith( 'run-1', expect.objectContaining({ @@ -367,7 +382,9 @@ describe('TriggerRecordActionStepExecutor', () => { reasoning: 'hallucinated', }); const schema = makeCollectionSchema({ - actions: [{ name: 'archive', displayName: 'Archive Customer' }], + actions: [ + { name: 'archive', displayName: 'Archive Customer', endpoint: '/forest/actions/archive' }, + ], }); const runStore = makeMockRunStore(); const workflowPort = makeMockWorkflowPort({ customers: schema }); @@ -547,11 +564,10 @@ describe('TriggerRecordActionStepExecutor', () => { const result = await executor.execute(); expect(result.stepOutcome.status).toBe('success'); - expect(agentPort.executeAction).toHaveBeenCalledWith({ - collection: 'customers', - action: 'archive', - id: [42], - }); + expect(agentPort.executeAction).toHaveBeenCalledWith( + { collection: 'customers', action: 'archive', id: [42] }, + expect.objectContaining({ id: 1 }), + ); }); it('resolves action when AI returns technical name instead of displayName', async () => { @@ -562,7 +578,9 @@ describe('TriggerRecordActionStepExecutor', () => { reasoning: 'fallback to technical name', }); const schema = makeCollectionSchema({ - actions: [{ name: 'archive', displayName: 'Archive Customer' }], + actions: [ + { name: 'archive', displayName: 'Archive Customer', endpoint: '/forest/actions/archive' }, + ], }); const workflowPort = makeMockWorkflowPort({ customers: schema }); const context = makeContext({ @@ -576,11 +594,10 @@ describe('TriggerRecordActionStepExecutor', () => { const result = await executor.execute(); 
expect(result.stepOutcome.status).toBe('success'); - expect(agentPort.executeAction).toHaveBeenCalledWith({ - collection: 'customers', - action: 'archive', - id: [42], - }); + expect(agentPort.executeAction).toHaveBeenCalledWith( + { collection: 'customers', action: 'archive', id: [42] }, + expect.objectContaining({ id: 1 }), + ); }); }); @@ -596,7 +613,13 @@ describe('TriggerRecordActionStepExecutor', () => { const ordersSchema = makeCollectionSchema({ collectionName: 'orders', collectionDisplayName: 'Orders', - actions: [{ name: 'cancel-order', displayName: 'Cancel Order' }], + actions: [ + { + name: 'cancel-order', + displayName: 'Cancel Order', + endpoint: '/forest/actions/cancel-order', + }, + ], }); // First call: select-record, second call: select-action diff --git a/packages/workflow-executor/test/executors/update-record-step-executor.test.ts b/packages/workflow-executor/test/executors/update-record-step-executor.test.ts index 29ff8a4691..14377287ef 100644 --- a/packages/workflow-executor/test/executors/update-record-step-executor.test.ts +++ b/packages/workflow-executor/test/executors/update-record-step-executor.test.ts @@ -8,6 +8,7 @@ import type { UpdateRecordStepExecutionData } from '../../src/types/step-executi import { StepStateError } from '../../src/errors'; import UpdateRecordStepExecutor from '../../src/executors/update-record-step-executor'; +import SchemaCache from '../../src/schema-cache'; import { StepType } from '../../src/types/step-definition'; function makeStep(overrides: Partial = {}): RecordTaskStepDefinition { @@ -116,6 +117,18 @@ function makeContext( agentPort: makeMockAgentPort(), workflowPort: makeMockWorkflowPort(), runStore: makeMockRunStore(), + user: { + id: 1, + email: 'test@example.com', + firstName: 'Test', + lastName: 'User', + team: 'admin', + renderingId: 1, + role: 'admin', + permissionLevel: 'admin', + tags: {}, + }, + schemaCache: new SchemaCache(), previousSteps: [], logger: { error: jest.fn() }, ...overrides, @@ 
-144,11 +157,10 @@ describe('UpdateRecordStepExecutor', () => { const result = await executor.execute(); expect(result.stepOutcome.status).toBe('success'); - expect(agentPort.updateRecord).toHaveBeenCalledWith({ - collection: 'customers', - id: [42], - values: { status: 'active' }, - }); + expect(agentPort.updateRecord).toHaveBeenCalledWith( + { collection: 'customers', id: [42], values: { status: 'active' } }, + expect.objectContaining({ id: 1 }), + ); expect(runStore.saveStepExecution).toHaveBeenCalledWith( 'run-1', expect.objectContaining({ @@ -221,11 +233,10 @@ describe('UpdateRecordStepExecutor', () => { const result = await executor.execute(); expect(result.stepOutcome.status).toBe('success'); - expect(agentPort.updateRecord).toHaveBeenCalledWith({ - collection: 'customers', - id: [42], - values: { status: 'active' }, - }); + expect(agentPort.updateRecord).toHaveBeenCalledWith( + { collection: 'customers', id: [42], values: { status: 'active' } }, + expect.objectContaining({ id: 1 }), + ); expect(runStore.saveStepExecution).toHaveBeenCalledWith( 'run-1', expect.objectContaining({ @@ -730,11 +741,10 @@ describe('UpdateRecordStepExecutor', () => { const result = await executor.execute(); expect(result.stepOutcome.status).toBe('success'); - expect(agentPort.updateRecord).toHaveBeenCalledWith({ - collection: 'customers', - id: [42], - values: { status: 'active' }, - }); + expect(agentPort.updateRecord).toHaveBeenCalledWith( + { collection: 'customers', id: [42], values: { status: 'active' } }, + expect.objectContaining({ id: 1 }), + ); }); }); diff --git a/packages/workflow-executor/test/http/executor-http-server.test.ts b/packages/workflow-executor/test/http/executor-http-server.test.ts index 17aabf8813..407c11ae54 100644 --- a/packages/workflow-executor/test/http/executor-http-server.test.ts +++ b/packages/workflow-executor/test/http/executor-http-server.test.ts @@ -8,6 +8,7 @@ import { InvalidPendingDataError, PendingDataNotFoundError, RunNotFoundError, + 
UserMismatchError, } from '../../src/errors'; import ExecutorHttpServer from '../../src/http/executor-http-server'; @@ -23,7 +24,6 @@ function createMockRunner(overrides: Partial = {}): Runner { stop: jest.fn().mockResolvedValue(undefined), triggerPoll: jest.fn().mockResolvedValue(undefined), getRunStepExecutions: jest.fn().mockResolvedValue([]), - patchPendingData: jest.fn().mockResolvedValue(undefined), ...overrides, } as unknown as Runner; } @@ -69,7 +69,7 @@ describe('ExecutorHttpServer', () => { it('should return 401 when token is signed with wrong secret', async () => { const server = createServer(); - const token = signToken({ id: 'user-1' }, 'wrong-secret'); + const token = signToken({ id: 1 }, 'wrong-secret'); const response = await request(server.callback) .get('/runs/run-1') @@ -81,7 +81,7 @@ describe('ExecutorHttpServer', () => { it('should return 401 when token is expired', async () => { const server = createServer(); - const token = signToken({ id: 'user-1' }, AUTH_SECRET, { expiresIn: '0s' }); + const token = signToken({ id: 1 }, AUTH_SECRET, { expiresIn: '0s' }); // Small delay to ensure token is expired await new Promise(resolve => { @@ -109,7 +109,7 @@ describe('ExecutorHttpServer', () => { it('should accept valid token in Authorization header', async () => { const server = createServer(); - const token = signToken({ id: 'user-1' }); + const token = signToken({ id: 1 }); const response = await request(server.callback) .get('/runs/run-1') @@ -120,7 +120,7 @@ describe('ExecutorHttpServer', () => { it('should accept valid token in forest_session_token cookie', async () => { const server = createServer(); - const token = signToken({ id: 'user-1' }); + const token = signToken({ id: 1 }); const response = await request(server.callback) .get('/runs/run-1') @@ -162,7 +162,7 @@ describe('ExecutorHttpServer', () => { hasRunAccess: jest.fn().mockResolvedValue(false), }); const server = createServer({ workflowPort }); - const token = signToken({ id: 'user-1' 
}); + const token = signToken({ id: 1 }); const response = await request(server.callback) .get('/runs/run-1') @@ -172,45 +172,36 @@ describe('ExecutorHttpServer', () => { expect(response.body).toEqual({ error: 'Forbidden' }); }); - it('returns 403 when hasRunAccess returns false on POST /runs/:runId/trigger', async () => { - const workflowPort = createMockWorkflowPort({ - hasRunAccess: jest.fn().mockResolvedValue(false), - }); - const server = createServer({ workflowPort }); - const token = signToken({ id: 'user-1' }); - - const response = await request(server.callback) - .post('/runs/run-1/trigger') - .set('Authorization', `Bearer ${token}`); - - expect(response.status).toBe(403); - expect(response.body).toEqual({ error: 'Forbidden' }); - }); - - it('calls hasRunAccess with the correct runId and userToken', async () => { + it('calls hasRunAccess with the correct runId and decoded user', async () => { const workflowPort = createMockWorkflowPort(); const server = createServer({ workflowPort }); - const token = signToken({ id: 'user-1' }); + const token = signToken({ id: 1 }); const response = await request(server.callback) .get('/runs/run-42') .set('Authorization', `Bearer ${token}`); expect(response.status).toBe(200); - expect(workflowPort.hasRunAccess).toHaveBeenCalledWith('run-42', token); + expect(workflowPort.hasRunAccess).toHaveBeenCalledWith( + 'run-42', + expect.objectContaining({ id: 1 }), + ); }); - it('calls hasRunAccess with token from cookie', async () => { + it('calls hasRunAccess with decoded user from cookie token', async () => { const workflowPort = createMockWorkflowPort(); const server = createServer({ workflowPort }); - const token = signToken({ id: 'user-1' }); + const token = signToken({ id: 1 }); const response = await request(server.callback) .get('/runs/run-cookie') .set('Cookie', `forest_session_token=${token}`); expect(response.status).toBe(200); - expect(workflowPort.hasRunAccess).toHaveBeenCalledWith('run-cookie', token); + 
expect(workflowPort.hasRunAccess).toHaveBeenCalledWith( + 'run-cookie', + expect.objectContaining({ id: 1 }), + ); }); it('returns 503 when hasRunAccess throws', async () => { @@ -219,7 +210,7 @@ describe('ExecutorHttpServer', () => { hasRunAccess: jest.fn().mockRejectedValue(new Error('orchestrator down')), }); const server = createServer({ workflowPort, logger }); - const token = signToken({ id: 'user-1' }); + const token = signToken({ id: 1 }); const response = await request(server.callback) .get('/runs/run-1') @@ -239,43 +230,12 @@ describe('ExecutorHttpServer', () => { hasRunAccess: jest.fn().mockResolvedValue(false), }); const server = createServer({ runner, workflowPort }); - const token = signToken({ id: 'user-1' }); + const token = signToken({ id: 1 }); await request(server.callback).get('/runs/run-1').set('Authorization', `Bearer ${token}`); expect(runner.getRunStepExecutions).not.toHaveBeenCalled(); }); - - it('does not call triggerPoll when hasRunAccess returns false', async () => { - const runner = createMockRunner(); - const workflowPort = createMockWorkflowPort({ - hasRunAccess: jest.fn().mockResolvedValue(false), - }); - const server = createServer({ runner, workflowPort }); - const token = signToken({ id: 'user-1' }); - - await request(server.callback) - .post('/runs/run-1/trigger') - .set('Authorization', `Bearer ${token}`); - - expect(runner.triggerPoll).not.toHaveBeenCalled(); - }); - - it('returns 403 when hasRunAccess returns false on PATCH /runs/:runId/steps/:stepIndex/pending-data', async () => { - const workflowPort = createMockWorkflowPort({ - hasRunAccess: jest.fn().mockResolvedValue(false), - }); - const server = createServer({ workflowPort }); - const token = signToken({ id: 'user-1' }); - - const response = await request(server.callback) - .patch('/runs/run-1/steps/0/pending-data') - .set('Authorization', `Bearer ${token}`) - .send({ userConfirmed: true }); - - expect(response.status).toBe(403); - expect(response.body).toEqual({ error: 
'Forbidden' }); - }); }); describe('GET /runs/:runId', () => { @@ -287,7 +247,7 @@ describe('ExecutorHttpServer', () => { }); const server = createServer({ runner }); - const token = signToken({ id: 'user-1' }); + const token = signToken({ id: 1 }); const response = await request(server.callback) .get('/runs/run-1') @@ -304,7 +264,7 @@ describe('ExecutorHttpServer', () => { }); const server = createServer({ runner }); - const token = signToken({ id: 'user-1' }); + const token = signToken({ id: 1 }); const response = await request(server.callback) .get('/runs/run-1') @@ -316,10 +276,10 @@ describe('ExecutorHttpServer', () => { }); describe('POST /runs/:runId/trigger', () => { - it('should call runner.triggerPoll with the runId', async () => { + it('should call runner.triggerPoll with runId and options', async () => { const runner = createMockRunner(); const server = createServer({ runner }); - const token = signToken({ id: 'user-1' }); + const token = signToken({ id: 1 }); const response = await request(server.callback) .post('/runs/run-1/trigger') @@ -327,120 +287,121 @@ describe('ExecutorHttpServer', () => { expect(response.status).toBe(200); expect(response.body).toEqual({ triggered: true }); - expect(runner.triggerPoll).toHaveBeenCalledWith('run-1'); - }); - - it('returns 404 when triggerPoll rejects with RunNotFoundError', async () => { - const runner = createMockRunner({ - triggerPoll: jest.fn().mockRejectedValue(new RunNotFoundError('run-1')), + expect(runner.triggerPoll).toHaveBeenCalledWith('run-1', { + pendingData: undefined, + bearerUserId: 1, }); + }); + it('returns 400 when token has no numeric id', async () => { + const runner = createMockRunner(); const server = createServer({ runner }); - const token = signToken({ id: 'user-1' }); + const token = signToken({ email: 'no-id@example.com' }); const response = await request(server.callback) .post('/runs/run-1/trigger') .set('Authorization', `Bearer ${token}`); - expect(response.status).toBe(404); - 
expect(response.body).toEqual({ error: 'Run not found or unavailable' }); + expect(response.status).toBe(400); + expect(response.body).toEqual({ error: 'Missing or invalid user id in token' }); + expect(runner.triggerPoll).not.toHaveBeenCalled(); }); - it('returns 500 when triggerPoll rejects with an unexpected error', async () => { + it('passes pendingData from request body to runner.triggerPoll', async () => { + const runner = createMockRunner(); + const server = createServer({ runner }); + const token = signToken({ id: 1 }); + + const response = await request(server.callback) + .post('/runs/run-1/trigger') + .set('Authorization', `Bearer ${token}`) + .send({ pendingData: { userConfirmed: true } }); + + expect(response.status).toBe(200); + expect(runner.triggerPoll).toHaveBeenCalledWith('run-1', { + pendingData: { userConfirmed: true }, + bearerUserId: 1, + }); + }); + + it('returns 404 when triggerPoll rejects with RunNotFoundError', async () => { const runner = createMockRunner({ - triggerPoll: jest.fn().mockRejectedValue(new Error('unexpected')), + triggerPoll: jest.fn().mockRejectedValue(new RunNotFoundError('run-1')), }); const server = createServer({ runner }); - const token = signToken({ id: 'user-1' }); + const token = signToken({ id: 1 }); const response = await request(server.callback) .post('/runs/run-1/trigger') .set('Authorization', `Bearer ${token}`); - expect(response.status).toBe(500); - expect(response.body).toEqual({ error: 'Internal server error' }); + expect(response.status).toBe(404); + expect(response.body).toEqual({ error: 'Run not found or unavailable' }); }); - }); - describe('PATCH /runs/:runId/steps/:stepIndex/pending-data', () => { - it('returns 204 when patchPendingData succeeds', async () => { + it('returns 403 when triggerPoll rejects with UserMismatchError', async () => { const runner = createMockRunner({ - patchPendingData: jest.fn().mockResolvedValue(undefined), + triggerPoll: jest.fn().mockRejectedValue(new 
UserMismatchError('run-1')), }); + const server = createServer({ runner }); - const token = signToken({ id: 'user-1' }); + const token = signToken({ id: 1 }); const response = await request(server.callback) - .patch('/runs/run-1/steps/2/pending-data') - .set('Authorization', `Bearer ${token}`) - .send({ userConfirmed: true }); + .post('/runs/run-1/trigger') + .set('Authorization', `Bearer ${token}`); - expect(response.status).toBe(204); - expect(runner.patchPendingData).toHaveBeenCalledWith('run-1', 2, { userConfirmed: true }); + expect(response.status).toBe(403); + expect(response.body).toEqual({ error: 'Forbidden' }); }); - it('returns 404 when patchPendingData throws PendingDataNotFoundError', async () => { + it('returns 404 when triggerPoll rejects with PendingDataNotFoundError', async () => { const runner = createMockRunner({ - patchPendingData: jest.fn().mockRejectedValue(new PendingDataNotFoundError('run-1', 0)), + triggerPoll: jest.fn().mockRejectedValue(new PendingDataNotFoundError('run-1', 0)), }); + const server = createServer({ runner }); - const token = signToken({ id: 'user-1' }); + const token = signToken({ id: 1 }); const response = await request(server.callback) - .patch('/runs/run-1/steps/0/pending-data') - .set('Authorization', `Bearer ${token}`) - .send({ userConfirmed: true }); + .post('/runs/run-1/trigger') + .set('Authorization', `Bearer ${token}`); expect(response.status).toBe(404); expect(response.body).toEqual({ error: 'Step execution not found or has no pending data' }); }); - it('returns 400 with details when patchPendingData throws InvalidPendingDataError', async () => { + it('returns 400 when triggerPoll rejects with InvalidPendingDataError', async () => { const issues = [ { path: ['userConfirmed'], message: 'Expected boolean', code: 'invalid_type' }, ]; const runner = createMockRunner({ - patchPendingData: jest.fn().mockRejectedValue(new InvalidPendingDataError(issues)), + triggerPoll: jest.fn().mockRejectedValue(new 
InvalidPendingDataError(issues)), }); - const server = createServer({ runner }); - const token = signToken({ id: 'user-1' }); - - const response = await request(server.callback) - .patch('/runs/run-1/steps/0/pending-data') - .set('Authorization', `Bearer ${token}`) - .send({ userConfirmed: 'yes' }); - - expect(response.status).toBe(400); - expect(response.body).toEqual({ error: 'Invalid request body', details: issues }); - }); - it('returns 400 when stepIndex is not a valid integer', async () => { - const runner = createMockRunner(); const server = createServer({ runner }); - const token = signToken({ id: 'user-1' }); + const token = signToken({ id: 1 }); const response = await request(server.callback) - .patch('/runs/run-1/steps/abc/pending-data') - .set('Authorization', `Bearer ${token}`) - .send({ userConfirmed: true }); + .post('/runs/run-1/trigger') + .set('Authorization', `Bearer ${token}`); expect(response.status).toBe(400); - expect(response.body).toEqual({ error: 'Invalid stepIndex' }); - expect(runner.patchPendingData).not.toHaveBeenCalled(); + expect(response.body).toEqual({ error: 'Invalid request body', details: issues }); }); - it('returns 500 when patchPendingData throws an unexpected error', async () => { + it('returns 500 when triggerPoll rejects with an unexpected error', async () => { const runner = createMockRunner({ - patchPendingData: jest.fn().mockRejectedValue(new Error('disk full')), + triggerPoll: jest.fn().mockRejectedValue(new Error('unexpected')), }); + const server = createServer({ runner }); - const token = signToken({ id: 'user-1' }); + const token = signToken({ id: 1 }); const response = await request(server.callback) - .patch('/runs/run-1/steps/0/pending-data') - .set('Authorization', `Bearer ${token}`) - .send({ userConfirmed: true }); + .post('/runs/run-1/trigger') + .set('Authorization', `Bearer ${token}`); expect(response.status).toBe(500); expect(response.body).toEqual({ error: 'Internal server error' }); diff --git 
a/packages/workflow-executor/test/integration/workflow-execution.test.ts b/packages/workflow-executor/test/integration/workflow-execution.test.ts new file mode 100644 index 0000000000..09c5ec5536 --- /dev/null +++ b/packages/workflow-executor/test/integration/workflow-execution.test.ts @@ -0,0 +1,783 @@ +import type { AgentPort } from '../../src/ports/agent-port'; +import type { WorkflowPort } from '../../src/ports/workflow-port'; +import type { PendingStepExecution, StepUser } from '../../src/types/execution'; +import type { CollectionSchema } from '../../src/types/record'; +import type { AiClient, BaseChatModel, RemoteTool } from '@forestadmin/ai-proxy'; + +import jsonwebtoken from 'jsonwebtoken'; +import request from 'supertest'; +import { z } from 'zod'; + +import ExecutorHttpServer from '../../src/http/executor-http-server'; +import Runner from '../../src/runner'; +import SchemaCache from '../../src/schema-cache'; +import InMemoryStore from '../../src/stores/in-memory-store'; +import { StepType } from '../../src/types/step-definition'; + +// --------------------------------------------------------------------------- +// Constants +// --------------------------------------------------------------------------- + +const AUTH_SECRET = 'test-auth-secret'; +const ENV_SECRET = 'a'.repeat(64); + +const STEP_USER: StepUser = { + id: 1, + email: 'john@example.com', + firstName: 'John', + lastName: 'Doe', + team: 'Operations', + renderingId: 1, + role: 'admin', + permissionLevel: 'admin', + tags: {}, +}; + +const COLLECTION_SCHEMA: CollectionSchema = { + collectionName: 'customers', + collectionDisplayName: 'Customers', + primaryKeyFields: ['id'], + fields: [ + { fieldName: 'id', displayName: 'Id', isRelationship: false }, + { fieldName: 'email', displayName: 'Email', isRelationship: false }, + { fieldName: 'name', displayName: 'Name', isRelationship: false }, + ], + actions: [], +}; + +const COLLECTION_SCHEMA_WITH_STATUS: CollectionSchema = { + collectionName: 
'customers', + collectionDisplayName: 'Customers', + primaryKeyFields: ['id'], + fields: [ + { fieldName: 'id', displayName: 'Id', isRelationship: false }, + { fieldName: 'status', displayName: 'Status', isRelationship: false }, + ], + actions: [], +}; + +const COLLECTION_SCHEMA_WITH_ACTIONS: CollectionSchema = { + collectionName: 'customers', + collectionDisplayName: 'Customers', + primaryKeyFields: ['id'], + fields: [{ fieldName: 'id', displayName: 'Id', isRelationship: false }], + actions: [ + { name: 'send_email', displayName: 'Send Email', endpoint: '/forest/actions/send-email' }, + ], +}; + +const COLLECTION_SCHEMA_WITH_RELATION: CollectionSchema = { + collectionName: 'customers', + collectionDisplayName: 'Customers', + primaryKeyFields: ['id'], + fields: [ + { fieldName: 'id', displayName: 'Id', isRelationship: false }, + { + fieldName: 'order', + displayName: 'Order', + isRelationship: true, + relationType: 'BelongsTo', + relatedCollectionName: 'orders', + }, + ], + actions: [], +}; + +const ORDERS_SCHEMA: CollectionSchema = { + collectionName: 'orders', + collectionDisplayName: 'Orders', + primaryKeyFields: ['id'], + fields: [ + { fieldName: 'id', displayName: 'Id', isRelationship: false }, + { fieldName: 'total', displayName: 'Total', isRelationship: false }, + ], + actions: [], +}; + +const BASE_RECORD_REF = { collectionName: 'customers', recordId: [42], stepIndex: 0 }; + +// --------------------------------------------------------------------------- +// Mock builders +// --------------------------------------------------------------------------- + +function signToken(payload: object) { + return jsonwebtoken.sign(payload, AUTH_SECRET, { expiresIn: '1h' }); +} + +function createMockModel(toolCallArgs: Record): BaseChatModel { + const invoke = jest.fn().mockResolvedValue({ + tool_calls: [{ name: 'read-selected-record-fields', args: toolCallArgs }], + }); + + return { invoke, bindTools: jest.fn().mockReturnThis() } as unknown as BaseChatModel; +} + 
+function createSequentialMockModel( + ...responses: Array<{ name: string; args: Record }> +): BaseChatModel { + const invoke = jest.fn(); + + for (const resp of responses) { + invoke.mockResolvedValueOnce({ tool_calls: [{ name: resp.name, args: resp.args }] }); + } + + return { invoke, bindTools: jest.fn().mockReturnThis() } as unknown as BaseChatModel; +} + +function createMockAiClient(model: BaseChatModel): AiClient { + return { + getModel: jest.fn().mockReturnValue(model), + loadRemoteTools: jest.fn().mockResolvedValue([]), + closeConnections: jest.fn().mockResolvedValue(undefined), + } as unknown as AiClient; +} + +function createMockWorkflowPort(overrides: Partial = {}): jest.Mocked { + return { + getPendingStepExecutions: jest.fn().mockResolvedValue([]), + getPendingStepExecutionsForRun: jest.fn().mockResolvedValue(null), + updateStepExecution: jest.fn().mockResolvedValue(undefined), + getCollectionSchema: jest.fn().mockResolvedValue(COLLECTION_SCHEMA), + getMcpServerConfigs: jest.fn().mockResolvedValue([]), + hasRunAccess: jest.fn().mockResolvedValue(true), + ...overrides, + } as jest.Mocked; +} + +function createMockAgentPort(): jest.Mocked { + return { + getRecord: jest.fn().mockResolvedValue({ + collectionName: 'customers', + recordId: [42], + values: { id: 42, email: 'john@example.com', name: 'John Doe' }, + }), + updateRecord: jest.fn().mockResolvedValue({ + collectionName: 'customers', + recordId: [42], + values: { id: 42, status: 'active' }, + }), + getRelatedData: jest.fn().mockResolvedValue([]), + executeAction: jest.fn().mockResolvedValue(undefined), + } as jest.Mocked; +} + +// --------------------------------------------------------------------------- +// Integration setup +// --------------------------------------------------------------------------- + +function createIntegrationSetup(overrides?: { + workflowPort?: jest.Mocked; + model?: BaseChatModel; + agentPort?: jest.Mocked; + aiClient?: AiClient; + pollingIntervalMs?: number; +}) { + const 
model = overrides?.model ?? createMockModel({ fieldNames: ['Email'] }); + const aiClient = overrides?.aiClient ?? createMockAiClient(model); + const workflowPort = overrides?.workflowPort ?? createMockWorkflowPort(); + const agentPort = overrides?.agentPort ?? createMockAgentPort(); + const runStore = new InMemoryStore(); + const schemaCache = new SchemaCache(); + + const runner = new Runner({ + agentPort, + workflowPort, + runStore, + schemaCache, + aiClient, + pollingIntervalMs: overrides?.pollingIntervalMs ?? 60_000, + envSecret: ENV_SECRET, + authSecret: AUTH_SECRET, + }); + + const server = new ExecutorHttpServer({ + port: 0, + runner, + authSecret: AUTH_SECRET, + workflowPort, + }); + + return { runner, server, workflowPort, agentPort, runStore, aiClient, model }; +} + +function buildPendingStep( + overrides: Partial & Pick, +): PendingStepExecution { + return { + runId: 'run-1', + stepId: 'step-1', + stepIndex: 0, + baseRecordRef: BASE_RECORD_REF, + previousSteps: [], + user: STEP_USER, + ...overrides, + }; +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +describe('workflow execution (integration)', () => { + it('read-record happy path: trigger → AI selects field → read record → success', async () => { + const workflowPort = createMockWorkflowPort({ + getPendingStepExecutionsForRun: jest.fn().mockResolvedValue({ + runId: 'run-1', + stepId: 'step-1', + stepIndex: 0, + baseRecordRef: { collectionName: 'customers', recordId: [42], stepIndex: 0 }, + stepDefinition: { type: StepType.ReadRecord, prompt: 'Read the customer email' }, + previousSteps: [], + user: STEP_USER, + }), + }); + + const { server, agentPort, runStore } = createIntegrationSetup({ workflowPort }); + await runStore.init(); + + const token = signToken({ id: STEP_USER.id }); + + // Act — the front triggers the step + const response = await request(server.callback) + 
.post('/runs/run-1/trigger') + .set('Authorization', `Bearer ${token}`) + .send(); + + // Assert — HTTP response + expect(response.status).toBe(200); + expect(response.body).toEqual({ triggered: true }); + + // Assert — orchestrator was notified with success + expect(workflowPort.updateStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ + type: 'record-task', + status: 'success', + stepId: 'step-1', + stepIndex: 0, + }), + ); + + // Assert — agent was called to read the record + expect(agentPort.getRecord).toHaveBeenCalledWith( + expect.objectContaining({ + collection: 'customers', + id: [42], + fields: ['email'], + }), + expect.objectContaining({ id: STEP_USER.id }), + ); + + // Assert — step data was saved in the RunStore + const steps = await runStore.getStepExecutions('run-1'); + expect(steps).toHaveLength(1); + expect(steps[0]).toEqual( + expect.objectContaining({ + type: 'read-record', + stepIndex: 0, + executionResult: { + fields: [{ value: 'john@example.com', name: 'email', displayName: 'Email' }], + }, + }), + ); + }); + + // ------------------------------------------------------------------------- + // 1. 
Condition step: AI chooses an option → success + // ------------------------------------------------------------------------- + + it('condition: AI chooses an option → success', async () => { + const model = createMockModel({ + option: 'Yes', + reasoning: 'Customer is active', + question: 'Is active?', + }); + + const step = buildPendingStep({ + stepDefinition: { + type: StepType.Condition, + options: ['Yes', 'No'], + prompt: 'Is the customer active?', + }, + }); + + const workflowPort = createMockWorkflowPort({ + getPendingStepExecutionsForRun: jest.fn().mockResolvedValue(step), + }); + + const { server, runStore } = createIntegrationSetup({ workflowPort, model }); + await runStore.init(); + + const token = signToken({ id: STEP_USER.id }); + + const response = await request(server.callback) + .post('/runs/run-1/trigger') + .set('Authorization', `Bearer ${token}`) + .send(); + + expect(response.status).toBe(200); + + expect(workflowPort.updateStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ + type: 'condition', + status: 'success', + stepId: 'step-1', + stepIndex: 0, + selectedOption: 'Yes', + }), + ); + }); + + // ------------------------------------------------------------------------- + // 2. 
Update-record: awaiting-input → confirm → success + // ------------------------------------------------------------------------- + + it('update-record: awaiting-input → confirm → success', async () => { + const model = createMockModel({ + fieldName: 'Status', + value: 'active', + reasoning: 'update status', + }); + + const step = buildPendingStep({ + stepDefinition: { type: StepType.UpdateRecord, prompt: 'Update the status' }, + }); + + const workflowPort = createMockWorkflowPort({ + getPendingStepExecutionsForRun: jest.fn().mockResolvedValue(step), + getCollectionSchema: jest.fn().mockResolvedValue(COLLECTION_SCHEMA_WITH_STATUS), + }); + + const { server, agentPort, runStore } = createIntegrationSetup({ workflowPort, model }); + await runStore.init(); + + const token = signToken({ id: STEP_USER.id }); + + // 1st trigger → awaiting-input + const res1 = await request(server.callback) + .post('/runs/run-1/trigger') + .set('Authorization', `Bearer ${token}`) + .send(); + + expect(res1.status).toBe(200); + expect(workflowPort.updateStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ type: 'record-task', status: 'awaiting-input' }), + ); + + // 2nd trigger with userConfirmed: true → success + const res2 = await request(server.callback) + .post('/runs/run-1/trigger') + .set('Authorization', `Bearer ${token}`) + .send({ pendingData: { userConfirmed: true } }); + + expect(res2.status).toBe(200); + expect(agentPort.updateRecord).toHaveBeenCalledWith( + expect.objectContaining({ + collection: 'customers', + id: [42], + values: { status: 'active' }, + }), + expect.objectContaining({ id: STEP_USER.id }), + ); + expect(workflowPort.updateStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ type: 'record-task', status: 'success' }), + ); + }); + + // ------------------------------------------------------------------------- + // 3. 
Trigger-action: awaiting-input → confirm → success + // ------------------------------------------------------------------------- + + it('trigger-action: awaiting-input → confirm → success', async () => { + const model = createMockModel({ + actionName: 'Send Email', + reasoning: 'send email', + }); + + const step = buildPendingStep({ + stepDefinition: { type: StepType.TriggerAction, prompt: 'Send the email' }, + }); + + const workflowPort = createMockWorkflowPort({ + getPendingStepExecutionsForRun: jest.fn().mockResolvedValue(step), + getCollectionSchema: jest.fn().mockResolvedValue(COLLECTION_SCHEMA_WITH_ACTIONS), + }); + + const { server, agentPort, runStore } = createIntegrationSetup({ workflowPort, model }); + await runStore.init(); + + const token = signToken({ id: STEP_USER.id }); + + // 1st trigger → awaiting-input + const res1 = await request(server.callback) + .post('/runs/run-1/trigger') + .set('Authorization', `Bearer ${token}`) + .send(); + + expect(res1.status).toBe(200); + expect(workflowPort.updateStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ type: 'record-task', status: 'awaiting-input' }), + ); + + // 2nd trigger with userConfirmed: true → success + const res2 = await request(server.callback) + .post('/runs/run-1/trigger') + .set('Authorization', `Bearer ${token}`) + .send({ pendingData: { userConfirmed: true } }); + + expect(res2.status).toBe(200); + expect(agentPort.executeAction).toHaveBeenCalledWith( + expect.objectContaining({ + collection: 'customers', + action: 'send_email', + id: [42], + }), + expect.objectContaining({ id: STEP_USER.id }), + ); + expect(workflowPort.updateStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ type: 'record-task', status: 'success' }), + ); + }); + + // ------------------------------------------------------------------------- + // 4. 
Load-related-record: BelongsTo → awaiting-input → confirm → success + // ------------------------------------------------------------------------- + + it('load-related-record: BelongsTo → awaiting-input → confirm → success', async () => { + const model = createMockModel({ + relationName: 'Order', + reasoning: 'load order', + }); + + const step = buildPendingStep({ + stepDefinition: { type: StepType.LoadRelatedRecord, prompt: 'Load the order' }, + }); + + const workflowPort = createMockWorkflowPort({ + getPendingStepExecutionsForRun: jest.fn().mockResolvedValue(step), + getCollectionSchema: jest.fn().mockImplementation(async (collectionName: string) => { + if (collectionName === 'orders') return ORDERS_SCHEMA; + + return COLLECTION_SCHEMA_WITH_RELATION; + }), + }); + + const agentPort = createMockAgentPort(); + agentPort.getRelatedData.mockResolvedValue([ + { collectionName: 'orders', recordId: [99], values: { id: 99, total: 100 } }, + ]); + + const { server, runStore } = createIntegrationSetup({ + workflowPort, + model, + agentPort, + }); + await runStore.init(); + + const token = signToken({ id: STEP_USER.id }); + + // 1st trigger → awaiting-input + const res1 = await request(server.callback) + .post('/runs/run-1/trigger') + .set('Authorization', `Bearer ${token}`) + .send(); + + expect(res1.status).toBe(200); + expect(workflowPort.updateStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ type: 'record-task', status: 'awaiting-input' }), + ); + + // 2nd trigger with userConfirmed: true → success + const res2 = await request(server.callback) + .post('/runs/run-1/trigger') + .set('Authorization', `Bearer ${token}`) + .send({ pendingData: { userConfirmed: true } }); + + expect(res2.status).toBe(200); + expect(workflowPort.updateStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ type: 'record-task', status: 'success' }), + ); + + const steps = await runStore.getStepExecutions('run-1'); + expect(steps).toHaveLength(1); + 
expect(steps[0]).toEqual( + expect.objectContaining({ + type: 'load-related-record', + executionResult: { + relation: { name: 'order', displayName: 'Order' }, + record: { collectionName: 'orders', recordId: [99], stepIndex: 0 }, + }, + }), + ); + }); + + // ------------------------------------------------------------------------- + // 5. MCP task: awaiting-input → confirm → success + // ------------------------------------------------------------------------- + + it('mcp-task: awaiting-input → confirm → success', async () => { + const mcpToolInvoke = jest.fn().mockResolvedValue('OK'); + const fakeRemoteTool = { + base: { + name: 'send_notification', + description: 'Send a notification', + schema: z.object({ message: z.string() }), + invoke: mcpToolInvoke, + }, + sourceId: 'mcp-1', + sourceType: 'mcp', + } as unknown as RemoteTool; + + const model = createSequentialMockModel( + { name: 'send_notification', args: { message: 'Hello' } }, + { name: 'summarize-result', args: { summary: 'Notification sent' } }, + ); + + const aiClient = createMockAiClient(model); + (aiClient.loadRemoteTools as jest.Mock).mockResolvedValue([fakeRemoteTool]); + + const step = buildPendingStep({ + stepDefinition: { type: StepType.McpTask, prompt: 'Send a notification' }, + }); + + const workflowPort = createMockWorkflowPort({ + getPendingStepExecutionsForRun: jest.fn().mockResolvedValue(step), + getMcpServerConfigs: jest + .fn() + .mockResolvedValue([{ type: 'sse', configs: { 'mcp-1': { url: 'http://fake' } } }]), + }); + + const { server, runStore } = createIntegrationSetup({ + workflowPort, + model, + aiClient, + }); + await runStore.init(); + + const token = signToken({ id: STEP_USER.id }); + + // 1st trigger → awaiting-input + const res1 = await request(server.callback) + .post('/runs/run-1/trigger') + .set('Authorization', `Bearer ${token}`) + .send(); + + expect(res1.status).toBe(200); + expect(workflowPort.updateStepExecution).toHaveBeenCalledWith( + 'run-1', + 
expect.objectContaining({ type: 'mcp-task', status: 'awaiting-input' }), + ); + + // 2nd trigger with userConfirmed: true → tool executed → success + const res2 = await request(server.callback) + .post('/runs/run-1/trigger') + .set('Authorization', `Bearer ${token}`) + .send({ pendingData: { userConfirmed: true } }); + + expect(res2.status).toBe(200); + expect(mcpToolInvoke).toHaveBeenCalledWith({ message: 'Hello' }); + expect(workflowPort.updateStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ type: 'mcp-task', status: 'success' }), + ); + }); + + // ------------------------------------------------------------------------- + // 6. User mismatch → 403 + // ------------------------------------------------------------------------- + + it('user mismatch → HTTP 403', async () => { + const step = buildPendingStep({ + stepDefinition: { type: StepType.ReadRecord, prompt: 'Read email' }, + user: { ...STEP_USER, id: 1 }, + }); + + const workflowPort = createMockWorkflowPort({ + getPendingStepExecutionsForRun: jest.fn().mockResolvedValue(step), + }); + + const { server, runStore } = createIntegrationSetup({ workflowPort }); + await runStore.init(); + + // Sign token with a different user id + const token = signToken({ id: 999 }); + + const response = await request(server.callback) + .post('/runs/run-1/trigger') + .set('Authorization', `Bearer ${token}`) + .send(); + + expect(response.status).toBe(403); + expect(response.body).toEqual({ error: 'Forbidden' }); + expect(workflowPort.updateStepExecution).not.toHaveBeenCalled(); + }); + + // ------------------------------------------------------------------------- + // 7. 
GET /runs/:runId after trigger returns saved step data + // ------------------------------------------------------------------------- + + it('GET /runs/:runId returns step data after trigger', async () => { + const step = buildPendingStep({ + stepDefinition: { type: StepType.ReadRecord, prompt: 'Read the customer email' }, + }); + + const workflowPort = createMockWorkflowPort({ + getPendingStepExecutionsForRun: jest.fn().mockResolvedValue(step), + }); + + const { server, runStore } = createIntegrationSetup({ workflowPort }); + await runStore.init(); + + const token = signToken({ id: STEP_USER.id }); + + // Trigger the step first + await request(server.callback) + .post('/runs/run-1/trigger') + .set('Authorization', `Bearer ${token}`) + .send(); + + // GET the run data + const response = await request(server.callback) + .get('/runs/run-1') + .set('Authorization', `Bearer ${token}`) + .send(); + + expect(response.status).toBe(200); + expect(response.body.steps).toHaveLength(1); + expect(response.body.steps[0]).toEqual( + expect.objectContaining({ + type: 'read-record', + stepIndex: 0, + }), + ); + }); + + // ------------------------------------------------------------------------- + // 8. Run not found → 404 + // ------------------------------------------------------------------------- + + it('run not found → HTTP 404', async () => { + // Default mock returns null for getPendingStepExecutionsForRun + const { server, runStore, workflowPort } = createIntegrationSetup(); + await runStore.init(); + + const token = signToken({ id: STEP_USER.id }); + + const response = await request(server.callback) + .post('/runs/run-unknown/trigger') + .set('Authorization', `Bearer ${token}`) + .send(); + + expect(response.status).toBe(404); + expect(response.body).toEqual({ error: 'Run not found or unavailable' }); + expect(workflowPort.updateStepExecution).not.toHaveBeenCalled(); + }); + + // ------------------------------------------------------------------------- + // 9. 
Skip step (userConfirmed: false) → success without side effects + // ------------------------------------------------------------------------- + + it('skip step (userConfirmed: false) → success without executing action', async () => { + const model = createMockModel({ + fieldName: 'Status', + value: 'active', + reasoning: 'update status', + }); + + const step = buildPendingStep({ + stepDefinition: { type: StepType.UpdateRecord, prompt: 'Update the status' }, + }); + + const workflowPort = createMockWorkflowPort({ + getPendingStepExecutionsForRun: jest.fn().mockResolvedValue(step), + getCollectionSchema: jest.fn().mockResolvedValue(COLLECTION_SCHEMA_WITH_STATUS), + }); + + const { server, agentPort, runStore } = createIntegrationSetup({ workflowPort, model }); + await runStore.init(); + + const token = signToken({ id: STEP_USER.id }); + + // 1st trigger → awaiting-input + await request(server.callback) + .post('/runs/run-1/trigger') + .set('Authorization', `Bearer ${token}`) + .send(); + + // 2nd trigger with userConfirmed: false → skip + const res2 = await request(server.callback) + .post('/runs/run-1/trigger') + .set('Authorization', `Bearer ${token}`) + .send({ pendingData: { userConfirmed: false } }); + + expect(res2.status).toBe(200); + expect(agentPort.updateRecord).not.toHaveBeenCalled(); + expect(workflowPort.updateStepExecution).toHaveBeenLastCalledWith( + 'run-1', + expect.objectContaining({ type: 'record-task', status: 'success' }), + ); + + const steps = await runStore.getStepExecutions('run-1'); + expect(steps[0]).toEqual(expect.objectContaining({ executionResult: { skipped: true } })); + }); + + // ------------------------------------------------------------------------- + // 10. 
Polling executes a step + // ------------------------------------------------------------------------- + + it('polling executes a step', async () => { + const model = createMockModel({ fieldNames: ['Email'] }); + + const pendingStep = buildPendingStep({ + stepDefinition: { type: StepType.ReadRecord, prompt: 'Read the customer email' }, + }); + + const workflowPort = createMockWorkflowPort({ + // Return the step only on the first poll, then empty (to avoid re-execution loops) + getPendingStepExecutions: jest + .fn() + .mockResolvedValueOnce([pendingStep]) + .mockResolvedValue([]), + }); + + const { runner, runStore } = createIntegrationSetup({ + workflowPort, + model, + pollingIntervalMs: 50, + }); + + await runStore.init(); + + // Start the runner (no httpPort → no real server started) + await runner.start(); + + // Wait for the poll cycle to execute the step + await new Promise(resolve => { + const check = setInterval(() => { + if ((workflowPort.updateStepExecution as jest.Mock).mock.calls.length > 0) { + clearInterval(check); + resolve(); + } + }, 20); + }); + + expect(workflowPort.updateStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ + type: 'record-task', + status: 'success', + stepId: 'step-1', + stepIndex: 0, + }), + ); + + await runner.stop(); + }, 10_000); +}); diff --git a/packages/workflow-executor/test/runner.test.ts b/packages/workflow-executor/test/runner.test.ts index 462bdf09d0..e6d4cee424 100644 --- a/packages/workflow-executor/test/runner.test.ts +++ b/packages/workflow-executor/test/runner.test.ts @@ -12,6 +12,7 @@ import { InvalidPendingDataError, PendingDataNotFoundError, RunNotFoundError, + UserMismatchError, } from '../src/errors'; import BaseStepExecutor from '../src/executors/base-step-executor'; import ConditionStepExecutor from '../src/executors/condition-step-executor'; @@ -23,6 +24,7 @@ import TriggerRecordActionStepExecutor from '../src/executors/trigger-record-act import UpdateRecordStepExecutor from 
'../src/executors/update-record-step-executor'; import ExecutorHttpServer from '../src/http/executor-http-server'; import Runner from '../src/runner'; +import SchemaCache from '../src/schema-cache'; import { StepType } from '../src/types/step-definition'; jest.mock('../src/http/executor-http-server'); @@ -86,6 +88,7 @@ function createRunnerConfig( httpPort: number; envSecret: string; authSecret: string; + schemaCache: SchemaCache; }> = {}, ) { return { @@ -100,6 +103,7 @@ function createRunnerConfig( pollingIntervalMs: POLLING_INTERVAL_MS, aiClient: createMockAiClient() as unknown as AiClient, logger: createMockLogger(), + schemaCache: new SchemaCache(), envSecret: VALID_ENV_SECRET, authSecret: VALID_AUTH_SECRET, ...overrides, @@ -130,6 +134,17 @@ function makePendingStep( baseRecordRef: { collectionName: 'customers', recordId: ['1'], stepIndex: 0 }, stepDefinition: makeStepDefinition(stepType), previousSteps: [], + user: { + id: 1, + email: 'test@example.com', + firstName: 'Test', + lastName: 'User', + team: 'admin', + renderingId: 1, + role: 'admin', + permissionLevel: 'admin', + tags: {}, + }, ...rest, }; } @@ -562,6 +577,7 @@ describe('StepExecutorFactory.create — factory', () => { agentPort: {} as AgentPort, workflowPort: {} as WorkflowPort, runStore: {} as RunStore, + schemaCache: new SchemaCache(), logger: { error: jest.fn() }, }); @@ -877,42 +893,116 @@ describe('getRunStepExecutions', () => { }); // --------------------------------------------------------------------------- -// patchPendingData +// triggerPoll with options (bearerUserId, pendingData) // --------------------------------------------------------------------------- -describe('patchPendingData', () => { +describe('triggerPoll with options', () => { + it('succeeds when bearerUserId matches step.user.id', async () => { + const workflowPort = createMockWorkflowPort(); + const step = makePendingStep({ runId: 'run-1' }); // user.id = 1 + 
workflowPort.getPendingStepExecutionsForRun.mockResolvedValue(step); + + runner = new Runner(createRunnerConfig({ workflowPort })); + await expect(runner.triggerPoll('run-1', { bearerUserId: 1 })).resolves.toBeUndefined(); + + expect(executeSpy).toHaveBeenCalledTimes(1); + }); + + it('throws UserMismatchError when bearerUserId does not match step.user.id', async () => { + const workflowPort = createMockWorkflowPort(); + const step = makePendingStep({ runId: 'run-1' }); // user.id = 1 + workflowPort.getPendingStepExecutionsForRun.mockResolvedValue(step); + + runner = new Runner(createRunnerConfig({ workflowPort })); + + await expect(runner.triggerPoll('run-1', { bearerUserId: 999 })).rejects.toThrow( + UserMismatchError, + ); + expect(executeSpy).not.toHaveBeenCalled(); + }); + + it('skips user check when bearerUserId is undefined', async () => { + const workflowPort = createMockWorkflowPort(); + const step = makePendingStep({ runId: 'run-1' }); + workflowPort.getPendingStepExecutionsForRun.mockResolvedValue(step); + + runner = new Runner(createRunnerConfig({ workflowPort })); + await expect(runner.triggerPoll('run-1', {})).resolves.toBeUndefined(); + + expect(executeSpy).toHaveBeenCalledTimes(1); + }); + + it('patches pending data then executes when pendingData is provided', async () => { + const workflowPort = createMockWorkflowPort(); + const step = makePendingStep({ runId: 'run-1', stepIndex: 0 }); + workflowPort.getPendingStepExecutionsForRun.mockResolvedValue(step); + + const runStore = createMockRunStore({ + getStepExecutions: jest.fn().mockResolvedValue([ + { + type: 'update-record', + stepIndex: 0, + pendingData: { fieldName: 'status', value: 'old' }, + }, + ]), + }); + runner = new Runner(createRunnerConfig({ workflowPort, runStore })); + + await runner.triggerPoll('run-1', { pendingData: { userConfirmed: true, value: 'new' } }); + + expect(runStore.saveStepExecution).toHaveBeenCalledWith( + 'run-1', + expect.objectContaining({ + pendingData: { fieldName: 
'status', value: 'new', userConfirmed: true }, + }), + ); + expect(executeSpy).toHaveBeenCalledTimes(1); + }); + it('throws PendingDataNotFoundError when step is not found', async () => { + const workflowPort = createMockWorkflowPort(); + const step = makePendingStep({ runId: 'run-1', stepIndex: 0 }); + workflowPort.getPendingStepExecutionsForRun.mockResolvedValue(step); const runStore = createMockRunStore({ getStepExecutions: jest.fn().mockResolvedValue([]) }); - runner = new Runner(createRunnerConfig({ runStore })); + runner = new Runner(createRunnerConfig({ workflowPort, runStore })); - await expect(runner.patchPendingData('run-1', 0, { userConfirmed: true })).rejects.toThrow( - PendingDataNotFoundError, - ); + await expect( + runner.triggerPoll('run-1', { pendingData: { userConfirmed: true } }), + ).rejects.toThrow(PendingDataNotFoundError); }); it('throws PendingDataNotFoundError when step has no pendingData', async () => { + const workflowPort = createMockWorkflowPort(); + const step = makePendingStep({ runId: 'run-1', stepIndex: 0 }); + workflowPort.getPendingStepExecutionsForRun.mockResolvedValue(step); const runStore = createMockRunStore({ getStepExecutions: jest.fn().mockResolvedValue([{ type: 'update-record', stepIndex: 0 }]), }); - runner = new Runner(createRunnerConfig({ runStore })); + runner = new Runner(createRunnerConfig({ workflowPort, runStore })); - await expect(runner.patchPendingData('run-1', 0, { userConfirmed: true })).rejects.toThrow( - PendingDataNotFoundError, - ); + await expect( + runner.triggerPoll('run-1', { pendingData: { userConfirmed: true } }), + ).rejects.toThrow(PendingDataNotFoundError); }); it('throws PendingDataNotFoundError when step type has no schema (e.g. 
condition)', async () => { + const workflowPort = createMockWorkflowPort(); + const step = makePendingStep({ runId: 'run-1', stepIndex: 0 }); + workflowPort.getPendingStepExecutionsForRun.mockResolvedValue(step); const runStore = createMockRunStore({ getStepExecutions: jest.fn().mockResolvedValue([{ type: 'condition', stepIndex: 0 }]), }); - runner = new Runner(createRunnerConfig({ runStore })); + runner = new Runner(createRunnerConfig({ workflowPort, runStore })); - await expect(runner.patchPendingData('run-1', 0, { userConfirmed: true })).rejects.toThrow( - PendingDataNotFoundError, - ); + await expect( + runner.triggerPoll('run-1', { pendingData: { userConfirmed: true } }), + ).rejects.toThrow(PendingDataNotFoundError); }); it('throws InvalidPendingDataError with mapped issues when body fails Zod validation', async () => { + const workflowPort = createMockWorkflowPort(); + const step = makePendingStep({ runId: 'run-1', stepIndex: 0 }); + workflowPort.getPendingStepExecutionsForRun.mockResolvedValue(step); const runStore = createMockRunStore({ getStepExecutions: jest.fn().mockResolvedValue([ { @@ -922,12 +1012,14 @@ describe('patchPendingData', () => { }, ]), }); - runner = new Runner(createRunnerConfig({ runStore })); + runner = new Runner(createRunnerConfig({ workflowPort, runStore })); - const error = await runner.patchPendingData('run-1', 0, { userConfirmed: 'yes' }).catch(e => e); + const error = await runner + .triggerPoll('run-1', { pendingData: { userConfirmed: 'yes' } }) + .catch((e: unknown) => e); expect(error).toBeInstanceOf(InvalidPendingDataError); - expect(error.issues).toEqual( + expect((error as InvalidPendingDataError).issues).toEqual( expect.arrayContaining([ expect.objectContaining({ path: ['userConfirmed'], code: expect.any(String) }), ]), @@ -935,6 +1027,9 @@ describe('patchPendingData', () => { }); it('throws InvalidPendingDataError when body contains unknown fields', async () => { + const workflowPort = createMockWorkflowPort(); + const 
step = makePendingStep({ runId: 'run-1', stepIndex: 0 }); + workflowPort.getPendingStepExecutionsForRun.mockResolvedValue(step); const runStore = createMockRunStore({ getStepExecutions: jest .fn() @@ -942,14 +1037,19 @@ describe('patchPendingData', () => { { type: 'trigger-action', stepIndex: 0, pendingData: { name: 'send_email' } }, ]), }); - runner = new Runner(createRunnerConfig({ runStore })); + runner = new Runner(createRunnerConfig({ workflowPort, runStore })); await expect( - runner.patchPendingData('run-1', 0, { userConfirmed: true, extra: 'field' }), + runner.triggerPoll('run-1', { + pendingData: { userConfirmed: true, extra: 'field' }, + }), ).rejects.toThrow(InvalidPendingDataError); }); it('update-record: merges value override into pendingData and calls saveStepExecution', async () => { + const workflowPort = createMockWorkflowPort(); + const step = makePendingStep({ runId: 'run-1', stepIndex: 0 }); + workflowPort.getPendingStepExecutionsForRun.mockResolvedValue(step); const existing = { type: 'update-record' as const, stepIndex: 0, @@ -958,9 +1058,11 @@ describe('patchPendingData', () => { const runStore = createMockRunStore({ getStepExecutions: jest.fn().mockResolvedValue([existing]), }); - runner = new Runner(createRunnerConfig({ runStore })); + runner = new Runner(createRunnerConfig({ workflowPort, runStore })); - await runner.patchPendingData('run-1', 0, { userConfirmed: true, value: 'new_value' }); + await runner.triggerPoll('run-1', { + pendingData: { userConfirmed: true, value: 'new_value' }, + }); expect(runStore.saveStepExecution).toHaveBeenCalledWith( 'run-1', @@ -973,6 +1075,9 @@ describe('patchPendingData', () => { }); it('load-related-record: merges selectedRecordId override correctly', async () => { + const workflowPort = createMockWorkflowPort(); + const step = makePendingStep({ runId: 'run-1', stepIndex: 1 }); + workflowPort.getPendingStepExecutionsForRun.mockResolvedValue(step); const existing = { type: 'load-related-record' as const, 
stepIndex: 1, @@ -987,9 +1092,11 @@ describe('patchPendingData', () => { const runStore = createMockRunStore({ getStepExecutions: jest.fn().mockResolvedValue([existing]), }); - runner = new Runner(createRunnerConfig({ runStore })); + runner = new Runner(createRunnerConfig({ workflowPort, runStore })); - await runner.patchPendingData('run-1', 1, { userConfirmed: true, selectedRecordId: ['42'] }); + await runner.triggerPoll('run-1', { + pendingData: { userConfirmed: true, selectedRecordId: ['42'] }, + }); expect(runStore.saveStepExecution).toHaveBeenCalledWith( 'run-1', @@ -1000,6 +1107,9 @@ describe('patchPendingData', () => { }); it('trigger-action: merges userConfirmed:true only, rejects extra field', async () => { + const workflowPort = createMockWorkflowPort(); + const step = makePendingStep({ runId: 'run-1', stepIndex: 0 }); + workflowPort.getPendingStepExecutionsForRun.mockResolvedValue(step); const existing = { type: 'trigger-action' as const, stepIndex: 0, @@ -1008,9 +1118,9 @@ describe('patchPendingData', () => { const runStore = createMockRunStore({ getStepExecutions: jest.fn().mockResolvedValue([existing]), }); - runner = new Runner(createRunnerConfig({ runStore })); + runner = new Runner(createRunnerConfig({ workflowPort, runStore })); - await runner.patchPendingData('run-1', 0, { userConfirmed: true }); + await runner.triggerPoll('run-1', { pendingData: { userConfirmed: true } }); expect(runStore.saveStepExecution).toHaveBeenCalledWith( 'run-1', @@ -1020,7 +1130,9 @@ describe('patchPendingData', () => { ); await expect( - runner.patchPendingData('run-1', 0, { userConfirmed: true, name: 'override' }), + runner.triggerPoll('run-1', { + pendingData: { userConfirmed: true, name: 'override' }, + }), ).rejects.toThrow(InvalidPendingDataError); }); }); diff --git a/packages/workflow-executor/test/schema-cache.test.ts b/packages/workflow-executor/test/schema-cache.test.ts new file mode 100644 index 0000000000..e90a3815c4 --- /dev/null +++ 
b/packages/workflow-executor/test/schema-cache.test.ts @@ -0,0 +1,131 @@ +import type { CollectionSchema } from '../src/types/record'; + +import SchemaCache from '../src/schema-cache'; + +function makeSchema(collectionName: string): CollectionSchema { + return { + collectionName, + collectionDisplayName: collectionName, + primaryKeyFields: ['id'], + fields: [], + actions: [], + }; +} + +describe('SchemaCache', () => { + describe('get / set', () => { + it('returns undefined for unknown keys', () => { + const cache = new SchemaCache(); + + expect(cache.get('unknown')).toBeUndefined(); + }); + + it('returns the schema after set', () => { + const cache = new SchemaCache(); + const schema = makeSchema('customers'); + + cache.set('customers', schema); + + expect(cache.get('customers')).toBe(schema); + }); + + it('overwrites existing entry on set', () => { + const cache = new SchemaCache(); + const old = makeSchema('customers'); + const updated = { ...makeSchema('customers'), primaryKeyFields: ['uid'] }; + + cache.set('customers', old); + cache.set('customers', updated); + + expect(cache.get('customers')).toBe(updated); + }); + }); + + describe('TTL expiration', () => { + it('returns the schema before TTL expires', () => { + let time = 0; + const cache = new SchemaCache(1000, () => time); + const schema = makeSchema('customers'); + + cache.set('customers', schema); + time = 999; + + expect(cache.get('customers')).toBe(schema); + }); + + it('returns undefined after TTL expires', () => { + let time = 0; + const cache = new SchemaCache(1000, () => time); + + cache.set('customers', makeSchema('customers')); + time = 1001; + + expect(cache.get('customers')).toBeUndefined(); + }); + + it('deletes the expired entry from the store on get', () => { + let time = 0; + const cache = new SchemaCache(1000, () => time); + + cache.set('customers', makeSchema('customers')); + time = 1001; + + cache.get('customers'); // triggers delete + time = 0; // rewind — entry should still be gone + + 
expect(cache.get('customers')).toBeUndefined(); + }); + + it('refreshes TTL when entry is re-set', () => { + let time = 0; + const cache = new SchemaCache(1000, () => time); + + cache.set('customers', makeSchema('customers')); + time = 800; + cache.set('customers', makeSchema('customers')); // re-set refreshes + time = 1500; // 700ms since re-set, within TTL + + expect(cache.get('customers')).toBeDefined(); + }); + }); + + describe('iterator', () => { + it('yields all non-expired entries', () => { + const cache = new SchemaCache(); + + cache.set('customers', makeSchema('customers')); + cache.set('orders', makeSchema('orders')); + + const entries = [...cache]; + + expect(entries).toHaveLength(2); + expect(entries[0][0]).toBe('customers'); + expect(entries[1][0]).toBe('orders'); + }); + + it('skips and deletes expired entries', () => { + let time = 0; + const cache = new SchemaCache(1000, () => time); + + cache.set('fresh', makeSchema('fresh')); + time = 500; + cache.set('also-fresh', makeSchema('also-fresh')); + time = 1100; // 'fresh' expired, 'also-fresh' still valid + + const entries = [...cache]; + + expect(entries).toHaveLength(1); + expect(entries[0][0]).toBe('also-fresh'); + + // expired entry was cleaned up + time = 0; + expect(cache.get('fresh')).toBeUndefined(); + }); + + it('returns empty for a fresh cache', () => { + const cache = new SchemaCache(); + + expect([...cache]).toHaveLength(0); + }); + }); +});