Flowise Changes
- .dockerignore +7 -0
- .eslintrc.js +28 -0
- .gitattributes +1 -0
- .gitignore +45 -0
- .husky/pre-commit +5 -0
- .prettierignore +3 -0
- .prettierrc.js +9 -0
- CODE_OF_CONDUCT.md +74 -0
- CONTRIBUTING.md +127 -0
- Dockerfile +33 -0
- LICENSE.md +21 -0
- README.md +1 -0
- assets/Demo.png +0 -0
- assets/FloWiseAI.png +0 -0
- assets/FloWiseAI_black.png +0 -0
- assets/FloWiseAI_dark.png +0 -0
- assets/FloWiseAI_primary.png +0 -0
- babel.config.js +13 -0
- docker/.env.example +3 -0
- docker/Dockerfile +13 -0
- docker/README.md +24 -0
- docker/docker-compose.yml +15 -0
- images/flowise.gif +3 -0
- package.json +56 -0
- packages/components/.env.example +1 -0
- packages/components/README.md +25 -0
- packages/components/gulpfile.ts +9 -0
- packages/components/nodes/agents/AutoGPT/AutoGPT.ts +99 -0
- packages/components/nodes/agents/AutoGPT/autogpt.png +0 -0
- packages/components/nodes/agents/BabyAGI/BabyAGI.ts +63 -0
- packages/components/nodes/agents/BabyAGI/babyagi.jpg +0 -0
- packages/components/nodes/agents/BabyAGI/core.ts +270 -0
- packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts +116 -0
- packages/components/nodes/agents/ConversationalAgent/agent.svg +9 -0
- packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts +60 -0
- packages/components/nodes/agents/MRKLAgentChat/agent.svg +9 -0
- packages/components/nodes/agents/MRKLAgentLLM/MRKLAgentLLM.ts +61 -0
- packages/components/nodes/agents/MRKLAgentLLM/agent.svg +9 -0
- packages/components/nodes/chains/ApiChain/GETApiChain.ts +129 -0
- packages/components/nodes/chains/ApiChain/POSTApiChain.ts +118 -0
- packages/components/nodes/chains/ApiChain/apichain.svg +3 -0
- packages/components/nodes/chains/ApiChain/postCore.ts +162 -0
- packages/components/nodes/chains/ConversationChain/ConversationChain.ts +104 -0
- packages/components/nodes/chains/ConversationChain/chain.svg +6 -0
- packages/components/nodes/chains/ConversationalRetrievalQAChain/ConversationalRetrievalQAChain.ts +163 -0
- packages/components/nodes/chains/ConversationalRetrievalQAChain/chain.svg +6 -0
- packages/components/nodes/chains/LLMChain/LLMChain.ts +167 -0
- packages/components/nodes/chains/LLMChain/chain.svg +6 -0
- packages/components/nodes/chains/MultiPromptChain/MultiPromptChain.ts +74 -0
- packages/components/nodes/chains/MultiPromptChain/chain.svg +6 -0
.dockerignore
ADDED
@@ -0,0 +1,7 @@
+node_modules
+dist
+build
+
+**/node_modules
+**/build
+**/dist
.eslintrc.js
ADDED
@@ -0,0 +1,28 @@
+module.exports = {
+    extends: [
+        'eslint:recommended',
+        'plugin:markdown/recommended',
+        'plugin:react/recommended',
+        'plugin:react/jsx-runtime',
+        'plugin:react-hooks/recommended',
+        'plugin:jsx-a11y/recommended',
+        'plugin:prettier/recommended'
+    ],
+    settings: {
+        react: {
+            version: 'detect'
+        }
+    },
+    parser: '@typescript-eslint/parser',
+    ignorePatterns: ['**/node_modules', '**/dist', '**/build', '**/package-lock.json'],
+    plugins: ['unused-imports'],
+    rules: {
+        '@typescript-eslint/explicit-module-boundary-types': 'off',
+        'no-unused-vars': 'off',
+        'unused-imports/no-unused-imports': 'warn',
+        'unused-imports/no-unused-vars': ['warn', { vars: 'all', varsIgnorePattern: '^_', args: 'after-used', argsIgnorePattern: '^_' }],
+        'no-undef': 'off',
+        'no-console': [process.env.CI ? 'error' : 'warn', { allow: ['warn', 'error', 'info'] }],
+        'prettier/prettier': 'error'
+    }
+}
.gitattributes
CHANGED
@@ -32,3 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.gif filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,45 @@
+# editor
+.idea
+.vscode
+
+# dependencies
+**/node_modules
+**/package-lock.json
+**/yarn.lock
+
+## logs
+**/*.log
+
+## build
+**/dist
+**/build
+
+## temp
+**/tmp
+**/temp
+
+## test
+**/coverage
+
+# misc
+.DS_Store
+
+## env
+.env.local
+.env.development.local
+.env.test.local
+.env.production.local
+.env
+
+## turbo
+.turbo
+
+## secrets
+**/*.key
+**/api.json
+
+## uploads
+**/uploads
+
+## compressed
+**/*.tgz
.husky/pre-commit
ADDED
@@ -0,0 +1,5 @@
+#!/bin/sh
+. "$(dirname "$0")/_/husky.sh"
+
+yarn quick # prettify
+yarn lint-staged # eslint lint (also includes prettify, but prettify supports more file extensions than eslint, so run prettify first)
.prettierignore
ADDED
@@ -0,0 +1,3 @@
+**/node_modules
+**/dist
+**/build
.prettierrc.js
ADDED
@@ -0,0 +1,9 @@
+module.exports = {
+    printWidth: 140,
+    singleQuote: true,
+    jsxSingleQuote: true,
+    trailingComma: 'none',
+    tabWidth: 4,
+    semi: false,
+    endOfLine: 'auto'
+}
CODE_OF_CONDUCT.md
ADDED
@@ -0,0 +1,74 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, gender identity and expression, level of experience,
+nationality, personal appearance, race, religion, or sexual identity and
+orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+- Using welcoming and inclusive language
+- Being respectful of differing viewpoints and experiences
+- Gracefully accepting constructive criticism
+- Focusing on what is best for the community
+- Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+- The use of sexualized language or imagery and unwelcome sexual attention or
+  advances
+- Trolling, insulting/derogatory comments, and personal or political attacks
+- Public or private harassment
+- Publishing others' private information, such as a physical or electronic
+  address, without explicit permission
+- Other conduct which could reasonably be considered inappropriate in a
+  professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at [email protected]. All
+complaints will be reviewed and investigated and will result in a response that
+is deemed necessary and appropriate to the circumstances. The project team is
+obligated to maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
+available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
CONTRIBUTING.md
ADDED
@@ -0,0 +1,127 @@
+<!-- markdownlint-disable MD030 -->
+
+# Contributing to Flowise
+
+We appreciate any form of contribution.
+
+## ⭐ Star
+
+Star and share the [Github Repo](https://github.com/FlowiseAI/Flowise).
+
+## 🙋 Q&A
+
+Search for your question in the [Q&A section](https://github.com/FlowiseAI/Flowise/discussions/categories/q-a); if you can't find it, don't hesitate to create one. It might help others who have a similar question.
+
+## 🙌 Share Chatflow
+
+Yes! Sharing how you use Flowise is a form of contribution. Export your chatflow as JSON, attach a screenshot and share it in the [Show and Tell section](https://github.com/FlowiseAI/Flowise/discussions/categories/show-and-tell).
+
+## 💡 Ideas
+
+Ideas are welcome, such as new features, app integrations, and blockchain networks. Submit them in the [Ideas section](https://github.com/FlowiseAI/Flowise/discussions/categories/ideas).
+
+## 🐞 Report Bugs
+
+Found an issue? [Report it](https://github.com/FlowiseAI/Flowise/issues/new/choose).
+
+## 👨‍💻 Contribute to Code
+
+Not sure what to contribute? Some ideas:
+
+- Create new components from Langchain
+- Update existing components, such as extending functionality or fixing bugs
+- Add new chatflow ideas
+
+### Developers
+
+Flowise has 3 different modules in a single mono repository.
+
+- `server`: Node backend to serve API logics
+- `ui`: React frontend
+- `components`: Langchain components
+
+#### Prerequisite
+
+- Install Yarn
+    ```bash
+    npm i -g yarn
+    ```
+
+#### Step by step
+
+1. Fork the official [Flowise Github Repository](https://github.com/FlowiseAI/Flowise).
+
+2. Clone your forked repository.
+
+3. Create a new branch, see [guide](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-and-deleting-branches-within-your-repository). Naming conventions:
+
+    - For a feature branch: `feature/<Your New Feature>`
+    - For a bug fix branch: `bugfix/<Your New Bugfix>`
+
+4. Switch to the newly created branch.
+
+5. Go into the repository folder:
+
+    ```bash
+    cd Flowise
+    ```
+
+6. Install all dependencies of all modules:
+
+    ```bash
+    yarn install
+    ```
+
+7. Build all the code:
+
+    ```bash
+    yarn build
+    ```
+
+8. Start the app on [http://localhost:3000](http://localhost:3000):
+
+    ```bash
+    yarn start
+    ```
+
+9. For development, run:
+
+    ```bash
+    yarn dev
+    ```
+
+    Any changes made in `packages/ui` or `packages/server` will be reflected on [http://localhost:8080](http://localhost:8080)
+
+    For changes made in `packages/components`, run `yarn build` again to pick up the changes.
+
+10. After making all the changes, run
+
+    ```bash
+    yarn build
+    ```
+
+    and
+
+    ```bash
+    yarn start
+    ```
+
+    to make sure everything works fine in production.
+
+11. Commit your code and submit a Pull Request from your forked branch pointing to [Flowise master](https://github.com/FlowiseAI/Flowise/tree/master).
+
+## 📖 Contribute to Docs
+
+In-Progress
+
+## 🏷️ Pull Request process
+
+A member of the FlowiseAI team will automatically be notified/assigned when you open a pull request. You can also reach out to us on [Discord](https://discord.gg/jbaHfsRVBW).
+
+## 📃 Contributor License Agreement
+
+Before we can merge your contribution, you have to sign our [Contributor License Agreement (CLA)](https://cla-assistant.io/FlowiseAI/Flowise). The CLA contains the terms and conditions under which the contribution is submitted. You need to do this only once, for your first pull request. Keep in mind that without a signed CLA we cannot merge your contribution.
+
+## 📜 Code of Conduct
+
+This project and everyone participating in it are governed by the Code of Conduct, which can be found in [this file](CODE_OF_CONDUCT.md). By participating, you are expected to uphold this code. Please report unacceptable behavior to [email protected].
Dockerfile
ADDED
@@ -0,0 +1,33 @@
+# Build local monorepo image
+# docker build --no-cache -t flowise .
+
+# Run image
+# docker run -d -p 3000:3000 flowise
+
+FROM node:18-alpine
+RUN apk add --update libc6-compat python3 make g++
+
+WORKDIR /usr/src/packages
+
+# Copy root package.json and lockfile
+COPY package.json yarn.loc[k] ./
+
+# Copy components package.json
+COPY packages/components/package.json ./packages/components/package.json
+
+# Copy ui package.json
+COPY packages/ui/package.json ./packages/ui/package.json
+
+# Copy server package.json
+COPY packages/server/package.json ./packages/server/package.json
+
+RUN yarn install
+
+# Copy app source
+COPY . .
+
+RUN yarn build
+
+EXPOSE 7860
+
+CMD [ "yarn", "start" ]
LICENSE.md
ADDED
@@ -0,0 +1,21 @@
+The MIT License
+
+Copyright (c) 2023 FlowiseAI
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
README.md
CHANGED
@@ -5,6 +5,7 @@ colorFrom: gray
 colorTo: pink
 sdk: docker
 pinned: false
+app_port: 7860
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
assets/Demo.png
ADDED
assets/FloWiseAI.png
ADDED
assets/FloWiseAI_black.png
ADDED
assets/FloWiseAI_dark.png
ADDED
assets/FloWiseAI_primary.png
ADDED
babel.config.js
ADDED
@@ -0,0 +1,13 @@
+module.exports = {
+    presets: [
+        '@babel/preset-typescript',
+        [
+            '@babel/preset-env',
+            {
+                targets: {
+                    node: 'current'
+                }
+            }
+        ]
+    ]
+}
docker/.env.example
ADDED
@@ -0,0 +1,3 @@
+PORT=3000
+# FLOWISE_USERNAME=user
+# FLOWISE_PASSWORD=1234
docker/Dockerfile
ADDED
@@ -0,0 +1,13 @@
+FROM node:18-alpine
+
+USER root
+
+RUN apk add --no-cache git
+RUN apk add --no-cache python3 py3-pip make g++
+
+# You can install a specific version like: [email protected]
+RUN npm install -g flowise
+
+WORKDIR /data
+
+CMD "flowise"
docker/README.md
ADDED
@@ -0,0 +1,24 @@
+# Flowise Docker Hub Image
+
+Starts Flowise from [DockerHub Image](https://hub.docker.com/repository/docker/flowiseai/flowise/general)
+
+## Usage
+
+1. Create `.env` file and specify the `PORT` (refer to `.env.example`)
+2. `docker-compose up -d`
+3. Open [http://localhost:3000](http://localhost:3000)
+4. You can bring the containers down with `docker-compose stop`
+
+## With Authorization
+
+1. Create `.env` file and specify the `PORT`, `FLOWISE_USERNAME`, and `FLOWISE_PASSWORD` (refer to `.env.example`)
+2. Pass `FLOWISE_USERNAME` and `FLOWISE_PASSWORD` to the `docker-compose.yml` file:
+    ```
+    environment:
+        - PORT=${PORT}
+        - FLOWISE_USERNAME=${FLOWISE_USERNAME}
+        - FLOWISE_PASSWORD=${FLOWISE_PASSWORD}
+    ```
+3. `docker-compose up -d`
+4. Open [http://localhost:3000](http://localhost:3000)
+5. You can bring the containers down with `docker-compose stop`
docker/docker-compose.yml
ADDED
@@ -0,0 +1,15 @@
+version: '3.1'
+
+services:
+    flowise:
+        image: flowiseai/flowise
+        restart: always
+        environment:
+            - PORT=${PORT}
+            - FLOWISE_USERNAME=${FLOWISE_USERNAME}
+            - FLOWISE_PASSWORD=${FLOWISE_PASSWORD}
+        ports:
+            - '${PORT}:${PORT}'
+        volumes:
+            - ~/.flowise:/root/.flowise
+        command: /bin/sh -c "sleep 3; flowise start"
images/flowise.gif
ADDED
package.json
ADDED
@@ -0,0 +1,56 @@
+{
+    "name": "flowise",
+    "version": "1.2.11",
+    "private": true,
+    "homepage": "https://flowiseai.com",
+    "workspaces": [
+        "packages/*",
+        "flowise",
+        "ui",
+        "components"
+    ],
+    "scripts": {
+        "build": "turbo run build",
+        "build-force": "turbo run build --force",
+        "dev": "turbo run dev --parallel",
+        "start": "run-script-os",
+        "start:windows": "cd packages/server/bin && run start",
+        "start:default": "cd packages/server/bin && ./run start",
+        "clean": "npm exec -ws -- rimraf dist build",
+        "format": "prettier --write \"**/*.{ts,tsx,md}\"",
+        "test": "turbo run test",
+        "lint": "eslint \"**/*.{js,jsx,ts,tsx,json,md}\"",
+        "lint-fix": "yarn lint --fix",
+        "quick": "pretty-quick --staged",
+        "postinstall": "husky install"
+    },
+    "lint-staged": {
+        "*.{js,jsx,ts,tsx,json,md}": "eslint --fix"
+    },
+    "devDependencies": {
+        "turbo": "1.7.4",
+        "@babel/preset-env": "^7.19.4",
+        "@babel/preset-typescript": "7.18.6",
+        "@types/express": "^4.17.13",
+        "@typescript-eslint/typescript-estree": "^5.39.0",
+        "eslint": "^8.24.0",
+        "eslint-config-prettier": "^8.3.0",
+        "eslint-config-react-app": "^7.0.1",
+        "eslint-plugin-jsx-a11y": "^6.6.1",
+        "eslint-plugin-markdown": "^3.0.0",
+        "eslint-plugin-prettier": "^3.4.0",
+        "eslint-plugin-react": "^7.26.1",
+        "eslint-plugin-react-hooks": "^4.6.0",
+        "eslint-plugin-unused-imports": "^2.0.0",
+        "husky": "^8.0.1",
+        "lint-staged": "^13.0.3",
+        "prettier": "^2.7.1",
+        "pretty-quick": "^3.1.3",
+        "rimraf": "^3.0.2",
+        "run-script-os": "^1.1.6",
+        "typescript": "^4.8.4"
+    },
+    "engines": {
+        "node": ">=18.15.0"
+    }
+}
packages/components/.env.example
ADDED
@@ -0,0 +1 @@
+DEBUG=true
packages/components/README.md
ADDED
@@ -0,0 +1,25 @@
+<!-- markdownlint-disable MD030 -->
+
+# Flowise Components
+
+App integrations for Flowise. Contains Nodes and Credentials.
+
+![Flowise](https://github.com/FlowiseAI/Flowise/blob/main/images/flowise.gif?raw=true)
+
+Install:
+
+```bash
+npm i flowise-components
+```
+
+## Debug
+
+To view all the logs, create an `.env` file and add:
+
+```
+DEBUG=true
+```
+
+## License
+
+Source code in this repository is made available under the [MIT License](https://github.com/FlowiseAI/Flowise/blob/master/LICENSE.md).
packages/components/gulpfile.ts
ADDED
@@ -0,0 +1,9 @@
+import gulp from 'gulp'
+
+const { src, dest } = gulp
+
+function copyIcons() {
+    return src(['nodes/**/*.{jpg,png,svg}']).pipe(dest('dist/nodes'))
+}
+
+exports.default = copyIcons
packages/components/nodes/agents/AutoGPT/AutoGPT.ts
ADDED
@@ -0,0 +1,99 @@
+import { INode, INodeData, INodeParams } from '../../../src/Interface'
+import { BaseChatModel } from 'langchain/chat_models/base'
+import { AutoGPT } from 'langchain/experimental/autogpt'
+import { Tool } from 'langchain/tools'
+import { VectorStoreRetriever } from 'langchain/vectorstores/base'
+import { flatten } from 'lodash'
+
+class AutoGPT_Agents implements INode {
+    label: string
+    name: string
+    description: string
+    type: string
+    icon: string
+    category: string
+    baseClasses: string[]
+    inputs: INodeParams[]
+
+    constructor() {
+        this.label = 'AutoGPT'
+        this.name = 'autoGPT'
+        this.type = 'AutoGPT'
+        this.category = 'Agents'
+        this.icon = 'autogpt.png'
+        this.description = 'Autonomous agent with chain of thoughts for self-guided task completion'
+        this.baseClasses = ['AutoGPT']
+        this.inputs = [
+            {
+                label: 'Allowed Tools',
+                name: 'tools',
+                type: 'Tool',
+                list: true
+            },
+            {
+                label: 'Chat Model',
+                name: 'model',
+                type: 'BaseChatModel'
+            },
+            {
+                label: 'Vector Store Retriever',
+                name: 'vectorStoreRetriever',
+                type: 'BaseRetriever'
+            },
+            {
+                label: 'AutoGPT Name',
+                name: 'aiName',
+                type: 'string',
+                placeholder: 'Tom',
+                optional: true
+            },
+            {
+                label: 'AutoGPT Role',
+                name: 'aiRole',
+                type: 'string',
+                placeholder: 'Assistant',
+                optional: true
+            },
+            {
+                label: 'Maximum Loop',
+                name: 'maxLoop',
+                type: 'number',
+                default: 5,
+                optional: true
+            }
+        ]
+    }
+
+    async init(nodeData: INodeData): Promise<any> {
+        const model = nodeData.inputs?.model as BaseChatModel
+        const vectorStoreRetriever = nodeData.inputs?.vectorStoreRetriever as VectorStoreRetriever
+        let tools = nodeData.inputs?.tools as Tool[]
+        tools = flatten(tools)
+        const aiName = (nodeData.inputs?.aiName as string) || 'AutoGPT'
+        const aiRole = (nodeData.inputs?.aiRole as string) || 'Assistant'
+        const maxLoop = nodeData.inputs?.maxLoop as string
+
+        const autogpt = AutoGPT.fromLLMAndTools(model, tools, {
+            memory: vectorStoreRetriever,
+            aiName,
+            aiRole
+        })
+
+        autogpt.maxIterations = parseInt(maxLoop, 10)
+
+        return autogpt
+    }
+
+    async run(nodeData: INodeData, input: string): Promise<string> {
+        const executor = nodeData.instance as AutoGPT
+        try {
+            const res = await executor.run([input])
+            return res || 'I have completed all my tasks.'
+        } catch (e) {
+            console.error(e)
+            throw new Error(e)
+        }
+    }
+}
+
+module.exports = { nodeClass: AutoGPT_Agents }
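Each agent node in this changeset follows the same INode contract: `init` assembles the underlying LangChain object from the node's wired inputs, and `run` executes it against the user input. A minimal sketch of exercising this node outside the Flowise UI (hypothetical harness, not part of the diff; the ChatOpenAI/MemoryVectorStore wiring is an assumption based on the LangChain JS APIs of the same era):

```ts
// Hypothetical harness, not part of this changeset.
import { ChatOpenAI } from 'langchain/chat_models/openai'
import { OpenAIEmbeddings } from 'langchain/embeddings/openai'
import { MemoryVectorStore } from 'langchain/vectorstores/memory'

const { nodeClass: AutoGPTNode } = require('./AutoGPT')

async function main() {
    const node = new AutoGPTNode()
    // Shape mirrors the INodeData fields the node reads in init()
    const nodeData: any = {
        inputs: {
            model: new ChatOpenAI({ temperature: 0 }),
            tools: [],
            vectorStoreRetriever: new MemoryVectorStore(new OpenAIEmbeddings()).asRetriever(),
            aiName: 'Tom',
            aiRole: 'Assistant',
            maxLoop: '3'
        }
    }
    nodeData.instance = await node.init(nodeData) // builds the AutoGPT executor
    console.log(await node.run(nodeData, 'Write a one-line weather report for today'))
}
main()
```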
packages/components/nodes/agents/AutoGPT/autogpt.png
ADDED
packages/components/nodes/agents/BabyAGI/BabyAGI.ts
ADDED
@@ -0,0 +1,63 @@
+import { INode, INodeData, INodeParams } from '../../../src/Interface'
+import { BabyAGI } from './core'
+import { BaseChatModel } from 'langchain/chat_models/base'
+import { VectorStore } from 'langchain/vectorstores'
+
+class BabyAGI_Agents implements INode {
+    label: string
+    name: string
+    description: string
+    type: string
+    icon: string
+    category: string
+    baseClasses: string[]
+    inputs: INodeParams[]
+
+    constructor() {
+        this.label = 'BabyAGI'
+        this.name = 'babyAGI'
+        this.type = 'BabyAGI'
+        this.category = 'Agents'
+        this.icon = 'babyagi.jpg'
+        this.description = 'Task Driven Autonomous Agent which creates new task and reprioritizes task list based on objective'
+        this.baseClasses = ['BabyAGI']
+        this.inputs = [
+            {
+                label: 'Chat Model',
+                name: 'model',
+                type: 'BaseChatModel'
+            },
+            {
+                label: 'Vector Store',
+                name: 'vectorStore',
+                type: 'VectorStore'
+            },
+            {
+                label: 'Task Loop',
+                name: 'taskLoop',
+                type: 'number',
+                default: 3
+            }
+        ]
+    }
+
+    async init(nodeData: INodeData): Promise<any> {
+        const model = nodeData.inputs?.model as BaseChatModel
+        const vectorStore = nodeData.inputs?.vectorStore as VectorStore
+        const taskLoop = nodeData.inputs?.taskLoop as string
+        const k = (vectorStore as any)?.k ?? 4
+
+        const babyAgi = BabyAGI.fromLLM(model, vectorStore, parseInt(taskLoop, 10), k)
+        return babyAgi
+    }
+
+    async run(nodeData: INodeData, input: string): Promise<string> {
+        const executor = nodeData.instance as BabyAGI
+        const objective = input
+
+        const res = await executor.call({ objective })
+        return res
+    }
+}
+
+module.exports = { nodeClass: BabyAGI_Agents }
packages/components/nodes/agents/BabyAGI/babyagi.jpg
ADDED
packages/components/nodes/agents/BabyAGI/core.ts
ADDED
@@ -0,0 +1,270 @@
+import { LLMChain } from 'langchain/chains'
+import { BaseChatModel } from 'langchain/chat_models/base'
+import { VectorStore } from 'langchain/dist/vectorstores/base'
+import { Document } from 'langchain/document'
+import { PromptTemplate } from 'langchain/prompts'
+
+class TaskCreationChain extends LLMChain {
+    constructor(prompt: PromptTemplate, llm: BaseChatModel) {
+        super({ prompt, llm })
+    }
+
+    static from_llm(llm: BaseChatModel): LLMChain {
+        const taskCreationTemplate: string =
+            'You are a task creation AI that uses the result of an execution agent' +
+            ' to create new tasks with the following objective: {objective},' +
+            ' The last completed task has the result: {result}.' +
+            ' This result was based on this task description: {task_description}.' +
+            ' These are incomplete tasks list: {incomplete_tasks}.' +
+            ' Based on the result, create new tasks to be completed' +
+            ' by the AI system that do not overlap with incomplete tasks.' +
+            ' Return the tasks as an array.'
+
+        const prompt = new PromptTemplate({
+            template: taskCreationTemplate,
+            inputVariables: ['result', 'task_description', 'incomplete_tasks', 'objective']
+        })
+
+        return new TaskCreationChain(prompt, llm)
+    }
+}
+
+class TaskPrioritizationChain extends LLMChain {
+    constructor(prompt: PromptTemplate, llm: BaseChatModel) {
+        super({ prompt, llm })
+    }
+
+    static from_llm(llm: BaseChatModel): TaskPrioritizationChain {
+        const taskPrioritizationTemplate: string =
+            'You are a task prioritization AI tasked with cleaning the formatting of and reprioritizing' +
+            ' the following task list: {task_names}.' +
+            ' Consider the ultimate objective of your team: {objective}.' +
+            ' Do not remove any tasks. Return the result as a numbered list, like:' +
+            ' #. First task' +
+            ' #. Second task' +
+            ' Start the task list with number {next_task_id}.'
+        const prompt = new PromptTemplate({
+            template: taskPrioritizationTemplate,
+            inputVariables: ['task_names', 'next_task_id', 'objective']
+        })
+        return new TaskPrioritizationChain(prompt, llm)
+    }
+}
+
+class ExecutionChain extends LLMChain {
+    constructor(prompt: PromptTemplate, llm: BaseChatModel) {
+        super({ prompt, llm })
+    }
+
+    static from_llm(llm: BaseChatModel): LLMChain {
+        const executionTemplate: string =
+            'You are an AI who performs one task based on the following objective: {objective}.' +
+            ' Take into account these previously completed tasks: {context}.' +
+            ' Your task: {task}.' +
+            ' Response:'
+
+        const prompt = new PromptTemplate({
+            template: executionTemplate,
+            inputVariables: ['objective', 'context', 'task']
+        })
+
+        return new ExecutionChain(prompt, llm)
+    }
+}
+
+async function getNextTask(
+    taskCreationChain: LLMChain,
+    result: string,
+    taskDescription: string,
+    taskList: string[],
+    objective: string
+): Promise<any[]> {
+    const incompleteTasks: string = taskList.join(', ')
+    const response: string = await taskCreationChain.predict({
+        result,
+        task_description: taskDescription,
+        incomplete_tasks: incompleteTasks,
+        objective
+    })
+
+    const newTasks: string[] = response.split('\n')
+
+    return newTasks.filter((taskName) => taskName.trim()).map((taskName) => ({ task_name: taskName }))
+}
+
+interface Task {
+    task_id: number
+    task_name: string
+}
+
+async function prioritizeTasks(
+    taskPrioritizationChain: LLMChain,
+    thisTaskId: number,
+    taskList: Task[],
+    objective: string
+): Promise<Task[]> {
+    const next_task_id = thisTaskId + 1
+    const task_names = taskList.map((t) => t.task_name).join(', ')
+    const response = await taskPrioritizationChain.predict({ task_names, next_task_id, objective })
+    const newTasks = response.split('\n')
+    const prioritizedTaskList: Task[] = []
+
+    for (const taskString of newTasks) {
+        if (!taskString.trim()) {
+            // eslint-disable-next-line no-continue
+            continue
+        }
+        const taskParts = taskString.trim().split('. ', 2)
+        if (taskParts.length === 2) {
+            const task_id = parseInt(taskParts[0].trim(), 10)
+            const task_name = taskParts[1].trim()
+            prioritizedTaskList.push({ task_id, task_name })
+        }
+    }
+
+    return prioritizedTaskList
+}
+
+export async function get_top_tasks(vectorStore: VectorStore, query: string, k: number): Promise<string[]> {
+    const docs = await vectorStore.similaritySearch(query, k)
+    let returnDocs: string[] = []
+    for (const doc of docs) {
+        returnDocs.push(doc.metadata.task)
+    }
+    return returnDocs
+}
+
+async function executeTask(vectorStore: VectorStore, executionChain: LLMChain, objective: string, task: string, k = 5): Promise<string> {
+    const context = await get_top_tasks(vectorStore, objective, k)
+    return executionChain.predict({ objective, context, task })
+}
+
+export class BabyAGI {
+    taskList: Array<Task> = []
+
+    taskCreationChain: TaskCreationChain
+
+    taskPrioritizationChain: TaskPrioritizationChain
+
+    executionChain: ExecutionChain
+
+    taskIdCounter = 1
+
+    vectorStore: VectorStore
+
+    maxIterations = 3
+
+    topK = 4
+
+    constructor(
+        taskCreationChain: TaskCreationChain,
+        taskPrioritizationChain: TaskPrioritizationChain,
+        executionChain: ExecutionChain,
+        vectorStore: VectorStore,
+        maxIterations: number,
+        topK: number
+    ) {
+        this.taskCreationChain = taskCreationChain
+        this.taskPrioritizationChain = taskPrioritizationChain
+        this.executionChain = executionChain
+        this.vectorStore = vectorStore
+        this.maxIterations = maxIterations
+        this.topK = topK
+    }
+
+    addTask(task: Task) {
+        this.taskList.push(task)
+    }
+
+    printTaskList() {
+        // eslint-disable-next-line no-console
+        console.log('\x1b[95m\x1b[1m\n*****TASK LIST*****\n\x1b[0m\x1b[0m')
+        // eslint-disable-next-line no-console
+        this.taskList.forEach((t) => console.log(`${t.task_id}: ${t.task_name}`))
+    }
+
+    printNextTask(task: Task) {
+        // eslint-disable-next-line no-console
+        console.log('\x1b[92m\x1b[1m\n*****NEXT TASK*****\n\x1b[0m\x1b[0m')
+        // eslint-disable-next-line no-console
+        console.log(`${task.task_id}: ${task.task_name}`)
+    }
+
+    printTaskResult(result: string) {
+        // eslint-disable-next-line no-console
+        console.log('\x1b[93m\x1b[1m\n*****TASK RESULT*****\n\x1b[0m\x1b[0m')
+        // eslint-disable-next-line no-console
+        console.log(result)
+    }
+
+    getInputKeys(): string[] {
+        return ['objective']
+    }
+
+    getOutputKeys(): string[] {
+        return []
+    }
+
+    async call(inputs: Record<string, any>): Promise<string> {
+        const { objective } = inputs
+        const firstTask = inputs.first_task || 'Make a todo list'
+        this.addTask({ task_id: 1, task_name: firstTask })
+        let numIters = 0
+        let loop = true
+        let finalResult = ''
+
+        while (loop) {
+            if (this.taskList.length) {
+                this.printTaskList()
+
+                // Step 1: Pull the first task
+                const task = this.taskList.shift()
+                if (!task) break
+                this.printNextTask(task)
+
+                // Step 2: Execute the task
+                const result = await executeTask(this.vectorStore, this.executionChain, objective, task.task_name, this.topK)
+                const thisTaskId = task.task_id
+                finalResult = result
+                this.printTaskResult(result)
+
+                // Step 3: Store the result in Pinecone
+                const docs = new Document({ pageContent: result, metadata: { task: task.task_name } })
+                this.vectorStore.addDocuments([docs])
+
+                // Step 4: Create new tasks and reprioritize task list
+                const newTasks = await getNextTask(
+                    this.taskCreationChain,
+                    result,
+                    task.task_name,
+                    this.taskList.map((t) => t.task_name),
+                    objective
+                )
+                newTasks.forEach((newTask) => {
+                    this.taskIdCounter += 1
+                    // eslint-disable-next-line no-param-reassign
+                    newTask.task_id = this.taskIdCounter
+                    this.addTask(newTask)
+                })
+                this.taskList = await prioritizeTasks(this.taskPrioritizationChain, thisTaskId, this.taskList, objective)
+            }
+
+            numIters += 1
+            if (this.maxIterations !== null && numIters === this.maxIterations) {
+                // eslint-disable-next-line no-console
+                console.log('\x1b[91m\x1b[1m\n*****TASK ENDING*****\n\x1b[0m\x1b[0m')
+                loop = false
+                this.taskList = []
+            }
+        }
+
+        return finalResult
+    }
+
+    static fromLLM(llm: BaseChatModel, vectorstore: VectorStore, maxIterations = 3, topK = 4): BabyAGI {
+        const taskCreationChain = TaskCreationChain.from_llm(llm)
+        const taskPrioritizationChain = TaskPrioritizationChain.from_llm(llm)
+        const executionChain = ExecutionChain.from_llm(llm)
+        return new BabyAGI(taskCreationChain, taskPrioritizationChain, executionChain, vectorstore, maxIterations, topK)
+    }
+}
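The `BabyAGI` class above is self-contained: `fromLLM` wires up the three chains, and `call` drives the execute → store → create → reprioritize loop until `maxIterations` is reached. A minimal usage sketch (the ChatOpenAI/MemoryVectorStore setup is an assumption, not part of this diff):

```ts
import { ChatOpenAI } from 'langchain/chat_models/openai'
import { OpenAIEmbeddings } from 'langchain/embeddings/openai'
import { MemoryVectorStore } from 'langchain/vectorstores/memory'
import { BabyAGI } from './core'

async function demo() {
    const llm = new ChatOpenAI({ temperature: 0 })
    const store = new MemoryVectorStore(new OpenAIEmbeddings())
    // 2 loop iterations, top-4 context documents per task execution
    const agent = BabyAGI.fromLLM(llm, store, 2, 4)
    // Seeds the list with 'Make a todo list' unless inputs.first_task overrides it
    const finalResult = await agent.call({ objective: 'Plan a three-day trip to Kyoto' })
    console.log(finalResult)
}
demo()
```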
packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts
ADDED
@@ -0,0 +1,116 @@
+import { ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
+import { initializeAgentExecutorWithOptions, AgentExecutor, InitializeAgentExecutorOptions } from 'langchain/agents'
+import { Tool } from 'langchain/tools'
+import { BaseChatMemory, ChatMessageHistory } from 'langchain/memory'
+import { getBaseClasses } from '../../../src/utils'
+import { AIChatMessage, HumanChatMessage } from 'langchain/schema'
+import { BaseLanguageModel } from 'langchain/base_language'
+import { flatten } from 'lodash'
+
+class ConversationalAgent_Agents implements INode {
+    label: string
+    name: string
+    description: string
+    type: string
+    icon: string
+    category: string
+    baseClasses: string[]
+    inputs: INodeParams[]
+
+    constructor() {
+        this.label = 'Conversational Agent'
+        this.name = 'conversationalAgent'
+        this.type = 'AgentExecutor'
+        this.category = 'Agents'
+        this.icon = 'agent.svg'
+        this.description = 'Conversational agent for a chat model. It will utilize chat specific prompts'
+        this.baseClasses = [this.type, ...getBaseClasses(AgentExecutor)]
+        this.inputs = [
+            {
+                label: 'Allowed Tools',
+                name: 'tools',
+                type: 'Tool',
+                list: true
+            },
+            {
+                label: 'Language Model',
+                name: 'model',
+                type: 'BaseLanguageModel'
+            },
+            {
+                label: 'Memory',
+                name: 'memory',
+                type: 'BaseChatMemory'
+            },
+            {
+                label: 'System Message',
+                name: 'systemMessage',
+                type: 'string',
+                rows: 4,
+                optional: true,
+                additionalParams: true
+            },
+            {
+                label: 'Human Message',
+                name: 'humanMessage',
+                type: 'string',
+                rows: 4,
+                optional: true,
+                additionalParams: true
+            }
+        ]
+    }
+
+    async init(nodeData: INodeData): Promise<any> {
+        const model = nodeData.inputs?.model as BaseLanguageModel
+        let tools = nodeData.inputs?.tools as Tool[]
+        tools = flatten(tools)
+        const memory = nodeData.inputs?.memory as BaseChatMemory
+        const humanMessage = nodeData.inputs?.humanMessage as string
+        const systemMessage = nodeData.inputs?.systemMessage as string
+
+        const obj: InitializeAgentExecutorOptions = {
+            agentType: 'chat-conversational-react-description',
+            verbose: process.env.DEBUG === 'true' ? true : false
+        }
+
+        const agentArgs: any = {}
+        if (humanMessage) {
+            agentArgs.humanMessage = humanMessage
+        }
+        if (systemMessage) {
+            agentArgs.systemMessage = systemMessage
+        }
+
+        if (Object.keys(agentArgs).length) obj.agentArgs = agentArgs
+
+        const executor = await initializeAgentExecutorWithOptions(tools, model, obj)
+        executor.memory = memory
+        return executor
+    }
+
+    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
+        const executor = nodeData.instance as AgentExecutor
+        const memory = nodeData.inputs?.memory as BaseChatMemory
+
+        if (options && options.chatHistory) {
+            const chatHistory = []
+            const histories: IMessage[] = options.chatHistory
+
+            for (const message of histories) {
+                if (message.type === 'apiMessage') {
+                    chatHistory.push(new AIChatMessage(message.message))
+                } else if (message.type === 'userMessage') {
+                    chatHistory.push(new HumanChatMessage(message.message))
+                }
+            }
+            memory.chatHistory = new ChatMessageHistory(chatHistory)
+            executor.memory = memory
+        }
+        const result = await executor.call({ input })
+
+        return result?.output
+    }
+}
+
+module.exports = { nodeClass: ConversationalAgent_Agents }
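`run()` rebuilds the agent's memory on every call from `options.chatHistory`, mapping Flowise `IMessage` records onto LangChain chat messages. A sketch of the shape it expects, inferred from the branch above (values are illustrative):

```ts
// Illustrative only: the history records run() converts into LangChain messages.
const options = {
    chatHistory: [
        { type: 'userMessage', message: 'My name is Ana.' }, // -> HumanChatMessage
        { type: 'apiMessage', message: 'Nice to meet you, Ana!' } // -> AIChatMessage
    ]
}
```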
packages/components/nodes/agents/ConversationalAgent/agent.svg
ADDED
packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts
ADDED
@@ -0,0 +1,60 @@
+import { INode, INodeData, INodeParams } from '../../../src/Interface'
+import { initializeAgentExecutorWithOptions, AgentExecutor } from 'langchain/agents'
+import { getBaseClasses } from '../../../src/utils'
+import { Tool } from 'langchain/tools'
+import { BaseLanguageModel } from 'langchain/base_language'
+import { flatten } from 'lodash'
+
+class MRKLAgentChat_Agents implements INode {
+    label: string
+    name: string
+    description: string
+    type: string
+    icon: string
+    category: string
+    baseClasses: string[]
+    inputs: INodeParams[]
+
+    constructor() {
+        this.label = 'MRKL Agent for Chat Models'
+        this.name = 'mrklAgentChat'
+        this.type = 'AgentExecutor'
+        this.category = 'Agents'
+        this.icon = 'agent.svg'
+        this.description = 'Agent that uses the ReAct Framework to decide what action to take, optimized to be used with Chat Models'
+        this.baseClasses = [this.type, ...getBaseClasses(AgentExecutor)]
+        this.inputs = [
+            {
+                label: 'Allowed Tools',
+                name: 'tools',
+                type: 'Tool',
+                list: true
+            },
+            {
+                label: 'Language Model',
+                name: 'model',
+                type: 'BaseLanguageModel'
+            }
+        ]
+    }
+
+    async init(nodeData: INodeData): Promise<any> {
+        const model = nodeData.inputs?.model as BaseLanguageModel
+        let tools = nodeData.inputs?.tools as Tool[]
+        tools = flatten(tools)
+        const executor = await initializeAgentExecutorWithOptions(tools, model, {
+            agentType: 'chat-zero-shot-react-description',
+            verbose: process.env.DEBUG === 'true' ? true : false
+        })
+        return executor
+    }
+
+    async run(nodeData: INodeData, input: string): Promise<string> {
+        const executor = nodeData.instance as AgentExecutor
+        const result = await executor.call({ input })
+
+        return result?.output
+    }
+}
+
+module.exports = { nodeClass: MRKLAgentChat_Agents }
packages/components/nodes/agents/MRKLAgentChat/agent.svg
ADDED
packages/components/nodes/agents/MRKLAgentLLM/MRKLAgentLLM.ts
ADDED
@@ -0,0 +1,61 @@
+import { INode, INodeData, INodeParams } from '../../../src/Interface'
+import { initializeAgentExecutorWithOptions, AgentExecutor } from 'langchain/agents'
+import { Tool } from 'langchain/tools'
+import { getBaseClasses } from '../../../src/utils'
+import { BaseLanguageModel } from 'langchain/base_language'
+import { flatten } from 'lodash'
+
+class MRKLAgentLLM_Agents implements INode {
+    label: string
+    name: string
+    description: string
+    type: string
+    icon: string
+    category: string
+    baseClasses: string[]
+    inputs: INodeParams[]
+
+    constructor() {
+        this.label = 'MRKL Agent for LLMs'
+        this.name = 'mrklAgentLLM'
+        this.type = 'AgentExecutor'
+        this.category = 'Agents'
+        this.icon = 'agent.svg'
+        this.description = 'Agent that uses the ReAct Framework to decide what action to take, optimized to be used with LLMs'
+        this.baseClasses = [this.type, ...getBaseClasses(AgentExecutor)]
+        this.inputs = [
+            {
+                label: 'Allowed Tools',
+                name: 'tools',
+                type: 'Tool',
+                list: true
+            },
+            {
+                label: 'Language Model',
+                name: 'model',
+                type: 'BaseLanguageModel'
+            }
+        ]
+    }
+
+    async init(nodeData: INodeData): Promise<any> {
+        const model = nodeData.inputs?.model as BaseLanguageModel
+        let tools = nodeData.inputs?.tools as Tool[]
+        tools = flatten(tools)
+
+        const executor = await initializeAgentExecutorWithOptions(tools, model, {
+            agentType: 'zero-shot-react-description',
+            verbose: process.env.DEBUG === 'true' ? true : false
+        })
+        return executor
+    }
+
+    async run(nodeData: INodeData, input: string): Promise<string> {
+        const executor = nodeData.instance as AgentExecutor
+        const result = await executor.call({ input })
+
+        return result?.output
+    }
+}
+
+module.exports = { nodeClass: MRKLAgentLLM_Agents }
packages/components/nodes/agents/MRKLAgentLLM/agent.svg
ADDED
packages/components/nodes/chains/ApiChain/GETApiChain.ts
ADDED
@@ -0,0 +1,129 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
|
2 |
+
import { APIChain } from 'langchain/chains'
|
3 |
+
import { CustomChainHandler, getBaseClasses } from '../../../src/utils'
|
4 |
+
import { BaseLanguageModel } from 'langchain/base_language'
|
5 |
+
import { PromptTemplate } from 'langchain/prompts'
|
6 |
+
|
7 |
+
export const API_URL_RAW_PROMPT_TEMPLATE = `You are given the below API Documentation:
|
8 |
+
{api_docs}
|
9 |
+
Using this documentation, generate the full API url to call for answering the user question.
You should build the API url in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.

Question:{question}
API url:`

export const API_RESPONSE_RAW_PROMPT_TEMPLATE =
    'Given this {api_response} response for {api_url}, use the given response to answer this {question}'

class GETApiChain_Chains implements INode {
    label: string
    name: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    description: string
    inputs: INodeParams[]

    constructor() {
        this.label = 'GET API Chain'
        this.name = 'getApiChain'
        this.type = 'GETApiChain'
        this.icon = 'apichain.svg'
        this.category = 'Chains'
        this.description = 'Chain to run queries against GET API'
        this.baseClasses = [this.type, ...getBaseClasses(APIChain)]
        this.inputs = [
            {
                label: 'Language Model',
                name: 'model',
                type: 'BaseLanguageModel'
            },
            {
                label: 'API Documentation',
                name: 'apiDocs',
                type: 'string',
                description:
                    'Description of how the API works. Please refer to more <a target="_blank" href="https://github.com/hwchase17/langchain/blob/master/langchain/chains/api/open_meteo_docs.py">examples</a>',
                rows: 4
            },
            {
                label: 'Headers',
                name: 'headers',
                type: 'json',
                additionalParams: true,
                optional: true
            },
            {
                label: 'URL Prompt',
                name: 'urlPrompt',
                type: 'string',
                description: 'Prompt used to tell LLMs how to construct the URL. Must contain {api_docs} and {question}',
                default: API_URL_RAW_PROMPT_TEMPLATE,
                rows: 4,
                additionalParams: true
            },
            {
                label: 'Answer Prompt',
                name: 'ansPrompt',
                type: 'string',
                description:
                    'Prompt used to tell LLMs how to return the API response. Must contain {api_response}, {api_url}, and {question}',
                default: API_RESPONSE_RAW_PROMPT_TEMPLATE,
                rows: 4,
                additionalParams: true
            }
        ]
    }

    async init(nodeData: INodeData): Promise<any> {
        const model = nodeData.inputs?.model as BaseLanguageModel
        const apiDocs = nodeData.inputs?.apiDocs as string
        const headers = nodeData.inputs?.headers as string
        const urlPrompt = nodeData.inputs?.urlPrompt as string
        const ansPrompt = nodeData.inputs?.ansPrompt as string

        const chain = await getAPIChain(apiDocs, model, headers, urlPrompt, ansPrompt)
        return chain
    }

    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
        const model = nodeData.inputs?.model as BaseLanguageModel
        const apiDocs = nodeData.inputs?.apiDocs as string
        const headers = nodeData.inputs?.headers as string
        const urlPrompt = nodeData.inputs?.urlPrompt as string
        const ansPrompt = nodeData.inputs?.ansPrompt as string

        const chain = await getAPIChain(apiDocs, model, headers, urlPrompt, ansPrompt)
        if (options.socketIO && options.socketIOClientId) {
            const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId, 2)
            const res = await chain.run(input, [handler])
            return res
        } else {
            const res = await chain.run(input)
            return res
        }
    }
}

const getAPIChain = async (documents: string, llm: BaseLanguageModel, headers: string, urlPrompt: string, ansPrompt: string) => {
    const apiUrlPrompt = new PromptTemplate({
        inputVariables: ['api_docs', 'question'],
        template: urlPrompt ? urlPrompt : API_URL_RAW_PROMPT_TEMPLATE
    })

    const apiResponsePrompt = new PromptTemplate({
        inputVariables: ['api_docs', 'question', 'api_url', 'api_response'],
        template: ansPrompt ? ansPrompt : API_RESPONSE_RAW_PROMPT_TEMPLATE
    })

    const chain = APIChain.fromLLMAndAPIDocs(llm, documents, {
        apiUrlPrompt,
        apiResponsePrompt,
        verbose: process.env.DEBUG === 'true' ? true : false,
        headers: typeof headers === 'object' ? headers : headers ? JSON.parse(headers) : {}
    })
    return chain
}

module.exports = { nodeClass: GETApiChain_Chains }
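Note: a minimal sketch of driving the same kind of chain outside Flowise. The OpenAI model and the one-line API doc are illustrative assumptions, not part of this commit:

import { OpenAI } from 'langchain/llms/openai'
import { APIChain } from 'langchain/chains'

async function demoGetApiChain(): Promise<string> {
    const llm = new OpenAI({ temperature: 0 })
    // Hypothetical API documentation string; real docs should describe the endpoint and its query params.
    const apiDocs = 'BASE URL: https://api.open-meteo.com/v1/forecast (GET with latitude, longitude, current_weather=true)'

    // The Headers input can arrive as a parsed object or a JSON string; this mirrors the node's coercion.
    const rawHeaders: string | Record<string, string> = '{"Accept": "application/json"}'
    const headers = typeof rawHeaders === 'object' ? rawHeaders : rawHeaders ? JSON.parse(rawHeaders) : {}

    const chain = APIChain.fromLLMAndAPIDocs(llm, apiDocs, { headers })
    return chain.run('What is the current temperature in Munich?')
}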
packages/components/nodes/chains/ApiChain/POSTApiChain.ts
ADDED
@@ -0,0 +1,118 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { CustomChainHandler, getBaseClasses } from '../../../src/utils'
import { BaseLanguageModel } from 'langchain/base_language'
import { PromptTemplate } from 'langchain/prompts'
import { API_RESPONSE_RAW_PROMPT_TEMPLATE, API_URL_RAW_PROMPT_TEMPLATE, APIChain } from './postCore'

class POSTApiChain_Chains implements INode {
    label: string
    name: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    description: string
    inputs: INodeParams[]

    constructor() {
        this.label = 'POST API Chain'
        this.name = 'postApiChain'
        this.type = 'POSTApiChain'
        this.icon = 'apichain.svg'
        this.category = 'Chains'
        this.description = 'Chain to run queries against POST API'
        this.baseClasses = [this.type, ...getBaseClasses(APIChain)]
        this.inputs = [
            {
                label: 'Language Model',
                name: 'model',
                type: 'BaseLanguageModel'
            },
            {
                label: 'API Documentation',
                name: 'apiDocs',
                type: 'string',
                description:
                    'Description of how the API works. Please refer to more <a target="_blank" href="https://github.com/hwchase17/langchain/blob/master/langchain/chains/api/open_meteo_docs.py">examples</a>',
                rows: 4
            },
            {
                label: 'Headers',
                name: 'headers',
                type: 'json',
                additionalParams: true,
                optional: true
            },
            {
                label: 'URL Prompt',
                name: 'urlPrompt',
                type: 'string',
                description: 'Prompt used to tell LLMs how to construct the URL. Must contain {api_docs} and {question}',
                default: API_URL_RAW_PROMPT_TEMPLATE,
                rows: 4,
                additionalParams: true
            },
            {
                label: 'Answer Prompt',
                name: 'ansPrompt',
                type: 'string',
                description:
                    'Prompt used to tell LLMs how to return the API response. Must contain {api_response}, {api_url_body}, and {question}',
                default: API_RESPONSE_RAW_PROMPT_TEMPLATE,
                rows: 4,
                additionalParams: true
            }
        ]
    }

    async init(nodeData: INodeData): Promise<any> {
        const model = nodeData.inputs?.model as BaseLanguageModel
        const apiDocs = nodeData.inputs?.apiDocs as string
        const headers = nodeData.inputs?.headers as string
        const urlPrompt = nodeData.inputs?.urlPrompt as string
        const ansPrompt = nodeData.inputs?.ansPrompt as string

        const chain = await getAPIChain(apiDocs, model, headers, urlPrompt, ansPrompt)
        return chain
    }

    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
        const model = nodeData.inputs?.model as BaseLanguageModel
        const apiDocs = nodeData.inputs?.apiDocs as string
        const headers = nodeData.inputs?.headers as string
        const urlPrompt = nodeData.inputs?.urlPrompt as string
        const ansPrompt = nodeData.inputs?.ansPrompt as string

        const chain = await getAPIChain(apiDocs, model, headers, urlPrompt, ansPrompt)
        if (options.socketIO && options.socketIOClientId) {
            const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId, 2)
            const res = await chain.run(input, [handler])
            return res
        } else {
            const res = await chain.run(input)
            return res
        }
    }
}

const getAPIChain = async (documents: string, llm: BaseLanguageModel, headers: string, urlPrompt: string, ansPrompt: string) => {
    const apiUrlPrompt = new PromptTemplate({
        inputVariables: ['api_docs', 'question'],
        template: urlPrompt ? urlPrompt : API_URL_RAW_PROMPT_TEMPLATE
    })

    const apiResponsePrompt = new PromptTemplate({
        inputVariables: ['api_docs', 'question', 'api_url_body', 'api_response'],
        template: ansPrompt ? ansPrompt : API_RESPONSE_RAW_PROMPT_TEMPLATE
    })

    const chain = APIChain.fromLLMAndAPIDocs(llm, documents, {
        apiUrlPrompt,
        apiResponsePrompt,
        verbose: process.env.DEBUG === 'true' ? true : false,
        headers: typeof headers === 'object' ? headers : headers ? JSON.parse(headers) : {}
    })
    return chain
}

module.exports = { nodeClass: POSTApiChain_Chains }
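Note: the request chain here is prompted (see postCore.ts below) to emit a single JSON string with "url" and "data" keys. An illustrative example of what the model is expected to return, and how it is consumed (values are made up):

// Illustrative model output; postCore's APIChain._call parses it like this:
const api_url_body = '{"url": "https://api.example.com/v1/echo", "data": {"message": "hello"}}'
const { url, data } = JSON.parse(api_url_body) // url: where to POST, data: the JSON body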
packages/components/nodes/chains/ApiChain/apichain.svg
ADDED
packages/components/nodes/chains/ApiChain/postCore.ts
ADDED
@@ -0,0 +1,162 @@
import { BaseLanguageModel } from 'langchain/base_language'
import { CallbackManagerForChainRun } from 'langchain/callbacks'
import { BaseChain, ChainInputs, LLMChain, SerializedAPIChain } from 'langchain/chains'
import { BasePromptTemplate, PromptTemplate } from 'langchain/prompts'
import { ChainValues } from 'langchain/schema'
import fetch from 'node-fetch'

export const API_URL_RAW_PROMPT_TEMPLATE = `You are given the below API Documentation:
{api_docs}
Using this documentation, generate a json string with two keys: "url" and "data".
The value of "url" should be a string, which is the API url to call for answering the user question.
The value of "data" should be a dictionary of key-value pairs you want to POST to the url as a JSON body.
Be careful to always use double quotes for strings in the json string.
You should build the json string in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.

Question:{question}
json string:`

export const API_RESPONSE_RAW_PROMPT_TEMPLATE = `${API_URL_RAW_PROMPT_TEMPLATE} {api_url_body}

Here is the response from the API:

{api_response}

Summarize this response to answer the original question.

Summary:`

const defaultApiUrlPrompt = new PromptTemplate({
    inputVariables: ['api_docs', 'question'],
    template: API_URL_RAW_PROMPT_TEMPLATE
})

const defaultApiResponsePrompt = new PromptTemplate({
    inputVariables: ['api_docs', 'question', 'api_url_body', 'api_response'],
    template: API_RESPONSE_RAW_PROMPT_TEMPLATE
})

export interface APIChainInput extends Omit<ChainInputs, 'memory'> {
    apiAnswerChain: LLMChain
    apiRequestChain: LLMChain
    apiDocs: string
    inputKey?: string
    headers?: Record<string, string>
    /** Key to use for output, defaults to `output` */
    outputKey?: string
}

export type APIChainOptions = {
    headers?: Record<string, string>
    apiUrlPrompt?: BasePromptTemplate
    apiResponsePrompt?: BasePromptTemplate
}

export class APIChain extends BaseChain implements APIChainInput {
    apiAnswerChain: LLMChain

    apiRequestChain: LLMChain

    apiDocs: string

    headers = {}

    inputKey = 'question'

    outputKey = 'output'

    get inputKeys() {
        return [this.inputKey]
    }

    get outputKeys() {
        return [this.outputKey]
    }

    constructor(fields: APIChainInput) {
        super(fields)
        this.apiRequestChain = fields.apiRequestChain
        this.apiAnswerChain = fields.apiAnswerChain
        this.apiDocs = fields.apiDocs
        this.inputKey = fields.inputKey ?? this.inputKey
        this.outputKey = fields.outputKey ?? this.outputKey
        this.headers = fields.headers ?? this.headers
    }

    /** @ignore */
    async _call(values: ChainValues, runManager?: CallbackManagerForChainRun): Promise<ChainValues> {
        try {
            const question: string = values[this.inputKey]

            const api_url_body = await this.apiRequestChain.predict({ question, api_docs: this.apiDocs }, runManager?.getChild())

            const { url, data } = JSON.parse(api_url_body)

            const res = await fetch(url, {
                method: 'POST',
                headers: this.headers,
                body: JSON.stringify(data)
            })

            const api_response = await res.text()

            const answer = await this.apiAnswerChain.predict(
                { question, api_docs: this.apiDocs, api_url_body, api_response },
                runManager?.getChild()
            )

            return { [this.outputKey]: answer }
        } catch (error) {
            return { [this.outputKey]: error }
        }
    }

    _chainType() {
        return 'api_chain' as const
    }

    static async deserialize(data: SerializedAPIChain) {
        const { api_request_chain, api_answer_chain, api_docs } = data

        if (!api_request_chain) {
            throw new Error('APIChain must have api_request_chain')
        }
        if (!api_answer_chain) {
            throw new Error('APIChain must have api_answer_chain')
        }
        if (!api_docs) {
            throw new Error('APIChain must have api_docs')
        }

        return new APIChain({
            apiAnswerChain: await LLMChain.deserialize(api_answer_chain),
            apiRequestChain: await LLMChain.deserialize(api_request_chain),
            apiDocs: api_docs
        })
    }

    serialize(): SerializedAPIChain {
        return {
            _type: this._chainType(),
            api_answer_chain: this.apiAnswerChain.serialize(),
            api_request_chain: this.apiRequestChain.serialize(),
            api_docs: this.apiDocs
        }
    }

    static fromLLMAndAPIDocs(
        llm: BaseLanguageModel,
        apiDocs: string,
        options: APIChainOptions & Omit<APIChainInput, 'apiAnswerChain' | 'apiRequestChain' | 'apiDocs'> = {}
    ): APIChain {
        const { apiUrlPrompt = defaultApiUrlPrompt, apiResponsePrompt = defaultApiResponsePrompt } = options
        const apiRequestChain = new LLMChain({ prompt: apiUrlPrompt, llm })
        const apiAnswerChain = new LLMChain({ prompt: apiResponsePrompt, llm })
        return new this({
            apiAnswerChain,
            apiRequestChain,
            apiDocs,
            ...options
        })
    }
}
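Note: a minimal sketch of using this custom APIChain directly. The model choice and the API doc text are placeholder assumptions:

import { OpenAI } from 'langchain/llms/openai'
import { APIChain } from './postCore'

async function demoPostApiChain(): Promise<string> {
    const llm = new OpenAI({ temperature: 0 })
    // Hypothetical documentation for a POST endpoint.
    const apiDocs = 'POST https://api.example.com/v1/echo with JSON body {"message": string}; returns the message back'

    const chain = APIChain.fromLLMAndAPIDocs(llm, apiDocs, {
        headers: { 'Content-Type': 'application/json' }
    })

    // _call asks the request chain for {"url", "data"}, POSTs it, then has the answer chain summarize.
    return chain.run('Echo back the word hello')
}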
packages/components/nodes/chains/ConversationChain/ConversationChain.ts
ADDED
@@ -0,0 +1,104 @@
import { ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
import { ConversationChain } from 'langchain/chains'
import { CustomChainHandler, getBaseClasses } from '../../../src/utils'
import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate } from 'langchain/prompts'
import { BufferMemory, ChatMessageHistory } from 'langchain/memory'
import { BaseChatModel } from 'langchain/chat_models/base'
import { AIChatMessage, HumanChatMessage } from 'langchain/schema'

const systemMessage = `The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.`

class ConversationChain_Chains implements INode {
    label: string
    name: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    description: string
    inputs: INodeParams[]

    constructor() {
        this.label = 'Conversation Chain'
        this.name = 'conversationChain'
        this.type = 'ConversationChain'
        this.icon = 'chain.svg'
        this.category = 'Chains'
        this.description = 'Chat model-specific conversational chain with memory'
        this.baseClasses = [this.type, ...getBaseClasses(ConversationChain)]
        this.inputs = [
            {
                label: 'Language Model',
                name: 'model',
                type: 'BaseChatModel'
            },
            {
                label: 'Memory',
                name: 'memory',
                type: 'BaseMemory'
            },
            {
                label: 'System Message',
                name: 'systemMessagePrompt',
                type: 'string',
                rows: 4,
                additionalParams: true,
                optional: true,
                placeholder: 'You are a helpful assistant that writes code'
            }
        ]
    }

    async init(nodeData: INodeData): Promise<any> {
        const model = nodeData.inputs?.model as BaseChatModel
        const memory = nodeData.inputs?.memory as BufferMemory
        const prompt = nodeData.inputs?.systemMessagePrompt as string

        const obj: any = {
            llm: model,
            memory,
            verbose: process.env.DEBUG === 'true' ? true : false
        }

        const chatPrompt = ChatPromptTemplate.fromPromptMessages([
            SystemMessagePromptTemplate.fromTemplate(prompt ? `${prompt}\n${systemMessage}` : systemMessage),
            new MessagesPlaceholder(memory.memoryKey ?? 'chat_history'),
            HumanMessagePromptTemplate.fromTemplate('{input}')
        ])
        obj.prompt = chatPrompt

        const chain = new ConversationChain(obj)
        return chain
    }

    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
        const chain = nodeData.instance as ConversationChain
        const memory = nodeData.inputs?.memory as BufferMemory

        if (options && options.chatHistory) {
            const chatHistory = []
            const histories: IMessage[] = options.chatHistory

            for (const message of histories) {
                if (message.type === 'apiMessage') {
                    chatHistory.push(new AIChatMessage(message.message))
                } else if (message.type === 'userMessage') {
                    chatHistory.push(new HumanChatMessage(message.message))
                }
            }
            memory.chatHistory = new ChatMessageHistory(chatHistory)
            chain.memory = memory
        }

        if (options.socketIO && options.socketIOClientId) {
            const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
            const res = await chain.call({ input }, [handler])
            return res?.response
        } else {
            const res = await chain.call({ input })
            return res?.response
        }
    }
}

module.exports = { nodeClass: ConversationChain_Chains }
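Note: for intuition, the composed prompt resolves into a message list at call time roughly as below (a sketch; the system text and history are illustrative):

import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate } from 'langchain/prompts'
import { HumanChatMessage } from 'langchain/schema'

const chatPrompt = ChatPromptTemplate.fromPromptMessages([
    SystemMessagePromptTemplate.fromTemplate('You are a helpful assistant.'),
    new MessagesPlaceholder('chat_history'),
    HumanMessagePromptTemplate.fromTemplate('{input}')
])

// BufferMemory splices the stored turns into the placeholder; shown here with one prior human turn.
const messages = await chatPrompt.formatMessages({
    chat_history: [new HumanChatMessage('Hi')],
    input: 'What can you do?'
})
// messages: [system message, HumanChatMessage('Hi'), HumanChatMessage('What can you do?')]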
packages/components/nodes/chains/ConversationChain/chain.svg
ADDED
packages/components/nodes/chains/ConversationalRetrievalQAChain/ConversationalRetrievalQAChain.ts
ADDED
@@ -0,0 +1,163 @@
import { BaseLanguageModel } from 'langchain/base_language'
import { ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
import { CustomChainHandler, getBaseClasses } from '../../../src/utils'
import { ConversationalRetrievalQAChain } from 'langchain/chains'
import { AIChatMessage, BaseRetriever, HumanChatMessage } from 'langchain/schema'
import { BaseChatMemory, BufferMemory, ChatMessageHistory } from 'langchain/memory'
import { PromptTemplate } from 'langchain/prompts'

const default_qa_template = `Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.

{context}

Question: {question}
Helpful Answer:`

const qa_template = `Use the following pieces of context to answer the question at the end.

{context}

Question: {question}
Helpful Answer:`

class ConversationalRetrievalQAChain_Chains implements INode {
    label: string
    name: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    description: string
    inputs: INodeParams[]

    constructor() {
        this.label = 'Conversational Retrieval QA Chain'
        this.name = 'conversationalRetrievalQAChain'
        this.type = 'ConversationalRetrievalQAChain'
        this.icon = 'chain.svg'
        this.category = 'Chains'
        this.description = 'Document QA - built on RetrievalQAChain to provide a chat history component'
        this.baseClasses = [this.type, ...getBaseClasses(ConversationalRetrievalQAChain)]
        this.inputs = [
            {
                label: 'Language Model',
                name: 'model',
                type: 'BaseLanguageModel'
            },
            {
                label: 'Vector Store Retriever',
                name: 'vectorStoreRetriever',
                type: 'BaseRetriever'
            },
            {
                label: 'Return Source Documents',
                name: 'returnSourceDocuments',
                type: 'boolean',
                optional: true
            },
            {
                label: 'System Message',
                name: 'systemMessagePrompt',
                type: 'string',
                rows: 4,
                additionalParams: true,
                optional: true,
                placeholder:
                    'I want you to act as a document that I am having a conversation with. Your name is "AI Assistant". You will provide me with answers from the given info. If the answer is not included, say exactly "Hmm, I am not sure." and stop after that. Refuse to answer any question not about the info. Never break character.'
            },
            {
                label: 'Chain Option',
                name: 'chainOption',
                type: 'options',
                options: [
                    {
                        label: 'MapReduceDocumentsChain',
                        name: 'map_reduce',
                        description:
                            'Suitable for QA tasks over larger documents; can run the preprocessing step in parallel, reducing the running time'
                    },
                    {
                        label: 'RefineDocumentsChain',
                        name: 'refine',
                        description: 'Suitable for QA tasks over a large number of documents.'
                    },
                    {
                        label: 'StuffDocumentsChain',
                        name: 'stuff',
                        description: 'Suitable for QA tasks over a small number of documents.'
                    }
                ],
                additionalParams: true,
                optional: true
            }
        ]
    }

    async init(nodeData: INodeData): Promise<any> {
        const model = nodeData.inputs?.model as BaseLanguageModel
        const vectorStoreRetriever = nodeData.inputs?.vectorStoreRetriever as BaseRetriever
        const systemMessagePrompt = nodeData.inputs?.systemMessagePrompt as string
        const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean
        const chainOption = nodeData.inputs?.chainOption as string

        const obj: any = {
            verbose: process.env.DEBUG === 'true' ? true : false,
            qaChainOptions: {
                type: 'stuff',
                prompt: PromptTemplate.fromTemplate(systemMessagePrompt ? `${systemMessagePrompt}\n${qa_template}` : default_qa_template)
            },
            memory: new BufferMemory({
                memoryKey: 'chat_history',
                inputKey: 'question',
                outputKey: 'text',
                returnMessages: true
            })
        }
        if (returnSourceDocuments) obj.returnSourceDocuments = returnSourceDocuments
        if (chainOption) obj.qaChainOptions = { ...obj.qaChainOptions, type: chainOption }

        const chain = ConversationalRetrievalQAChain.fromLLM(model, vectorStoreRetriever, obj)
        return chain
    }

    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
        const chain = nodeData.instance as ConversationalRetrievalQAChain
        const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean
        const model = nodeData.inputs?.model

        // Temporary fix: https://github.com/hwchase17/langchainjs/issues/754
        model.streaming = false
        chain.questionGeneratorChain.llm = model

        const obj = { question: input }

        if (chain.memory && options && options.chatHistory) {
            const chatHistory = []
            const histories: IMessage[] = options.chatHistory
            const memory = chain.memory as BaseChatMemory

            for (const message of histories) {
                if (message.type === 'apiMessage') {
                    chatHistory.push(new AIChatMessage(message.message))
                } else if (message.type === 'userMessage') {
                    chatHistory.push(new HumanChatMessage(message.message))
                }
            }
            memory.chatHistory = new ChatMessageHistory(chatHistory)
            chain.memory = memory
        }

        if (options.socketIO && options.socketIOClientId) {
            const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId, undefined, returnSourceDocuments)
            const res = await chain.call(obj, [handler])
            if (res.text && res.sourceDocuments) return res
            return res?.text
        } else {
            const res = await chain.call(obj)
            if (res.text && res.sourceDocuments) return res
            return res?.text
        }
    }
}

module.exports = { nodeClass: ConversationalRetrievalQAChain_Chains }
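Note: when Return Source Documents is enabled, run returns the whole result object rather than just the text. An illustrative shape (values are made up):

const res = {
    text: 'The warranty lasts 12 months.',
    sourceDocuments: [{ pageContent: 'Warranty: 12 months from date of purchase.', metadata: { source: 'manual.pdf' } }]
}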
packages/components/nodes/chains/ConversationalRetrievalQAChain/chain.svg
ADDED
packages/components/nodes/chains/LLMChain/LLMChain.ts
ADDED
@@ -0,0 +1,167 @@
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { CustomChainHandler, getBaseClasses } from '../../../src/utils'
import { LLMChain } from 'langchain/chains'
import { BaseLanguageModel } from 'langchain/base_language'

class LLMChain_Chains implements INode {
    label: string
    name: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    description: string
    inputs: INodeParams[]
    outputs: INodeOutputsValue[]

    constructor() {
        this.label = 'LLM Chain'
        this.name = 'llmChain'
        this.type = 'LLMChain'
        this.icon = 'chain.svg'
        this.category = 'Chains'
        this.description = 'Chain to run queries against LLMs'
        this.baseClasses = [this.type, ...getBaseClasses(LLMChain)]
        this.inputs = [
            {
                label: 'Language Model',
                name: 'model',
                type: 'BaseLanguageModel'
            },
            {
                label: 'Prompt',
                name: 'prompt',
                type: 'BasePromptTemplate'
            },
            {
                label: 'Chain Name',
                name: 'chainName',
                type: 'string',
                placeholder: 'Name Your Chain',
                optional: true
            }
        ]
        this.outputs = [
            {
                label: 'LLM Chain',
                name: 'llmChain',
                baseClasses: [this.type, ...getBaseClasses(LLMChain)]
            },
            {
                label: 'Output Prediction',
                name: 'outputPrediction',
                baseClasses: ['string']
            }
        ]
    }

    async init(nodeData: INodeData, input: string): Promise<any> {
        const model = nodeData.inputs?.model as BaseLanguageModel
        const prompt = nodeData.inputs?.prompt
        const output = nodeData.outputs?.output as string
        const promptValues = prompt.promptValues as ICommonObject

        if (output === this.name) {
            const chain = new LLMChain({ llm: model, prompt, verbose: process.env.DEBUG === 'true' ? true : false })
            return chain
        } else if (output === 'outputPrediction') {
            const chain = new LLMChain({ llm: model, prompt, verbose: process.env.DEBUG === 'true' ? true : false })
            const inputVariables = chain.prompt.inputVariables as string[] // ["product"]
            const res = await runPrediction(inputVariables, chain, input, promptValues)
            // eslint-disable-next-line no-console
            console.log('\x1b[92m\x1b[1m\n*****OUTPUT PREDICTION*****\n\x1b[0m\x1b[0m')
            // eslint-disable-next-line no-console
            console.log(res)
            return res
        }
    }

    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
        const inputVariables = nodeData.instance.prompt.inputVariables as string[] // ["product"]
        const chain = nodeData.instance as LLMChain
        const promptValues = nodeData.inputs?.prompt.promptValues as ICommonObject

        const res = options.socketIO
            ? await runPrediction(inputVariables, chain, input, promptValues, true, options.socketIO, options.socketIOClientId)
            : await runPrediction(inputVariables, chain, input, promptValues)
        // eslint-disable-next-line no-console
        console.log('\x1b[93m\x1b[1m\n*****FINAL RESULT*****\n\x1b[0m\x1b[0m')
        // eslint-disable-next-line no-console
        console.log(res)
        return res
    }
}

const runPrediction = async (
    inputVariables: string[],
    chain: LLMChain,
    input: string,
    promptValues: ICommonObject,
    isStreaming?: boolean,
    socketIO?: any,
    socketIOClientId = ''
) => {
    if (inputVariables.length === 1) {
        if (isStreaming) {
            const handler = new CustomChainHandler(socketIO, socketIOClientId)
            const res = await chain.run(input, [handler])
            return res
        } else {
            const res = await chain.run(input)
            return res
        }
    } else if (inputVariables.length > 1) {
        const seen: string[] = []

        for (const variable of inputVariables) {
            seen.push(variable)
            if (promptValues[variable]) {
                seen.pop()
            }
        }

        if (seen.length === 0) {
            // All inputVariables have fixed values specified
            const options = {
                ...promptValues
            }
            if (isStreaming) {
                const handler = new CustomChainHandler(socketIO, socketIOClientId)
                const res = await chain.call(options, [handler])
                return res?.text
            } else {
                const res = await chain.call(options)
                return res?.text
            }
        } else if (seen.length === 1) {
            // If one inputVariable is not specified, use the input (user's question) as its value
            const lastValue = seen.pop()
            if (!lastValue) throw new Error('Please provide Prompt Values')
            const options = {
                ...promptValues,
                [lastValue]: input
            }
            if (isStreaming) {
                const handler = new CustomChainHandler(socketIO, socketIOClientId)
                const res = await chain.call(options, [handler])
                return res?.text
            } else {
                const res = await chain.call(options)
                return res?.text
            }
        } else {
            throw new Error(`Please provide Prompt Values for: ${seen.join(', ')}`)
        }
    } else {
        if (isStreaming) {
            const handler = new CustomChainHandler(socketIO, socketIOClientId)
            const res = await chain.run(input, [handler])
            return res
        } else {
            const res = await chain.run(input)
            return res
        }
    }
}

module.exports = { nodeClass: LLMChain_Chains }
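Note: the `seen` bookkeeping in runPrediction reduces to: every input variable without a fixed prompt value must be covered by the user's input. The three outcomes for a hypothetical prompt 'Translate {text} to {language}':

// 1. promptValues = { text: 'hola', language: 'English' }  -> seen = [], chain.call(promptValues)
// 2. promptValues = { language: 'English' }                -> seen = ['text'], user input fills 'text'
// 3. promptValues = {}                                     -> seen = ['text', 'language'],
//    throws: Please provide Prompt Values for: text, language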
packages/components/nodes/chains/LLMChain/chain.svg
ADDED
packages/components/nodes/chains/MultiPromptChain/MultiPromptChain.ts
ADDED
@@ -0,0 +1,74 @@
import { BaseLanguageModel } from 'langchain/base_language'
import { ICommonObject, INode, INodeData, INodeParams, PromptRetriever } from '../../../src/Interface'
import { CustomChainHandler, getBaseClasses } from '../../../src/utils'
import { MultiPromptChain } from 'langchain/chains'

class MultiPromptChain_Chains implements INode {
    label: string
    name: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    description: string
    inputs: INodeParams[]

    constructor() {
        this.label = 'Multi Prompt Chain'
        this.name = 'multiPromptChain'
        this.type = 'MultiPromptChain'
        this.icon = 'chain.svg'
        this.category = 'Chains'
        this.description = 'Chain that automatically picks an appropriate prompt from multiple prompt templates'
        this.baseClasses = [this.type, ...getBaseClasses(MultiPromptChain)]
        this.inputs = [
            {
                label: 'Language Model',
                name: 'model',
                type: 'BaseLanguageModel'
            },
            {
                label: 'Prompt Retriever',
                name: 'promptRetriever',
                type: 'PromptRetriever',
                list: true
            }
        ]
    }

    async init(nodeData: INodeData): Promise<any> {
        const model = nodeData.inputs?.model as BaseLanguageModel
        const promptRetriever = nodeData.inputs?.promptRetriever as PromptRetriever[]
        const promptNames = []
        const promptDescriptions = []
        const promptTemplates = []

        for (const prompt of promptRetriever) {
            promptNames.push(prompt.name)
            promptDescriptions.push(prompt.description)
            promptTemplates.push(prompt.systemMessage)
        }

        const chain = MultiPromptChain.fromPrompts(model, promptNames, promptDescriptions, promptTemplates, undefined, {
            verbose: process.env.DEBUG === 'true' ? true : false
        } as any)

        return chain
    }

    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
        const chain = nodeData.instance as MultiPromptChain
        const obj = { input }

        if (options.socketIO && options.socketIOClientId) {
            const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
            const res = await chain.call(obj, [handler])
            return res?.text
        } else {
            const res = await chain.call(obj)
            return res?.text
        }
    }
}

module.exports = { nodeClass: MultiPromptChain_Chains }
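Note: a sketch of what fromPrompts receives once the retrievers are unpacked; the names, descriptions, and templates here are illustrative:

const promptNames = ['physics', 'history']
const promptDescriptions = ['Good for answering physics questions', 'Good for answering history questions']
const promptTemplates = [
    'You are a physics professor. Answer the question: {input}',
    'You are a historian. Answer the question: {input}'
]
// The router LLM picks the best-matching prompt by description, then that prompt runs on {input}.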
packages/components/nodes/chains/MultiPromptChain/chain.svg
ADDED