automaton layer

rohit 2025-07-05 23:59:03 +05:30
commit cd823a2d9e
7466 changed files with 620111 additions and 0 deletions

1901
.all-contributorsrc Normal file

File diff suppressed because it is too large

7
.cursor/mcp.json Normal file

@ -0,0 +1,7 @@
{
"mcpServers": {
"nx-mcp": {
"url": "http://localhost:9536/sse"
}
}
}

43
.devcontainer/Dockerfile Normal file

@ -0,0 +1,43 @@
# [Choice] Node.js version (use -bullseye variants on local arm64/Apple Silicon): 20, 18, 16, 14, 20-bullseye, 18-bullseye, 16-bullseye, 14-bullseye, 20-buster, 18-buster, 16-buster, 14-buster
ARG VARIANT=20-bullseye
FROM mcr.microsoft.com/vscode/devcontainers/javascript-node:0-${VARIANT}
# [Optional] Uncomment this section to install additional OS packages.
# RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
# && apt-get -y install --no-install-recommends <your-package-list-here>
# [Optional] Uncomment if you want to install an additional version of node using nvm
# ARG EXTRA_NODE_VERSION=10
# RUN su node -c "source /usr/local/share/nvm/nvm.sh && nvm install ${EXTRA_NODE_VERSION}"
# [Optional] Uncomment if you want to install more global node modules
# RUN su node -c "npm install -g <your-package-list-here>"
RUN npm install -g nx
RUN apt-get update && apt-get install -y git
RUN apt-get update \
&& apt-get install -y --no-install-recommends \
locales \
locales-all \
libcap-dev \
&& rm -rf /var/lib/apt/lists/*
RUN apt-get update && apt-get install -y poppler-utils poppler-data
# Set the locale
ENV LANG en_US.UTF-8
ENV LANGUAGE en_US:en
ENV LC_ALL en_US.UTF-8
COPY default.cf /usr/local/etc/isolate
RUN npm i -g npm@9.9.3
RUN npm i -g pnpm@9.15.0
RUN npm i -g cross-env@7.0.3
RUN pnpm config set store-dir /root/.local/share/pnpm/store
# Update to use Node.js 20 packages
RUN pnpm store add @tsconfig/node20@20.1.4
RUN pnpm store add @types/node@20.14.8
RUN pnpm store add typescript@4.9.4

13
.devcontainer/codespaces.sh Normal file

@ -0,0 +1,13 @@
echo "Running Setup for Codespaces"
type -p curl >/dev/null || sudo apt install curl -y
curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg | sudo dd of=/usr/share/keyrings/githubcli-archive-keyring.gpg \
&& sudo chmod go+r /usr/share/keyrings/githubcli-archive-keyring.gpg \
&& echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | sudo tee /etc/apt/sources.list.d/github-cli.list > /dev/null \
&& sudo apt update \
&& sudo apt install gh -y
gh codespace ports visibility 3000:public -c $CODESPACE_NAME
gh codespace ports visibility 4200:public -c $CODESPACE_NAME
export BACKEND_URL=$(gh codespace ports -c $CODESPACE_NAME --json sourcePort,browseUrl | jq -r '.[] | select(.sourcePort == 3000) | .browseUrl')
sed -i "s|apiUrl: 'http://localhost:3000/v1'|apiUrl: '${BACKEND_URL}/v1'|g" /workspace/packages/ui/common/src/lib/environments/environment.ts
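The sed command above rewrites the frontend's API URL so the Codespaces preview calls the forwarded backend port. The environment file itself is not part of this excerpt; as an assumption, only the apiUrl literal below is implied by the sed expression, and the rest is a hypothetical sketch:

// packages/ui/common/src/lib/environments/environment.ts (sketch, not the committed file)
export const environment = {
  production: false, // assumption: not shown in this excerpt
  apiUrl: 'http://localhost:3000/v1', // matched literally by the sed above and replaced with `${BACKEND_URL}/v1`
};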

24
.devcontainer/default.cf Normal file

@ -0,0 +1,24 @@
# This is a configuration file for Isolate
# All sandboxes are created under this directory.
# To avoid symlink attacks, this directory and all its ancestors
# must be writeable only to root.
box_root = /var/local/lib/isolate
# Root of the control group hierarchy
cg_root = /sys/fs/cgroup
# If the following variable is defined, the per-box cgroups
# are created as sub-groups of the named cgroup
#cg_parent = boxes
# Block of UIDs and GIDs reserved for sandboxes
first_uid = 60000
first_gid = 60000
num_boxes = 1000
# Per-box settings of the set of allowed CPUs and NUMA nodes
# (see linux/Documentation/cgroups/cpusets.txt for precise syntax)
#box0.cpus = 4-7
#box0.mems = 1

33
.devcontainer/devcontainer.json Normal file

@ -0,0 +1,33 @@
// Update the VARIANT arg in docker-compose.yml to pick a Node.js version
{
"name": "Activepieces Dev",
"dockerComposeFile": "docker-compose.yml",
"service": "app",
"workspaceFolder": "/workspace",
// Configure tool-specific properties.
"customizations": {
// Configure properties specific to VS Code.
"vscode": {
// Add the IDs of extensions you want installed when the container is created.
"extensions": [
"cipchk.cssrem",
"huizhou.githd"
]
}
},
"forwardPorts": [3000, 4200, 5432],
"postAttachCommand": "/bin/bash .devcontainer/setup.sh",
"hostRequirements": {
"cpus": 4,
"memory": "8gb"
},
// Use 'forwardPorts' to make a list of ports inside the container available locally.
// This can be used to network with other containers or with the host.
// Use 'postCreateCommand' to run commands after the container is created.
// "postCreateCommand": "yarn install",
"remoteUser": "root",
"postCreateCommand": "npm ci"
}

54
.devcontainer/docker-compose.yml Normal file

@ -0,0 +1,54 @@
version: '3.8'
networks:
activepieces:
driver: bridge
services:
app:
privileged: true
build:
context: .
dockerfile: Dockerfile
volumes:
- ..:/workspace:cached
# Connect the app to the same network as db and redis
networks:
- activepieces
# Overrides default command so things don't shut down after the process ends.
command: sleep infinity
# Uncomment the next line to use a non-root user for all processes.
# user: node
# Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
# (Adding the "ports" property to this file will not forward from a Codespace.)
db:
image: postgres:14.4
environment:
POSTGRES_DB: activepieces
POSTGRES_USER: postgres
POSTGRES_PASSWORD: A79Vm5D4p2VQHOp2gd5
networks:
- activepieces
volumes:
- postgres_data:/var/lib/postgresql/data
ports:
- "5432:5432"
redis:
image: redis:7.0.7
networks:
- activepieces
volumes:
- redis_data:/data
ports:
- "6379:6379"
volumes:
postgres_data:
redis_data:

8
.devcontainer/setup.sh Normal file

@ -0,0 +1,8 @@
# exit this file if we are not in Codespaces
if [ -z "${CODESPACES}" ]; then
exit 0
fi
echo "Running Setup for Codespaces"
sh .devcontainer/codespaces.sh

17
.dockerignore Normal file

@ -0,0 +1,17 @@
.angular
.dockerignore
.env
.git
.gitattributes
.github
.history
.idea
*.log
*.md
.vscode
builds
deploy
Dockerfile
dist
docs
node_modules

12
.editorconfig Normal file

@ -0,0 +1,12 @@
# Editor configuration, see http://editorconfig.org
root = true
[*]
charset = utf-8
indent_style = space
insert_final_newline = true
trim_trailing_whitespace = true
[*.md]
max_line_length = off
trim_trailing_whitespace = false

28
.env.example Normal file

@ -0,0 +1,28 @@
## It's advisable to consult the documentation and use tools/deploy.sh to generate the passwords and keys instead of filling them in manually.
AP_ENGINE_EXECUTABLE_PATH=dist/packages/engine/main.js
## Random Long Password (Optional for community edition)
AP_API_KEY=
## Encryption key, 32 hex characters (e.g. generated with 'openssl rand -hex 16')
AP_ENCRYPTION_KEY=
## JWT Secret
AP_JWT_SECRET=
AP_ENVIRONMENT=prod
AP_FRONTEND_URL=http://localhost:8080
AP_WEBHOOK_TIMEOUT_SECONDS=30
AP_TRIGGER_DEFAULT_POLL_INTERVAL=5
AP_POSTGRES_DATABASE=activepieces
AP_POSTGRES_HOST=postgres
AP_POSTGRES_PORT=5432
AP_POSTGRES_USERNAME=postgres
AP_POSTGRES_PASSWORD=
AP_EXECUTION_MODE=UNSANDBOXED
AP_REDIS_HOST=redis
AP_REDIS_PORT=6379
AP_FLOW_TIMEOUT_SECONDS=600
AP_TELEMETRY_ENABLED=true
AP_TEMPLATES_SOURCE_URL="https://cloud.activepieces.com/api/v1/flow-templates"
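The comment at the top of this file points to tools/deploy.sh for generating secrets. As an illustrative alternative only (not the project's official tooling), equivalent values can be produced with Node's built-in crypto module; the byte lengths follow the 'openssl rand -hex 16' and 'openssl rand -hex 32' hints that appear later in deploy/pulumi/Pulumi.yaml:

// generate-secrets.ts: illustrative sketch only; prefer tools/deploy.sh as the file suggests
import { randomBytes } from 'node:crypto';

// AP_ENCRYPTION_KEY: 32 hex characters (16 random bytes), matching 'openssl rand -hex 16'
console.log(`AP_ENCRYPTION_KEY=${randomBytes(16).toString('hex')}`);

// AP_JWT_SECRET: a long random string; 32 bytes shown here, matching 'openssl rand -hex 32'
console.log(`AP_JWT_SECRET=${randomBytes(32).toString('hex')}`);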

2
.eslintignore Normal file

@ -0,0 +1,2 @@
node_modules
deploy

35
.eslintrc.base.json Normal file

@ -0,0 +1,35 @@
{
"root": true,
"ignorePatterns": ["**/*"],
"plugins": ["@nx"],
"overrides": [
{
"files": ["*.ts", "*.tsx", "*.js", "*.jsx"],
"rules": {
"@nx/enforce-module-boundaries": [
"error",
{
"enforceBuildableLibDependency": true,
"allow": [],
"depConstraints": [
{
"sourceTag": "*",
"onlyDependOnLibsWithTags": ["*"]
}
]
}
]
}
},
{
"files": ["*.ts", "*.tsx"],
"extends": ["plugin:@nx/typescript"],
"rules": {}
},
{
"files": ["*.js", "*.jsx"],
"extends": ["plugin:@nx/javascript"],
"rules": {}
}
]
}

54
.eslintrc.json Normal file

@ -0,0 +1,54 @@
{
"root": true,
"ignorePatterns": ["**/*", "deploy/**/*"],
"plugins": ["@nx"],
"overrides": [
{
"files": ["*.ts", "*.tsx", "*.js", "*.jsx"],
"rules": {
"@nx/enforce-module-boundaries": [
"error",
{
"enforceBuildableLibDependency": true,
"allow": [],
"depConstraints": [
{
"sourceTag": "*",
"onlyDependOnLibsWithTags": ["*"]
}
]
}
],
"no-restricted-imports": [
"error",
{
"patterns": ["lodash", "lodash/*"]
}
]
}
},
{
"files": ["*.ts", "*.tsx"],
"extends": ["plugin:@nx/typescript"],
"rules": {
"@typescript-eslint/no-extra-semi": "error",
"no-extra-semi": "off"
}
},
{
"files": ["*.js", "*.jsx"],
"extends": ["plugin:@nx/javascript"],
"rules": {
"@typescript-eslint/no-extra-semi": "error",
"no-extra-semi": "off"
}
},
{
"files": ["*.spec.ts", "*.spec.tsx", "*.spec.js", "*.spec.jsx"],
"env": {
"jest": true
},
"rules": {}
}
]
}

103
.github/CODE_OF_CONDUCT.md vendored Normal file

@ -0,0 +1,103 @@
The primary source of the Code of Conduct is here.
## 1. Purpose
A primary goal of the Activepieces community is to support you and your business in the development, use and implementation of Activepieces. It aims to be inclusive and to add value to the largest number of participants, with the most varied and diverse backgrounds possible. As such, we are committed to providing a friendly, safe and welcoming environment for all.
This code of conduct outlines our expectations for all those who participate in our community, whether in-person or online, as well as the consequences for unacceptable behavior.
Your participation is contingent upon following these guidelines in all Activepieces activities, including but not limited to:
* Using Activepieces community resources.
* Working with other Activepiecesians and other Activepieces community participants whether virtually or co-located.
* Representing Activepieces at public events.
* Representing Activepieces in social media (official accounts, personal accounts, Facebook pages and groups).
* Participating in Activepieces sprints and training events.
* Participating in Activepieces-related forums, mailing lists, wikis, websites, chat channels, bugs, group or person-to-person meetings, and Activepieces-related correspondence.
We invite all those who participate in Activepieces activities online to help us create safe and positive experiences for everyone, everywhere.
## 2. Open Source & Culture Citizenship
A supplemental goal of this Code of Conduct is to increase open source and culture citizenship by encouraging participants to recognize and strengthen the relationships between our actions and their effects on our community.
Communities mirror the societies in which they exist and positive action is essential to counteract the many forms of inequality and abuses of power that exist in society.
If you see someone who is making an extra effort to ensure our community is welcoming, friendly, and encourages all participants to contribute to the fullest extent, please recognize their efforts.
## 3. Welcoming to all
We are committed to providing a friendly, safe and welcoming environment for all, regardless of level of experience or job role, gender identity and expression, sexual orientation, disability, personal appearance, body size, race, ethnicity, age, religion, national origin, citizenship and immigration status, neurodiversity, mental health or socio-economic status.
## 4. Expected Behavior
The following behaviors are expected and requested of all community members:
* Participate in an authentic and active way. In doing so, you contribute to the health and longevity of this community.
* Exercise consideration and respect in your speech and actions.
* Attempt collaboration before conflict.
* Guide conversations toward issue resolution.
* Refrain from demeaning, discriminatory, or harassing behavior and speech.
Alert Activepieces team members if you notice a dangerous situation, someone in distress, or violations of this Code of Conduct, even if they seem inconsequential.
## 5. Unacceptable Behavior
The following behaviors are considered harassment and are unacceptable within our community:
* **Violence and Threats of Violence** are not acceptable - online or offline. This includes incitement of violence toward any individual, including encouraging a person to commit self-harm. This also includes posting or threatening to post other people's personally identifying information (“doxxing”) online.
* **Public or private harassment** is never acceptable in any form.
* **Personal Attacks** Conflicts will inevitably arise, but frustration should never turn into a personal attack. It is not okay to insult, demean or belittle others. Attacking someone for their opinions, beliefs and ideas is not acceptable. It is important to speak directly when we disagree and when we think we need to improve, but such discussions must be conducted respectfully and professionally, remaining focused on the issue at hand.
* **Derogatory Language** Hurtful or harmful language is never acceptable in any context related to: background, family status, gender, gender identity or expression, marital status, sex, sexual orientation, personal appearance, body size, native language, age, ability, neurodiversity, mental health, race and/or ethnicity, national origin, citizenship and immigration status, socioeconomic status, religion, geographic location.
* **Unwelcome Sexual Attention or Physical Contact** Unwelcome sexual attention or unwelcome physical contact is not acceptable. This includes sexualized comments, jokes or imagery in interactions, communications or presentation materials, as well as inappropriate touching, groping, or sexual advances. This includes touching a person without permission, including sensitive areas such as their hair, pregnant stomach, mobility device (wheelchair, scooter, etc) or tattoos. This also includes physically blocking or intimidating another person. Physical contact or simulated physical contact (such as emojis like “kiss”) without affirmative consent is not acceptable. This includes sharing or distribution of sexualized images or text.
* **Disruptive Behavior** Sustained disruption of events, forums, or meetings, including talks and presentations, will not be tolerated. This includes spamming community discussions with the solicitation of unwanted products or services.
* **Influencing Disruptive Behavior** We will treat influencing or leading such activities the same way we treat the activities themselves, and thus the same consequences apply.
* **Corporate Promotions** Sharing of demo/trial/landing page links and other corporate promotions are never permitted unless explicitly requested by a community member. The only exceptions are that the moderated [Commercial forum category](https://forum.Activepieces.org/c/commercial) may be used to promote opportunities which may be relevant for members of the community (for example job opportunities, freelance gigs) and Activepieces Community Partners may promote their products and services on their partners page.
* **Scraping contacts** by name or any other personally identifiable information for unsolicited communication is never acceptable in any form.
## 6. Consequences of Unacceptable Behavior
Unacceptable behavior from any community member, including sponsors and those with decision-making authority, will not be tolerated.
Anyone asked to stop unacceptable behavior is expected to comply immediately.
If a community member engages in unacceptable behavior, we may take any action deemed appropriate, up to and including a temporary ban or permanent expulsion from the community without warning. Examples of sanctions which may be applied include but are not limited to:
* Verbal warnings.
* Written warnings.
* Temporary absence from participation.
* Long-term absence from participation.
* Being required to follow a conduct agreement that dictates the process of returning to the community.
## 7. Reporting Guidelines
If you are subject to or witness unacceptable behavior, or have any other concerns, please notify us as soon as possible by emailing info@activepieces.com, or contacting an Activepieces team member on the specific platform.
Processes for dealing with breaches of the Code of Conduct can be found [here][coc-breaches].
## 8. Addressing Grievances
Only permanent resolutions (such as bans) may be appealed. To appeal a decision, contact the Activepieces team at info@activepieces.com with your appeal and the team will review the situation.
## 9. Scope
We expect all community participants (contributors, moderators and other guests) to abide by this Code of Conduct in all community venues, online and in person, as well as in all one-on-one communications pertaining to community affairs.
While this code of conduct is specifically aimed at Activepieces's official resources and community, we recognize that it is possible for actions taken outside of Activepieces's official online or in-person spaces to have a deep impact on community health.
Resources or incidents which break this code of conduct for any reason in a non-Activepieces community location will be considered in the same way as resources or incidents from owned channels, and subject to the same sanctions.
## 10. Contact info
For more information, please contact info@activepieces.com.
## 11. License and attribution
This Code of Conduct is directly adapted from the Stumptown Syndicate and distributed under a [Creative Commons Attribution-ShareAlike license][cc-by-sa].
Additional text from [Mozilla Community Participation Guidelines][mozilla-guidelines] distributed under a [Creative Commons Attribution-ShareAlike license][cc-by-sa].
Reviewed and updated using the [Mozilla Code of Conduct Assessment Tool][mozilla-tool].
[coc-breaches]: </policies/code-of-conduct-breaches>
[mozilla-guidelines]: <https://www.mozilla.org/en-US/about/governance/policies/participation/>
[cc-by-sa]: <https://creativecommons.org/licenses/by-sa/3.0/>
[mozilla-tool]:<https://mozilla.github.io/diversity-coc-review.io>
(Code of Conduct is subject to change without notice).

27
.github/ISSUE_TEMPLATE/bug_report.md vendored Normal file

@ -0,0 +1,27 @@
---
name: Bug report
about: Create a report to help us improve
title: "[BUG]: the code piece fails"
labels: bug
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Additional context**
Add any other context about the problem here.


@ -0,0 +1,20 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: enhancement
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.

60
.github/ISSUE_TEMPLATE/mcp_bounty.md vendored Normal file

@ -0,0 +1,60 @@
---
name: MCP Bounty
about: Request a new Piece for Activepieces
title: '[Piece Request] <Your Product Name>'
labels: ['pieces']
assignees: ''
---
(Replace everything other than the titles that start with ##)
## 🧩 Product Overview
What does your product do? Who uses it?
---
## ⚙️ Actions
These are the things you want AI agents to be able to do with your product.
They'll also be available for automations in Activepieces.
Examples:
- Create Contact
- Send Message
- Update Deal
---
## ⏱️ Triggers
Triggers are only used in automation.
What events in your product should start a workflow?
Examples:
- New Contact Created
- Ticket Closed
---
## 📚 API Reference
Link to your public API docs or developer portal.
---
## 🧪 Test Account Access
How can contributors test your API? (Free trial, sandbox credentials, etc.)
---
## 💡 Extra Notes
Anything else worth mentioning? (Edge cases, beta features, known limitations)
---
## 🔄 Alternatives Explored (Optional)
Have you used other platforms or approaches for integration?
---
## 📬 Contact for Contributors (Optional)
If you're not actively monitoring this issue, how can contributors reach you?
Example: yourname@yourcompany.com

14
.github/ISSUE_TEMPLATE/piece-request.md vendored Normal file

@ -0,0 +1,14 @@
---
name: Piece Request
about: Request new Action / Trigger
title: ''
labels: pieces
assignees: ''
---
**Describe your usecase.**
A clear and concise description how exactly you want to use the piece.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions you've considered.

37
.github/pre-release-drafter.yml vendored Normal file

@ -0,0 +1,37 @@
include-pre-releases: true
exclude-labels:
- 'skip-changelog'
- 'release'
- 'pre-release'
categories:
- title: "⛓️‍💥 Breaking Changes"
labels:
- "⛓️‍💥 breaking-change"
- title: "✨ Exciting New Features"
labels:
- "🌟 feature"
- title: "🧩 Pieces"
labels:
- "🔌 pieces"
- title: "🛠️ Piece Framework"
labels:
- "🛠️ piece-framework"
- title: "🐞 Bug Fixes"
labels:
- "🐛 bug"
- title: "🎨 Enhancements & Polish"
labels:
- "✨ polishing"
- title: "📚 Documentation"
labels:
- "📚 documentation"
- title: "🧹 Maintenance"
labels:
- "🧹 clean up"
template: |
$CHANGES
## Thanks ❤️
$CONTRIBUTORS

17
.github/pull_request_template.md vendored Normal file

@ -0,0 +1,17 @@
## What does this PR do?
<!-- We need a clear description of what the PR does, as it will be used by the marketing team to generate the release notes. -->
### Explain How the Feature Works
<!-- Adding a video demonstration is optional but encouraged! It helps reviewers and the marketing team understand your implementation better. -->
<!-- [Insert the video link here] -->
### Relevant User Scenarios
<!-- List specific use cases where this feature would be valuable. -->
<!-- [Insert Pylon tickets or community posts here if possible] -->
Fixes # (issue)

40
.github/release-drafter.yml vendored Normal file

@ -0,0 +1,40 @@
include-pre-releases: false
exclude-labels:
- 'skip-changelog'
- 'release'
- 'pre-release'
categories:
- title: "⛓️‍💥 Breaking Changes"
labels:
- "⛓️‍💥 breaking-change"
- title: "✨ Exciting New Features"
labels:
- "🌟 feature"
- title: "🧩 Pieces"
labels:
- "🔌 pieces"
- title: "🛠️ Piece Framework"
labels:
- "🛠️ piece-framework"
- title: "🐞 Bug Fixes"
labels:
- "🐛 bug"
- title: "🎨 Enhancements & Polish"
labels:
- "✨ polishing"
- title: "📚 Documentation"
labels:
- "📚 documentation"
- title: "🧹 Maintenance & Dev Experience"
labels:
- "🧹 clean up"
- title: "Other Changes"
labels:
- "*"
template: |
$CHANGES
## Thanks ❤️
$CONTRIBUTORS

17
.github/stale.yml vendored Normal file

@ -0,0 +1,17 @@
# Number of days of inactivity before an issue becomes stale
daysUntilStale: 10
# Number of days of inactivity before a stale issue is closed
daysUntilClose: 5
# Issues with these labels will never be considered stale
exemptLabels:
- pinned
- security
# Label to use when marking an issue as stale
staleLabel: Automatically Closed
# Comment to post when marking an issue as stale. Set to `false` to disable
markComment: >
This issue has been automatically marked as stale because it has not had
recent activity. It will be closed if no further activity occurs. Thank you
for your contributions.
# Comment to post when closing a stale issue. Set to `false` to disable
closeComment: true

69
.github/workflows/automate-deploy.yml vendored Normal file

@ -0,0 +1,69 @@
name: Automate Deploy
on:
workflow_dispatch:
pull_request:
types: [closed]
branches:
- main
jobs:
Release:
if: github.event_name == 'workflow_dispatch' || (github.event.pull_request.merged == true && contains(github.event.pull_request.labels.*.name, 'auto-deploy'))
runs-on: ubuntu-latest
steps:
- name: Check out repository code
uses: actions/checkout@v3
- name: Set RELEASE env var from package.json
run: echo RELEASE=$(node --print "require('./package.json').version") >> $GITHUB_ENV
- name: Login to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Set up Depot CLI
uses: depot/setup-action@v1
- name: Build and push
uses: depot/build-push-action@v1
with:
project: du7O4b0e8P
token: ${{ secrets.DEPOT_PROJECT_TOKEN }}
context: .
file: ./Dockerfile
platforms: |
linux/amd64
push: true
tags: |
ghcr.io/activepieces/activepieces-cloud:${{ env.RELEASE }}.${{ github.sha }}.beta
- name: Configure SSH
run: |
mkdir -p ~/.ssh/
echo "$SSH_KEY" > ~/.ssh/ops.key
chmod 600 ~/.ssh/ops.key
cat >>~/.ssh/config <<END
Host ops
HostName $SSH_HOST
User $SSH_USER
IdentityFile ~/.ssh/ops.key
StrictHostKeyChecking no
END
env:
SSH_USER: ${{ secrets.DEV_OPS_USERNAME }}
SSH_KEY: ${{ secrets.SSH_PRIVATE_KEY }}
SSH_HOST: ${{ secrets.DEV_OPS_HOST }}
- name: Deploy React
run: |
ssh ops -t -t 'bash -ic "cd mrsk && kamal deploy --version ${{ env.RELEASE }}.${{ github.sha }}.beta --config-file=config/react.yml --skip-push; exit"'
- name: Deploy Workers
run: |
ssh ops -t -t 'bash -ic "cd mrsk && kamal deploy --version ${{ env.RELEASE }}.${{ github.sha }}.beta --config-file=config/workers.yml --skip-push; exit"'

32
.github/workflows/build-cloud-image.yml vendored Normal file

@ -0,0 +1,32 @@
name: Build Cloud Image
on:
workflow_dispatch:
jobs:
Release:
runs-on: ubuntu-latest
steps:
- name: Check out repository code
uses: actions/checkout@v3
- name: Set RELEASE env var from package.json
run: echo RELEASE=$(node --print "require('./package.json').version") >> $GITHUB_ENV
- name: Login to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and push
uses: docker/build-push-action@v2
with:
context: .
file: ./Dockerfile
platforms: |
linux/amd64
push: true
tags: |
ghcr.io/activepieces/activepieces-cloud:${{ env.RELEASE }}.${{ github.sha }}

79
.github/workflows/build-cloud-nx.yml vendored Normal file

@ -0,0 +1,79 @@
name: CI
on:
pull_request:
permissions:
actions: read
contents: read
jobs:
main:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: actions/setup-node@v3
with:
node-version: 20
cache: 'npm'
- run: npx nx-cloud start-ci-run --distribute-on="3 linux-large-js" --agents
- run: npm ci
- run: npx nx reset
- uses: nrwl/nx-set-shas@v4
- name: List all nx targets
run: npx nx show projects --all
- name: Get changed files
id: changed-files
run: echo "files=$(git diff --name-only HEAD origin/main | tr '\n' ' ')" >> $GITHUB_OUTPUT
- name: Check if framework or common pieces are changed
id: check-framework-common
run: |
CHANGED_FILES="${{ steps.changed-files.outputs.files }}"
if echo "$CHANGED_FILES" | grep -q "community/framework\|community/common"; then
echo "framework_or_common_changed=true" >> $GITHUB_OUTPUT
else
echo "framework_or_common_changed=false" >> $GITHUB_OUTPUT
fi
- name: Extract pieces projects from changed files
id: extract-pieces
run: |
PIECES=$(echo "${{ steps.changed-files.outputs.files }}" | grep -o "packages/pieces/[^/]*/[^/]*/" | awk -F'/' '{print "pieces-" $4}' | sort -u | tr '\n' ',' | sed 's/,$//')
echo "pieces_projects=$PIECES" >> $GITHUB_OUTPUT
- name: Lint affected projects excluding pieces
run: npx nx affected --target=lint --exclude="pieces-*" --agents --parallel
- name: Lint changed pieces projects
if: steps.extract-pieces.outputs.pieces_projects != '' && steps.check-framework-common.outputs.framework_or_common_changed == 'false'
run: npx nx run-many --target=lint --projects="${{ steps.extract-pieces.outputs.pieces_projects }}" --agents --parallel
- name: Lint all pieces projects
if: steps.check-framework-common.outputs.framework_or_common_changed == 'true'
run: npx nx run-many --target=lint --projects="pieces-*" --agents --parallel
- name: Build affected projects excluding pieces
run: npx nx affected --target=build -c production --exclude="pieces-*" --agents --parallel
- name: Build changed pieces projects
if: steps.extract-pieces.outputs.pieces_projects != '' && steps.check-framework-common.outputs.framework_or_common_changed == 'false'
run: npx nx run-many --target=build -c production --projects="${{ steps.extract-pieces.outputs.pieces_projects }}" --agents --parallel
- name: Build all pieces projects
if: steps.check-framework-common.outputs.framework_or_common_changed == 'true'
run: npx nx run-many --target=build -c production --projects="pieces-*" --agents --parallel
- name: Run all tests in parallel
run: |
npx nx run-many --target=test --projects=engine,shared --agents --parallel &
npx nx run server-api:test-ce &
npx nx run server-api:test-ee &
npx nx run server-api:test-cloud &
wait
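The "Extract pieces projects from changed files" step above converts changed paths into Nx project names with a grep/awk/sort pipeline. A hypothetical TypeScript re-implementation, shown only to clarify what that pipeline computes:

// Hypothetical re-implementation of the shell pipeline in the extract-pieces step above.
function extractPiecesProjects(changedFiles: string[]): string {
  const projects = new Set<string>();
  for (const file of changedFiles) {
    // Mirrors: grep -o "packages/pieces/[^/]*/[^/]*/" | awk -F'/' '{print "pieces-" $4}'
    const match = file.match(/packages\/pieces\/([^/]+)\/([^/]+)\//);
    if (match) {
      projects.add(`pieces-${match[2]}`);
    }
  }
  // Mirrors: sort -u | tr '\n' ',' | sed 's/,$//'
  return [...projects].sort().join(',');
}

// e.g. a change under packages/pieces/community/slack/src/... yields "pieces-slack"
console.log(extractPiecesProjects(['packages/pieces/community/slack/src/index.ts']));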


@ -0,0 +1,21 @@
name: Closed Issue Message
on:
issues:
types: [closed]
permissions: {}
jobs:
auto_comment:
permissions:
issues: write # to comment on issues (aws-actions/closed-issue-message)
runs-on: ubuntu-latest
if: github.repository_owner == 'activepieces'
steps:
- uses: aws-actions/closed-issue-message@v1
with:
# These inputs are both required
repo-token: "${{ secrets.GITHUB_TOKEN }}"
message: |
### ⚠️ COMMENT VISIBILITY WARNING ⚠️
Comments on closed issues are hard for our team to see.
If this issue persists with the latest stable version of Activepieces, please open a new issue that references this one.

37
.github/workflows/crowdin.yml vendored Normal file

@ -0,0 +1,37 @@
name: Crowdin Action
on:
push:
branches: [ main ]
workflow_dispatch:
jobs:
synchronize-with-crowdin:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: crowdin action
uses: crowdin/github-action@v2
with:
# Configuration file
config: 'crowdin.yml'
upload_sources: true
upload_translations: false
download_translations: true
localization_branch_name: l10n_crowdin_translations
create_pull_request: true
pull_request_title: 'New Crowdin Translations'
pull_request_body: 'New Crowdin translations by [Crowdin GH Action](https://github.com/crowdin/github-action)'
pull_request_base_branch_name: 'main'
env:
# A classic GitHub Personal Access Token with the 'repo' scope selected (the user should have write access to the repository).
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
# A numeric ID, found at https://crowdin.com/project/<projectName>/tools/api
CROWDIN_PROJECT_ID: ${{ secrets.CROWDIN_PROJECT_ID }}
# Visit https://crowdin.com/settings#api-key to create this token
CROWDIN_PERSONAL_TOKEN: ${{ secrets.CROWDIN_PERSONAL_TOKEN }}


@ -0,0 +1,33 @@
name: Write Pre Release notes
on:
pull_request:
types: [opened, reopened, synchronize, edited, closed, labeled]
permissions:
contents: write
pull-requests: write
jobs:
Release:
if: contains(github.event.pull_request.labels.*.name, 'pre-release')
runs-on: ubuntu-latest
steps:
- name: Check out repository code
uses: actions/checkout@v3
- name: Set RELEASE env var from package.json
run: echo RELEASE=$(node --print "require('./package.json').rcVersion") >> $GITHUB_ENV
- name: Create release notes
uses: release-drafter/release-drafter@v5
with:
config-name: pre-release-drafter.yml
commitish: main
prerelease: true
tag: ${{ env.RELEASE }}
name: ${{ env.RELEASE }}
version: ${{ env.RELEASE }}
latest: false
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

43
.github/workflows/release-ap-base.yml vendored Normal file

@ -0,0 +1,43 @@
name: Release AP base
on:
workflow_dispatch:
inputs:
tag:
description: 'image tag'
required: true
jobs:
Release-AP-Base:
runs-on: ubuntu-latest
steps:
- name: Check out repository code
uses: actions/checkout@v3
- name: Fail if tag already exists
run: "! docker manifest inspect activepieces/ap-base:${{ inputs.tag }}"
- name: Set up Depot CLI
uses: depot/setup-action@v1
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push
uses: depot/build-push-action@v1
with:
project: du7O4b0e8P
token: ${{ secrets.DEPOT_PROJECT_TOKEN }}
context: .
file: ./ap-base.dockerfile
platforms: |
linux/amd64
linux/arm64
linux/arm/v7
push: true
tags: |
activepieces/ap-base:${{ inputs.tag }}
activepieces/ap-base:latest

32
.github/workflows/release-changelog.yml vendored Normal file

@ -0,0 +1,32 @@
name: Write release notes
on:
pull_request:
types: [opened, reopened, synchronize, edited, closed, labeled]
permissions:
contents: write
pull-requests: write
jobs:
Release:
if: contains(github.event.pull_request.labels.*.name, 'release')
runs-on: ubuntu-latest
steps:
- name: Check out repository code
uses: actions/checkout@v3
- name: Set RELEASE env var from package.json
run: echo RELEASE=$(node --print "require('./package.json').version") >> $GITHUB_ENV
- name: Create release notes
uses: release-drafter/release-drafter@v5
with:
commitish: main
prerelease: false
tag: ${{ env.RELEASE }}
name: ${{ env.RELEASE }}
version: ${{ env.RELEASE }}
latest: true
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

61
.github/workflows/release-pieces.yml vendored Normal file

@ -0,0 +1,61 @@
name: Release Pieces
on:
workflow_dispatch:
push:
branches:
- main
paths:
- 'packages/pieces/**'
- 'packages/shared/**'
jobs:
Release-Pieces:
if: github.repository == 'activepieces/activepieces'
runs-on: ubuntu-latest
steps:
- name: Check out repository code
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Cache dependencies
uses: actions/cache@v3
with:
path: ~/.npm
key: npm-${{ hashFiles('package-lock.json') }}
restore-keys: npm-
- name: Install dependencies
run: npm ci --ignore-scripts
- name: build packages
run: npx nx run-many --target=build
- name: copy project .npmrc to user level
run: cp .npmrc $HOME/.npmrc
- name: publish shared package
run: npx ts-node -r tsconfig-paths/register -P packages/engine/tsconfig.lib.json tools/scripts/utils/publish-nx-project.ts packages/shared
env:
NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
- name: publish pieces-common package
run: npx ts-node -r tsconfig-paths/register -P packages/engine/tsconfig.lib.json tools/scripts/utils/publish-nx-project.ts packages/pieces/community/common
env:
NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
- name: publish pieces-framework package
run: npx ts-node -r tsconfig-paths/register -P packages/engine/tsconfig.lib.json tools/scripts/utils/publish-nx-project.ts packages/pieces/community/framework
env:
NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
- name: publish pieces packages
run: npx ts-node -r tsconfig-paths/register -P packages/engine/tsconfig.lib.json tools/scripts/pieces/publish-pieces-to-npm.ts
env:
NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
- name: update pieces metadata
run: npx ts-node -r tsconfig-paths/register -P packages/engine/tsconfig.lib.json tools/scripts/pieces/update-pieces-metadata.ts packages/pieces/community/framework
env:
AP_CLOUD_API_KEY: ${{ secrets.AP_CLOUD_API_KEY }}

51
.github/workflows/release-rc.yml vendored Normal file

@ -0,0 +1,51 @@
name: Release RC
on:
workflow_dispatch:
jobs:
Release:
runs-on: ubuntu-latest
steps:
- name: Check out repository code
uses: actions/checkout@v3
- name: Set RELEASE env var from package.json
run: echo RELEASE=$(node --print "require('./package.json').rcVersion") >> $GITHUB_ENV
- name: Set CLOUD_RELEASE env var from package.json
run: echo CLOUD_RELEASE=$(node --print "require('./package.json').rcVersion.replace(/-/g, '')") >> $GITHUB_ENV
- name: Fail if tag already exists
run: '! docker manifest inspect activepieces/activepieces:${{ env.RELEASE }}'
- name: Set up Depot CLI
uses: depot/setup-action@v1
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Login to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and push
uses: depot/build-push-action@v1
with:
project: du7O4b0e8P
token: ${{ secrets.DEPOT_PROJECT_TOKEN }}
context: .
file: ./Dockerfile
platforms: |
linux/amd64
linux/arm64
push: true
tags: |
ghcr.io/activepieces/activepieces:${{ env.RELEASE }}
ghcr.io/activepieces/activepieces-cloud:${{ env.CLOUD_RELEASE }}

50
.github/workflows/release.yml vendored Normal file

@ -0,0 +1,50 @@
name: Release Everything
on:
workflow_dispatch:
jobs:
Release:
runs-on: ubuntu-latest
steps:
- name: Check out repository code
uses: actions/checkout@v3
- name: Set RELEASE env var from package.json
run: echo RELEASE=$(node --print "require('./package.json').version") >> $GITHUB_ENV
- name: Fail if tag already exists
run: '! docker manifest inspect activepieces/activepieces:${{ env.RELEASE }}'
- name: Set up Depot CLI
uses: depot/setup-action@v1
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Login to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and push
uses: depot/build-push-action@v1
with:
project: du7O4b0e8P
token: ${{ secrets.DEPOT_PROJECT_TOKEN }}
context: .
file: ./Dockerfile
platforms: |
linux/amd64
linux/arm64
push: true
tags: |
activepieces/activepieces:${{ env.RELEASE }}
activepieces/activepieces:latest
ghcr.io/activepieces/activepieces:${{ env.RELEASE }}
ghcr.io/activepieces/activepieces:latest


@ -0,0 +1,29 @@
name: "Issue Linking - Require Issue Reference"
on:
pull_request_target:
types:
- opened
- edited
- synchronize
permissions:
pull-requests: read
issues: read
jobs:
validate-issue-linking:
name: Check Issue Linking
runs-on: ubuntu-latest
steps:
- name: Check Issue Linking
uses: actions/github-script@v7
with:
script: |
const body = context.payload.pull_request.body || '';
const issuePattern = /(?:closes|fixes|resolves)\s+#(\d+)/i;
const linkedIssue = body.match(issuePattern);
if (!linkedIssue) {
core.setFailed('Pull request must be linked to an issue using "closes #issue_number", "fixes #issue_number", or "resolves #issue_number"');
}
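For clarity, a small standalone check of the same pattern the script above uses (the example PR bodies are made up):

// Same regex as in the workflow script above
const issuePattern = /(?:closes|fixes|resolves)\s+#(\d+)/i;

console.log(issuePattern.test('Fixes #1234'));      // true: the PR passes the check
console.log(issuePattern.test('Related to #1234')); // false: the workflow fails the PR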


@ -0,0 +1,25 @@
name: "Pull Request Labels - Require At Least One Label"
on:
pull_request_target:
types:
- opened
- edited
- synchronize
permissions:
pull-requests: read
jobs:
validate-labels:
name: Check PR Labels
runs-on: ubuntu-latest
steps:
- name: Check PR Labels
uses: actions/github-script@v7
with:
script: |
const labels = context.payload.pull_request.labels;
if (!labels || labels.length === 0) {
core.setFailed('Pull request must have at least one label');
}

20
.github/workflows/validate-pr-title.yml vendored Normal file

@ -0,0 +1,20 @@
name: "Lint PR"
on:
pull_request_target:
types:
- opened
- edited
- synchronize
permissions:
pull-requests: read
jobs:
main:
name: Validate PR title
runs-on: ubuntu-latest
steps:
- uses: amannn/action-semantic-pull-request@v5.2.0
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}


@ -0,0 +1,37 @@
name: Validate publishable packages
on:
workflow_dispatch:
pull_request:
branches:
- main
paths:
- 'packages/pieces/**'
jobs:
validate-publishable-packages:
runs-on: ubuntu-latest
steps:
- name: Check out repository code
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Cache dependencies
id: cache-npm
uses: actions/cache@v3
with:
path: ~/.npm
key: npm-${{ hashFiles('package-lock.json') }}
restore-keys: npm-
- if: ${{ steps.cache-npm.outputs.cache-hit != 'true' }}
name: List the state of node modules
continue-on-error: true
run: npm list
- name: Install dependencies
run: npm ci --ignore-scripts
- name: validate publishable packages
run: npx ts-node -r tsconfig-paths/register -P packages/engine/tsconfig.lib.json tools/scripts/validate-publishable-packages.ts

67
.gitignore vendored Normal file

@ -0,0 +1,67 @@
# See http://help.github.com/ignore-files/ for more about ignoring files.
# compiled output
dist
tmp
.nx/
/out-tsc
firebase-admin-sdk.json
dev
/cache
.env
# SDK Build
builds
# dependencies
node_modules
# IDEs and editors
/.idea
.project
.classpath
.c9/
*.launch
.settings/
*.sublime-workspace
/cache
# IDE - VSCode
.vscode/*
!.vscode/settings.json
!.vscode/tasks.json
!.vscode/launch.json
!.vscode/extensions.json
# misc
/.sass-cache
/connect.lock
/coverage
/libpeerconnection.log
npm-debug.log
yarn-error.log
testem.log
/typings
.nx/
# System Files
.DS_Store
Thumbs.db
.angular
activepieces-engine.js
.history/
# produced by unsandboxed engine execution
.pnpm-store
# produced by backend tests
.npm-cache
# scratch pad
scratch.md
# environment variables
.env
.cursor/rules/nx-rules.mdc
.github/instructions/nx.instructions.md

10
.husky/commit-msg Executable file

@ -0,0 +1,10 @@
#!/usr/bin/env sh
. "$(dirname -- "$0")/_/husky.sh"
# Check if the commit includes changes to the backend's .env file
if git diff --cached --name-only -- packages/server/api/.env | grep -q '^packages/server/api/.env$'; then
echo "Error: You're attempting to commit the backend's .env file. Please avoid committing this file."
exit 1
fi
npx --no -- commitlint --edit ${1}

2
.husky/pre-push Executable file

@ -0,0 +1,2 @@
#!/usr/bin/env sh
. "$(dirname -- "$0")/_/husky.sh"

4
.npmrc Normal file

@ -0,0 +1,4 @@
@activepieces:registry=https://registry.npmjs.org/
//registry.npmjs.org/:_authToken=${NPM_TOKEN}
legacy-peer-deps=true
save-exact=true

1
.nvmrc Normal file

@ -0,0 +1 @@
v18.19.0

4
.nxignore Normal file

@ -0,0 +1,4 @@
deploy/
dist/
cache/
dev/

9
.prettierignore Normal file

@ -0,0 +1,9 @@
# Add files here to ignore them from prettier formatting
/dist
/coverage
.angular
/.nx/cache
/.nx/workspace-data

3
.prettierrc Normal file

@ -0,0 +1,3 @@
{
"singleQuote": true
}

27
.typos.toml Normal file

@ -0,0 +1,27 @@
[files]
extend-exclude = [
".git/",
"**/database/**",
"packages/ui/core/src/locale/",
# French
"packages/pieces/community/wedof/src/",
]
ignore-hidden = false
[default]
extend-ignore-re = [
"[0-9A-Za-z]{34}",
"name: 'referal'",
"getRepository\\('referal'\\)",
"label: 'FO Language', value: 'fo'",
"649c83111c9cbe6ba1d4cabe",
"hYy9pRFVxpDsO1FB05SunFWUe9JZY",
"lod6JEdKyPlvrnErdnrGa",
]
[default.extend-identifiers]
"crazyTweek" = "crazyTweek"
"optin_ip" = "optin_ip"
# Typos
"Github" = "GitHub"

28
.verdaccio/config.yml Normal file

@ -0,0 +1,28 @@
# path to a directory with all packages
storage: ../tmp/local-registry/storage
# a list of other known repositories we can talk to
uplinks:
npmjs:
url: https://registry.npmjs.org/
maxage: 60m
packages:
'**':
# give all users (including non-authenticated users) full access
# because it is a local registry
access: $all
publish: $all
unpublish: $all
# if package is not available locally, proxy requests to npm registry
proxy: npmjs
# log settings
logs:
type: stdout
format: pretty
level: warn
publish:
allow_offline: true # set offline to true to allow publish offline

8
.vscode/extensions.json vendored Normal file

@ -0,0 +1,8 @@
{
"recommendations": [
"nrwl.angular-console",
"esbenp.prettier-vscode",
"firsttris.vscode-jest-runner",
"rvest.vs-code-prettier-eslint"
]
}

19
.vscode/launch.json vendored Normal file

@ -0,0 +1,19 @@
{
"version": "0.2.0",
"configurations": [
{
"name": "backend",
"port": 9229,
"request": "attach",
"skipFiles": [
"<node_internals>/**",
"**/node_modules/**"
],
"type": "node",
"localRoot": "${workspaceFolder}/packages/server/api",
"remoteRoot": "/usr/src/app",
"restart": true,
"autoAttachChildProcesses": false
}
]
}

15
.vscode/settings.json vendored Normal file

@ -0,0 +1,15 @@
{
"cSpell.words": [
"Activepieces",
"Fastify",
"mpim",
"sendgrid"
],
"editor.codeActionsOnSave": {
"source.fixAll.eslint": "explicit"
},
"editor.formatOnSave": false,
"typescript.tsdk": "node_modules/typescript/lib",
"javascript.preferences.importModuleSpecifier": "relative",
"nxConsole.generateAiAgentRules": true,
}

13
CONTRIBUTING.md Normal file

@ -0,0 +1,13 @@
<!-- omit in toc -->
# Contributing to Activepieces
First off, thanks for taking the time to contribute! ❤️
All types of contributions are encouraged and valued. See the [Contributing Guide](https://www.activepieces.com/docs/contributing/overview) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions. 🎉
> And if you like the project, but just don't have time to contribute, that's fine. There are other easy ways to support the project and show your appreciation, which we would also be very happy about:
> - Star the project
> - Tweet about it
> - Refer this project in your project's readme
> - Mention the project at local meetups and tell your friends/colleagues

97
Dockerfile Normal file

@ -0,0 +1,97 @@
FROM node:18.20.5-bullseye-slim AS base
# Use a cache mount for apt to speed up the process
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update && \
apt-get install -y --no-install-recommends \
openssh-client \
python3 \
g++ \
build-essential \
git \
poppler-utils \
poppler-data \
procps && \
yarn config set python /usr/bin/python3 && \
npm install -g node-gyp
RUN npm i -g npm@9.9.3 pnpm@9.15.0
# Set the locale
ENV LANG en_US.UTF-8
ENV LANGUAGE en_US:en
ENV LC_ALL en_US.UTF-8
ENV NX_DAEMON=false
RUN apt-get update \
&& apt-get install -y --no-install-recommends \
locales \
locales-all \
libcap-dev \
&& rm -rf /var/lib/apt/lists/*
# install isolated-vm in a parent directory to avoid linking the package in every sandbox
RUN cd /usr/src && npm i isolated-vm@5.0.1
RUN pnpm store add @tsconfig/node18@1.0.0
RUN pnpm store add @types/node@18.17.1
RUN pnpm store add typescript@4.9.4
### STAGE 1: Build ###
FROM base AS build
# Set up backend
WORKDIR /usr/src/app
COPY .npmrc package.json package-lock.json ./
RUN npm ci
COPY . .
RUN npx nx run-many --target=build --projects=server-api --configuration production
RUN npx nx run-many --target=build --projects=react-ui
# Install backend production dependencies
RUN cd dist/packages/server/api && npm install --production --force
### STAGE 2: Run ###
FROM base AS run
# Set up backend
WORKDIR /usr/src/app
COPY packages/server/api/src/assets/default.cf /usr/local/etc/isolate
# Install Nginx and gettext for envsubst
RUN apt-get update && apt-get install -y nginx gettext
# Copy Nginx configuration template
COPY nginx.react.conf /etc/nginx/nginx.conf
COPY --from=build /usr/src/app/LICENSE .
RUN mkdir -p /usr/src/app/dist/packages/server/
RUN mkdir -p /usr/src/app/dist/packages/engine/
RUN mkdir -p /usr/src/app/dist/packages/shared/
# Copy Output files to appropriate directory from build stage
COPY --from=build /usr/src/app/dist/packages/engine/ /usr/src/app/dist/packages/engine/
COPY --from=build /usr/src/app/dist/packages/server/ /usr/src/app/dist/packages/server/
COPY --from=build /usr/src/app/dist/packages/shared/ /usr/src/app/dist/packages/shared/
RUN cd /usr/src/app/dist/packages/server/api/ && npm install --production --force
# Copy Output files to appropriate directory from build stage
COPY --from=build /usr/src/app/packages packages
# Copy frontend files to Nginx document root directory from build stage
COPY --from=build /usr/src/app/dist/packages/react-ui /usr/share/nginx/html/
LABEL service=activepieces
# Set up entrypoint script
COPY docker-entrypoint.sh .
RUN chmod +x docker-entrypoint.sh
ENTRYPOINT ["./docker-entrypoint.sh"]
EXPOSE 80

25
LICENSE Executable file

@ -0,0 +1,25 @@
Copyright (c) 2020-2024 Activepieces Inc.
Portions of this software are licensed as follows:
* All content that resides under the "packages/ee/" and "packages/server/api/src/app/ee" directory of this repository, if that directory exists, is licensed under the license defined in packages/ee/LICENSE
* All third party components incorporated into the Activepieces Inc Software are licensed under the original license provided by the owner of the applicable component.
* Content outside of the above mentioned directories or restrictions above is available under the "MIT Expat" license as defined below.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

39
SECURITY.md Normal file

@ -0,0 +1,39 @@
# Security
Contact: security@activepieces.com
Based on [https://supabase.com/.well-known/security.txt](https://supabase.com/.well-known/security.txt)
At Activepieces.com, we consider the security of our systems a top priority. But no matter how much effort we put into system security, there can still be vulnerabilities present.
If you discover a vulnerability, we would like to know about it so we can take steps to address it as quickly as possible. We would like to ask you to help us better protect our clients and our systems.
## Out of scope vulnerabilities:
- Clickjacking on pages with no sensitive actions.
- Unauthenticated/logout/login CSRF.
- Attacks requiring MITM or physical access to a user's device.
- Any activity that could lead to the disruption of our service (DoS).
- Content spoofing and text injection issues without showing an attack vector/without being able to modify HTML/CSS.
- Email spoofing
- Missing DNSSEC, CAA, CSP headers
- Lack of Secure or HTTP only flag on non-sensitive cookies
- Dead links
## Please do the following:
- E-mail your findings to [security@activepieces.com](mailto:security@activepieces.com).
- Do not run automated scanners on our infrastructure or dashboard. If you wish to do this, contact us and we will set up a sandbox for you.
- Do not take advantage of the vulnerability or problem you have discovered, for example by downloading more data than necessary to demonstrate the vulnerability or deleting or modifying other people's data,
- Do not reveal the problem to others until it has been resolved,
- Do not use attacks on physical security, social engineering, distributed denial of service, spam or applications of third parties,
- Do provide sufficient information to reproduce the problem, so we will be able to resolve it as quickly as possible. Usually, the IP address or the URL of the affected system and a description of the vulnerability will be sufficient, but complex vulnerabilities may require further explanation.
## What we promise:
- We will respond to your report within 3 business days with our evaluation of the report and an expected resolution date,
- If you have followed the instructions above, we will not take any legal action against you in regard to the report,
- We will handle your report with strict confidentiality, and not pass on your personal details to third parties without your permission,
- We will keep you informed of the progress towards resolving the problem,
- In the public information concerning the problem reported, we will give your name as the discoverer of the problem (unless you desire otherwise), and
- We strive to resolve all problems as quickly as possible, and we would like to play an active role in the ultimate publication on the problem after it is resolved.

BIN
assets/ap-logo.png Executable file

Binary file not shown. Size: 5.9 KiB

1
commitlint.config.js Normal file

@ -0,0 +1 @@
module.exports = { extends: ['@commitlint/config-conventional'] };

12
crowdin.yml Normal file

@ -0,0 +1,12 @@
project_id_env: CROWDIN_PROJECT_ID
api_token_env: CROWDIN_PERSONAL_TOKEN
base_path: .
base_url: 'https://api.crowdin.com'
preserve_hierarchy: 1
files:
- type: i18next_json
source: packages/react-ui/public/locales/en/translation.json
translation: /packages/react-ui/public/locales/%two_letters_code%/translation.json
- type: json
source: packages/pieces/**/**/src/i18n/translation.json
translation: /packages/pieces/**/**/src/i18n/%two_letters_code%.json

33
deploy/pulumi/.gitignore vendored Normal file

@ -0,0 +1,33 @@
/.pulumi/
/.vscode/
/.vs/
bin/
build/
node_modules/
*.pyc
.Python
venv/
include/
lib/
yarn.lock
package-lock.json
# https://www.pulumi.com/blog/iac-recommended-practices-developer-stacks-git-branches/#using-developer-stacks
# Pulumi.*.yaml
# Pulumi.*dev*.yaml
.idea/
.ionide/
*.iml
key.rsa*
obj/
vendor
Gopkg.lock
**/.DS_Store
**/ci-scripts
# Java app
.gradle/
.settings/
.project
.classpath
target/


@ -0,0 +1,22 @@
encryptionsalt: v1:wHCVNl3bj/g=:v1:mxogi9ZeBjIcxNZC:q+bjpLv9rnJnu8qq7xwKGLd/GAZOqA==
config:
activepieces:environment: "dev"
activepieces:apEncryptionKey:
activepieces:apJwtSecret:
activepieces:deployLocalBuild: "false"
activepieces:repoName:
activepieces:containerCpu: "256"
activepieces:containerMemory: "512"
activepieces:containerInstances: "1"
activepieces:usePostgres: "false"
activepieces:dbInstanceClass: "db.t3.small"
activepieces:dbUsername: "postgres"
activepieces:dbIsPublic: "false"
activepieces:dbPassword:
secure: v1:MXNSOcqZCp10X2PX:mU2iTrcETjdisk8FkD5yHLJYUxRei/9l
activepieces:addIpToPostgresSecurityGroup:
activepieces:useRedis: "false"
activepieces:redisNodeType: "cache.t3.small"
activepieces:domain:
activepieces:subDomain:
aws:region: "us-east-1"


@ -0,0 +1,19 @@
encryptionsalt: v1:icXg2cmIvSc=:v1:y8+4YhdMCPPDY26J:5cNYmimH353n8sjUDDc6srvcPgb+8Q==
config:
activepieces:environment: "prod"
activepieces:apEncryptionKey:
activepieces:apJwtSecret:
activepieces:deployLocalBuild: "true"
activepieces:repoName: "activepieces-prod-repo"
activepieces:containerCpu: "512"
activepieces:containerMemory: "1024"
activepieces:containerInstances: "1"
activepieces:usePostgres: "true"
activepieces:dbInstanceClass: "db.t3.small"
activepieces:dbIsPublic: "false"
activepieces:dbPassword:
secure: v1:MXNSOcqZCp10X2PX:mU2iTrcETjdisk8FkD5yHLJYUxRei/9l
activepieces:dbUsername: "postgres"
activepieces:useRedis: "true"
activepieces:redisNodeType: "cache.t3.small"
aws:region: "us-east-1"

56
deploy/pulumi/Pulumi.yaml Normal file

@ -0,0 +1,56 @@
runtime: nodejs
name: activepieces
description: A Pulumi template to deploy Activepieces in a development or production configuration.
stack: activepieces-dev
template:
description: Deploy Activepieces into an ECS Fargate instance & optionally add Postgres, Redis and a DNS registration with SSL.
config:
aws:region:
description: The AWS region to deploy into
default: us-west-2
environment:
description: Environment
default: prod
containerCpu:
description: The amount of CPU to allocate for the container
default: 256
containerMemory:
description: The amount of memory to allocate for the container
default: 512
containerInstances:
description: Number of running containers behind load balancer
default: 1
usePostgres:
description: Add Postgres for storage or use SQLite3 locally
default: true
dbIsPublic:
description: Should Db be publicly reachable. Ignored if usePostgres is false.
default: false
dbUsername:
description: Default username for the Postgres. Ignored if usePostgres is false
default: postgres
dbPassword:
description: Defaults to "postgres". Ignored if usePostgres is false
default: postgres
secret: true
dbInstanceClass:
description: The size of the RDS instance
default: db.t3.micro
useRedis:
description: Use a single node Redis cluster or in-memory
default: true
redisNodeType:
description: Node type for the Redis 7 cluster
default: cache.t3.micro
domain:
description: Optional - E.g. "yourdomain.com". Hosted zone must already exist in Route 53. Creates SSL cert
subDomain:
description: Optional - E.g. "activepieces". "domain" must be set
addIpToPostgresSecurityGroup:
description: Optional - An IP address to add to the allowed inbound traffic for the Postgres
apEncryptionKey:
description: Optional - Run 'openssl rand -hex 16' locally to generate or leave blank to auto-generate
secret: true
apJwtSecret:
description: Optional - Run 'openssl rand -hex 32' locally to generate or leave blank to auto-generate
secret: true
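
As a rough illustration of how the template above is filled in, the same keys can be set per stack with the standard Pulumi CLI before running `pulumi up`; the values below are examples only:

```sh
# Create (or select) a stack for this template
pulumi stack init activepieces-dev

# Plain config values - these mirror the activepieces:* keys in the stack files above
pulumi config set activepieces:usePostgres true
pulumi config set activepieces:containerCpu 512

# Secrets are encrypted into Pulumi.<stack>.yaml
pulumi config set --secret activepieces:dbPassword 'change-me'
pulumi config set --secret activepieces:apEncryptionKey "$(openssl rand -hex 16)"

pulumi up
```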

deploy/pulumi/README.md Normal file

@ -0,0 +1,3 @@
# Getting Started
See instructions at https://www.activepieces.com/docs/install/options/aws

deploy/pulumi/autotag.ts Normal file

@ -0,0 +1,16 @@
import * as pulumi from "@pulumi/pulumi";
import { isTaggable } from "./taggable";
/**
* registerAutoTags registers a global stack transformation that merges a set
* of tags with whatever was also explicitly added to the resource definition.
*/
export function registerAutoTags(autoTags: Record<string, string>): void {
pulumi.runtime.registerStackTransformation((args) => {
if (isTaggable(args.type)) {
args.props["tags"] = { ...args.props["tags"], ...autoTags };
return { props: args.props, opts: args.opts };
}
return undefined;
});
}

deploy/pulumi/index.ts Normal file

@ -0,0 +1,466 @@
import * as aws from "@pulumi/aws";
import * as docker from "@pulumi/docker";
import * as pulumi from "@pulumi/pulumi";
import * as awsx from "@pulumi/awsx";
import { ApplicationLoadBalancer } from "@pulumi/awsx/lb/applicationLoadBalancer";
import { registerAutoTags } from './autotag';
import * as child_process from "child_process";
const stack = pulumi.getStack();
const config = new pulumi.Config();
const apEncryptionKey = config.getSecret("apEncryptionKey")?.apply(secretValue => {
return secretValue || child_process.execSync("openssl rand -hex 16").toString().trim();
});
const apJwtSecret = config.getSecret("apJwtSecret")?.apply(secretValue => {
return secretValue || child_process.execSync("openssl rand -hex 32").toString().trim();
});
const containerCpu = config.requireNumber("containerCpu");
const containerMemory = config.requireNumber("containerMemory");
const containerInstances = config.requireNumber("containerInstances");
const addIpToPostgresSecurityGroup = config.get("addIpToPostgresSecurityGroup");
const domain = config.get("domain");
const subDomain = config.get("subDomain");
const usePostgres = config.requireBoolean("usePostgres");
const useRedis = config.requireBoolean("useRedis");
const redisNodeType = config.require("redisNodeType");
const dbIsPublic = config.getBoolean("dbIsPublic");
const dbUsername = config.get("dbUsername");
const dbPassword = config.getSecret("dbPassword");
const dbInstanceClass = config.require("dbInstanceClass");
// Add tags for every resource that allows them, with the following properties.
// Useful to know who or what created the resource/service
registerAutoTags({
"pulumi:Project": pulumi.getProject(),
"pulumi:Stack": pulumi.getStack(),
"Created by": config.get("author") || child_process.execSync("pulumi whoami").toString().trim().replace('\\', '/')
});
let imageName;
// Check if we're deploying a local build or direct from Docker Hub
if (config.getBoolean("deployLocalBuild")) {
const repoName = config.require("repoName");
const repo = new aws.ecr.Repository(repoName, {
name: repoName // https://www.pulumi.com/docs/intro/concepts/resources/names/#autonaming
}); // Create a private ECR repository
const repoUrl = pulumi.interpolate`${repo.repositoryUrl}`; // Get registry info (creds and endpoint)
const name = pulumi.interpolate`${repoUrl}:latest`;
// Get the repository credentials we use to push the image to the repository
const repoCreds = repo.registryId.apply(async (registryId) => {
const credentials = await aws.ecr.getCredentials({
registryId: registryId,
});
const decodedCredentials = Buffer.from(credentials.authorizationToken, "base64").toString();
const [username, password] = decodedCredentials.split(":");
return {
server: credentials.proxyEndpoint,
username,
password
};
});
// Build and publish the container image.
const image = new docker.Image(stack, {
build: {
context: `../../`,
dockerfile: `../../Dockerfile`,
builderVersion: "BuilderBuildKit",
args: {
"BUILDKIT_INLINE_CACHE": "1"
},
},
skipPush: pulumi.runtime.isDryRun(),
imageName: name,
registry: repoCreds
});
imageName = image.imageName;
pulumi.log.info(`Finished pushing image to ECR`, image);
} else {
imageName = process.env.IMAGE_NAME || config.get("imageName") || "activepieces/activepieces:latest";
}
const containerEnvironmentVars: awsx.types.input.ecs.TaskDefinitionKeyValuePairArgs[] = [];
// Allocate a new VPC with the default settings:
const vpc = new awsx.ec2.Vpc(`${stack}-vpc`, {
numberOfAvailabilityZones: 2,
natGateways: {
strategy: "Single"
},
tags: {
// For some reason, this is how you name a VPC with AWS:
// https://github.com/pulumi/pulumi-terraform/issues/38#issue-262186406
Name: `${stack}-vpc`
},
enableDnsHostnames: true,
enableDnsSupport: true
});
const albSecGroup = new aws.ec2.SecurityGroup(`${stack}-alb-sg`, {
name: `${stack}-alb-sg`,
vpcId: vpc.vpcId,
ingress: [{ // Allow only http & https traffic
protocol: "tcp",
fromPort: 443,
toPort: 443,
cidrBlocks: ["0.0.0.0/0"]
},
{
protocol: "tcp",
fromPort: 80,
toPort: 80,
cidrBlocks: ["0.0.0.0/0"]
}],
egress: [{
protocol: "-1",
fromPort: 0,
toPort: 0,
cidrBlocks: ["0.0.0.0/0"]
}]
})
const fargateSecGroup = new aws.ec2.SecurityGroup(`${stack}-fargate-sg`, {
name: `${stack}-fargate-sg`,
vpcId: vpc.vpcId,
ingress: [
{
protocol: "tcp",
fromPort: 80,
toPort: 80,
securityGroups: [albSecGroup.id]
}
],
egress: [ // allow all outbound traffic
{
protocol: "-1",
fromPort: 0,
toPort: 0,
cidrBlocks: ["0.0.0.0/0"]
}
]
});
if (usePostgres) {
const rdsSecurityGroupArgs: aws.ec2.SecurityGroupArgs = {
name: `${stack}-db-sg`,
vpcId: vpc.vpcId,
ingress: [{
protocol: "tcp",
fromPort: 5432,
toPort: 5432,
securityGroups: [fargateSecGroup.id] // The id of the Fargate security group
}],
egress: [ // allow all outbound traffic
{
protocol: "-1",
fromPort: 0,
toPort: 0,
cidrBlocks: ["0.0.0.0/0"]
}
]
};
// Optionally add the current outgoing public IP address to the CIDR block
// so that they can connect directly to the Db during development
if (addIpToPostgresSecurityGroup) {
// @ts-ignore
rdsSecurityGroupArgs.ingress.push({
protocol: "tcp",
fromPort: 5432,
toPort: 5432,
cidrBlocks: [`${addIpToPostgresSecurityGroup}/32`],
description: `Public IP for local connection`
});
}
const rdsSecurityGroup = new aws.ec2.SecurityGroup(`${stack}-db-sg`, rdsSecurityGroupArgs);
const rdsSubnets = new aws.rds.SubnetGroup(`${stack}-db-subnet-group`, {
name: `${stack}-db-subnet-group`,
subnetIds: dbIsPublic ? vpc.publicSubnetIds : vpc.privateSubnetIds
});
const db = new aws.rds.Instance(stack, {
allocatedStorage: 10,
engine: "postgres",
engineVersion: "14.9",
identifier: stack, // In RDS
dbName: "postgres", // When connected to the DB host
instanceClass: dbInstanceClass,
port: 5432,
publiclyAccessible: dbIsPublic,
skipFinalSnapshot: true,
storageType: "gp2",
username: dbUsername,
password: dbPassword,
dbSubnetGroupName: rdsSubnets.id,
vpcSecurityGroupIds: [rdsSecurityGroup.id],
backupRetentionPeriod: 0,
applyImmediately: true,
allowMajorVersionUpgrade: true,
autoMinorVersionUpgrade: true
}, {
protect: dbIsPublic === false,
deleteBeforeReplace: true
});
containerEnvironmentVars.push(
{
name: "AP_POSTGRES_DATABASE",
value: db.dbName
},
{
name: "AP_POSTGRES_HOST",
value: db.address
},
{
name: "AP_POSTGRES_PORT",
value: pulumi.interpolate`${db.port}`
},
{
name: "AP_POSTGRES_USERNAME",
value: db.username
},
{
name: "AP_POSTGRES_PASSWORD",
value: config.requireSecret("dbPassword")
},
{
name: "AP_POSTGRES_USE_SSL",
value: "false"
});
} else {
containerEnvironmentVars.push(
{
name: "AP_DB_TYPE",
value: "SQLITE3"
});
}
if (useRedis) {
const redisCluster = new aws.elasticache.Cluster(`${stack}-redis-cluster`, {
clusterId: `${stack}-redis-cluster`,
engine: "redis",
engineVersion: '7.0',
nodeType: redisNodeType,
numCacheNodes: 1,
parameterGroupName: "default.redis7",
port: 6379,
subnetGroupName: new aws.elasticache.SubnetGroup(`${stack}-redis-subnet-group`, {
name: `${stack}-redis-subnet-group`,
subnetIds: vpc.privateSubnetIds
}).id,
securityGroupIds: [
new aws.ec2.SecurityGroup(`${stack}-redis-sg`, {
name: `${stack}-redis-sg`,
vpcId: vpc.vpcId,
ingress: [{
protocol: "tcp",
fromPort: 6379, // The standard port for Redis
toPort: 6379,
securityGroups: [fargateSecGroup.id]
}],
egress: [{
protocol: "-1",
fromPort: 0,
toPort: 0,
cidrBlocks: ["0.0.0.0/0"]
}]
}).id
]
});
const redisUrl = pulumi.interpolate`${redisCluster.cacheNodes[0].address}:${redisCluster.cacheNodes[0].port}`;
containerEnvironmentVars.push(
{
name: "AP_REDIS_URL",
value: redisUrl
});
} else {
containerEnvironmentVars.push(
{
name: "AP_QUEUE_MODE",
value: "MEMORY"
});
}
let alb: ApplicationLoadBalancer;
// Export the URL so we can easily access it.
let frontendUrl;
if (subDomain && domain) {
const fullDomain = `${subDomain}.${domain}`;
const exampleCertificate = new aws.acm.Certificate(`${stack}-cert`, {
domainName: fullDomain,
validationMethod: "DNS",
});
const hostedZoneId = aws.route53.getZone({ name: domain }, { async: true }).then(zone => zone.zoneId);
// DNS records to verify SSL Certificate
const certificateValidationDomain = new aws.route53.Record(`${fullDomain}-validation`, {
name: exampleCertificate.domainValidationOptions[0].resourceRecordName,
zoneId: hostedZoneId,
type: exampleCertificate.domainValidationOptions[0].resourceRecordType,
records: [exampleCertificate.domainValidationOptions[0].resourceRecordValue],
ttl: 600,
});
const certificateValidation = new aws.acm.CertificateValidation(`${fullDomain}-cert-validation`, {
certificateArn: exampleCertificate.arn,
validationRecordFqdns: [certificateValidationDomain.fqdn],
});
// Creates an ALB associated with our custom VPC.
alb = new awsx.lb.ApplicationLoadBalancer(`${stack}-alb`, {
securityGroups: [albSecGroup.id],
name: `${stack}-alb`,
subnetIds: vpc.publicSubnetIds,
listeners: [{
port: 80, // port on the docker container
protocol: "HTTP",
defaultActions: [{
type: "redirect",
redirect: {
protocol: "HTTPS",
port: "443",
statusCode: "HTTP_301",
},
}]
},
{
protocol: "HTTPS",
port: 443,
certificateArn: certificateValidation.certificateArn
}],
defaultTargetGroup: {
name: `${stack}-alb-tg`,
port: 80 // port on the docker container
}
});
// Create a DNS record for the load balancer
const albDomain = new aws.route53.Record(fullDomain, {
name: fullDomain,
zoneId: hostedZoneId,
type: "CNAME",
records: [alb.loadBalancer.dnsName],
ttl: 600,
});
frontendUrl = pulumi.interpolate`https://${subDomain}.${domain}`;
} else {
// Creates an ALB associated with our custom VPC.
alb = new awsx.lb.ApplicationLoadBalancer(`${stack}-alb`, {
securityGroups: [albSecGroup.id],
name: `${stack}-alb`,
subnetIds: vpc.publicSubnetIds,
listeners: [{
port: 80, // exposed port from the docker file
protocol: "HTTP"
}],
defaultTargetGroup: {
name: `${stack}-alb-tg`,
port: 80, // port on the docker container
protocol: "HTTP"
}
});
frontendUrl = pulumi.interpolate`http://${alb.loadBalancer.dnsName}`;
}
const environmentVariables = [
...containerEnvironmentVars,
{
name: "AP_ENGINE_EXECUTABLE_PATH",
value: "dist/packages/engine/main.js"
},
{
name: "AP_ENCRYPTION_KEY",
value: apEncryptionKey
},
{
name: "AP_JWT_SECRET",
value: apJwtSecret
},
{
name: "AP_ENVIRONMENT",
value: "prod"
},
{
name: "AP_FRONTEND_URL",
value: frontendUrl
},
{
name: "AP_TRIGGER_DEFAULT_POLL_INTERVAL",
value: "5"
},
{
name: "AP_EXECUTION_MODE",
value: "UNSANDBOXED"
},
{
name: "AP_REDIS_USE_SSL",
value: "false"
},
{
name: "AP_SANDBOX_RUN_TIME_SECONDS",
value: "600"
},
{
name: "AP_TELEMETRY_ENABLED",
value: "true"
},
{
name: "AP_TEMPLATES_SOURCE_URL",
value: "https://cloud.activepieces.com/api/v1/flow-templates"
}
];
const fargateService = new awsx.ecs.FargateService(`${stack}-fg`, {
name: `${stack}-fg`,
cluster: (new aws.ecs.Cluster(`${stack}-cluster`, {
name: `${stack}-cluster`
})).arn,
networkConfiguration: {
subnets: vpc.publicSubnetIds,
securityGroups: [fargateSecGroup.id],
assignPublicIp: true
},
desiredCount: containerInstances,
taskDefinitionArgs: {
family: `${stack}-fg-task-definition`,
container: {
name: "activepieces",
image: imageName,
cpu: containerCpu,
memory: containerMemory,
portMappings: [{
targetGroup: alb.defaultTargetGroup,
}],
environment: environmentVariables
}
}
});
pulumi.log.info("Finished running Pulumi");
export const _ = {
activePiecesUrl: frontendUrl,
activepiecesEnv: environmentVariables
};


@ -0,0 +1,13 @@
{
"name": "pulumi",
"main": "index.ts",
"devDependencies": {
"@types/node": "^18"
},
"dependencies": {
"@pulumi/pulumi": "^3.0.0",
"@pulumi/aws": "^6.0.0",
"@pulumi/awsx": "^1.0.0",
"@pulumi/docker": "^4.4.0"
}
}

deploy/pulumi/taggable.ts Normal file

@ -0,0 +1,237 @@
/**
* isTaggable returns true if the given resource type is an AWS resource that supports tags.
*/
export function isTaggable(t: string): boolean {
return (taggableResourceTypes.indexOf(t) !== -1);
}
// taggableResourceTypes is a list of known AWS type tokens that are taggable.
const taggableResourceTypes = [
"aws:accessanalyzer/analyzer:Analyzer",
"aws:acm/certificate:Certificate",
"aws:acmpca/certificateAuthority:CertificateAuthority",
"aws:alb/loadBalancer:LoadBalancer",
"aws:alb/targetGroup:TargetGroup",
"aws:apigateway/apiKey:ApiKey",
"aws:apigateway/clientCertificate:ClientCertificate",
"aws:apigateway/domainName:DomainName",
"aws:apigateway/restApi:RestApi",
"aws:apigateway/stage:Stage",
"aws:apigateway/usagePlan:UsagePlan",
"aws:apigateway/vpcLink:VpcLink",
"aws:applicationloadbalancing/loadBalancer:LoadBalancer",
"aws:applicationloadbalancing/targetGroup:TargetGroup",
"aws:appmesh/mesh:Mesh",
"aws:appmesh/route:Route",
"aws:appmesh/virtualNode:VirtualNode",
"aws:appmesh/virtualRouter:VirtualRouter",
"aws:appmesh/virtualService:VirtualService",
"aws:appsync/graphQLApi:GraphQLApi",
"aws:athena/workgroup:Workgroup",
"aws:autoscaling/group:Group",
"aws:backup/plan:Plan",
"aws:backup/vault:Vault",
"aws:cfg/aggregateAuthorization:AggregateAuthorization",
"aws:cfg/configurationAggregator:ConfigurationAggregator",
"aws:cfg/rule:Rule",
"aws:cloudformation/stack:Stack",
"aws:cloudformation/stackSet:StackSet",
"aws:cloudfront/distribution:Distribution",
"aws:cloudhsmv2/cluster:Cluster",
"aws:cloudtrail/trail:Trail",
"aws:cloudwatch/eventRule:EventRule",
"aws:cloudwatch/logGroup:LogGroup",
"aws:cloudwatch/metricAlarm:MetricAlarm",
"aws:codebuild/project:Project",
"aws:codecommit/repository:Repository",
"aws:codepipeline/pipeline:Pipeline",
"aws:codepipeline/webhook:Webhook",
"aws:codestarnotifications/notificationRule:NotificationRule",
"aws:cognito/identityPool:IdentityPool",
"aws:cognito/userPool:UserPool",
"aws:datapipeline/pipeline:Pipeline",
"aws:datasync/agent:Agent",
"aws:datasync/efsLocation:EfsLocation",
"aws:datasync/locationSmb:LocationSmb",
"aws:datasync/nfsLocation:NfsLocation",
"aws:datasync/s3Location:S3Location",
"aws:datasync/task:Task",
"aws:dax/cluster:Cluster",
"aws:directconnect/connection:Connection",
"aws:directconnect/hostedPrivateVirtualInterfaceAccepter:HostedPrivateVirtualInterfaceAccepter",
"aws:directconnect/hostedPublicVirtualInterfaceAccepter:HostedPublicVirtualInterfaceAccepter",
"aws:directconnect/hostedTransitVirtualInterfaceAcceptor:HostedTransitVirtualInterfaceAcceptor",
"aws:directconnect/linkAggregationGroup:LinkAggregationGroup",
"aws:directconnect/privateVirtualInterface:PrivateVirtualInterface",
"aws:directconnect/publicVirtualInterface:PublicVirtualInterface",
"aws:directconnect/transitVirtualInterface:TransitVirtualInterface",
"aws:directoryservice/directory:Directory",
"aws:dlm/lifecyclePolicy:LifecyclePolicy",
"aws:dms/endpoint:Endpoint",
"aws:dms/replicationInstance:ReplicationInstance",
"aws:dms/replicationSubnetGroup:ReplicationSubnetGroup",
"aws:dms/replicationTask:ReplicationTask",
"aws:docdb/cluster:Cluster",
"aws:docdb/clusterInstance:ClusterInstance",
"aws:docdb/clusterParameterGroup:ClusterParameterGroup",
"aws:docdb/subnetGroup:SubnetGroup",
"aws:dynamodb/table:Table",
"aws:ebs/snapshot:Snapshot",
"aws:ebs/snapshotCopy:SnapshotCopy",
"aws:ebs/volume:Volume",
"aws:ec2/ami:Ami",
"aws:ec2/amiCopy:AmiCopy",
"aws:ec2/amiFromInstance:AmiFromInstance",
"aws:ec2/capacityReservation:CapacityReservation",
"aws:ec2/customerGateway:CustomerGateway",
"aws:ec2/defaultNetworkAcl:DefaultNetworkAcl",
"aws:ec2/defaultRouteTable:DefaultRouteTable",
"aws:ec2/defaultSecurityGroup:DefaultSecurityGroup",
"aws:ec2/defaultSubnet:DefaultSubnet",
"aws:ec2/defaultVpc:DefaultVpc",
"aws:ec2/defaultVpcDhcpOptions:DefaultVpcDhcpOptions",
"aws:ec2/eip:Eip",
"aws:ec2/fleet:Fleet",
"aws:ec2/instance:Instance",
"aws:ec2/internetGateway:InternetGateway",
"aws:ec2/keyPair:KeyPair",
"aws:ec2/launchTemplate:LaunchTemplate",
"aws:ec2/natGateway:NatGateway",
"aws:ec2/networkAcl:NetworkAcl",
"aws:ec2/networkInterface:NetworkInterface",
"aws:ec2/placementGroup:PlacementGroup",
"aws:ec2/routeTable:RouteTable",
"aws:ec2/securityGroup:SecurityGroup",
"aws:ec2/spotInstanceRequest:SpotInstanceRequest",
"aws:ec2/subnet:Subnet",
"aws:ec2/vpc:Vpc",
"aws:ec2/vpcDhcpOptions:VpcDhcpOptions",
"aws:ec2/vpcEndpoint:VpcEndpoint",
"aws:ec2/vpcEndpointService:VpcEndpointService",
"aws:ec2/vpcPeeringConnection:VpcPeeringConnection",
"aws:ec2/vpcPeeringConnectionAccepter:VpcPeeringConnectionAccepter",
"aws:ec2/vpnConnection:VpnConnection",
"aws:ec2/vpnGateway:VpnGateway",
"aws:ec2clientvpn/endpoint:Endpoint",
"aws:ec2transitgateway/routeTable:RouteTable",
"aws:ec2transitgateway/transitGateway:TransitGateway",
"aws:ec2transitgateway/vpcAttachment:VpcAttachment",
"aws:ec2transitgateway/vpcAttachmentAccepter:VpcAttachmentAccepter",
"aws:ecr/repository:Repository",
"aws:ecs/capacityProvider:CapacityProvider",
"aws:ecs/cluster:Cluster",
"aws:ecs/service:Service",
"aws:ecs/taskDefinition:TaskDefinition",
"aws:efs/fileSystem:FileSystem",
"aws:eks/cluster:Cluster",
"aws:eks/fargateProfile:FargateProfile",
"aws:eks/nodeGroup:NodeGroup",
"aws:elasticache/cluster:Cluster",
"aws:elasticache/replicationGroup:ReplicationGroup",
"aws:elasticbeanstalk/application:Application",
"aws:elasticbeanstalk/applicationVersion:ApplicationVersion",
"aws:elasticbeanstalk/environment:Environment",
"aws:elasticloadbalancing/loadBalancer:LoadBalancer",
"aws:elasticloadbalancingv2/loadBalancer:LoadBalancer",
"aws:elasticloadbalancingv2/targetGroup:TargetGroup",
"aws:elasticsearch/domain:Domain",
"aws:elb/loadBalancer:LoadBalancer",
"aws:emr/cluster:Cluster",
"aws:fsx/lustreFileSystem:LustreFileSystem",
"aws:fsx/windowsFileSystem:WindowsFileSystem",
"aws:gamelift/alias:Alias",
"aws:gamelift/build:Build",
"aws:gamelift/fleet:Fleet",
"aws:gamelift/gameSessionQueue:GameSessionQueue",
"aws:glacier/vault:Vault",
"aws:glue/crawler:Crawler",
"aws:glue/job:Job",
"aws:glue/trigger:Trigger",
"aws:iam/role:Role",
"aws:iam/user:User",
"aws:inspector/resourceGroup:ResourceGroup",
"aws:kinesis/analyticsApplication:AnalyticsApplication",
"aws:kinesis/firehoseDeliveryStream:FirehoseDeliveryStream",
"aws:kinesis/stream:Stream",
"aws:kms/externalKey:ExternalKey",
"aws:kms/key:Key",
"aws:lambda/function:Function",
"aws:lb/loadBalancer:LoadBalancer",
"aws:lb/targetGroup:TargetGroup",
"aws:licensemanager/licenseConfiguration:LicenseConfiguration",
"aws:lightsail/instance:Instance",
"aws:mediaconvert/queue:Queue",
"aws:mediapackage/channel:Channel",
"aws:mediastore/container:Container",
"aws:mq/broker:Broker",
"aws:mq/configuration:Configuration",
"aws:msk/cluster:Cluster",
"aws:neptune/cluster:Cluster",
"aws:neptune/clusterInstance:ClusterInstance",
"aws:neptune/clusterParameterGroup:ClusterParameterGroup",
"aws:neptune/eventSubscription:EventSubscription",
"aws:neptune/parameterGroup:ParameterGroup",
"aws:neptune/subnetGroup:SubnetGroup",
"aws:opsworks/stack:Stack",
"aws:organizations/account:Account",
"aws:pinpoint/app:App",
"aws:qldb/ledger:Ledger",
"aws:ram/resourceShare:ResourceShare",
"aws:rds/cluster:Cluster",
"aws:rds/clusterEndpoint:ClusterEndpoint",
"aws:rds/clusterInstance:ClusterInstance",
"aws:rds/clusterParameterGroup:ClusterParameterGroup",
"aws:rds/clusterSnapshot:ClusterSnapshot",
"aws:rds/eventSubscription:EventSubscription",
"aws:rds/instance:Instance",
"aws:rds/optionGroup:OptionGroup",
"aws:rds/parameterGroup:ParameterGroup",
"aws:rds/securityGroup:SecurityGroup",
"aws:rds/snapshot:Snapshot",
"aws:rds/subnetGroup:SubnetGroup",
"aws:redshift/cluster:Cluster",
"aws:redshift/eventSubscription:EventSubscription",
"aws:redshift/parameterGroup:ParameterGroup",
"aws:redshift/snapshotCopyGrant:SnapshotCopyGrant",
"aws:redshift/snapshotSchedule:SnapshotSchedule",
"aws:redshift/subnetGroup:SubnetGroup",
"aws:resourcegroups/group:Group",
"aws:route53/healthCheck:HealthCheck",
"aws:route53/resolverEndpoint:ResolverEndpoint",
"aws:route53/resolverRule:ResolverRule",
"aws:route53/zone:Zone",
"aws:s3/bucket:Bucket",
"aws:s3/bucketObject:BucketObject",
"aws:sagemaker/endpoint:Endpoint",
"aws:sagemaker/endpointConfiguration:EndpointConfiguration",
"aws:sagemaker/model:Model",
"aws:sagemaker/notebookInstance:NotebookInstance",
"aws:secretsmanager/secret:Secret",
"aws:servicecatalog/portfolio:Portfolio",
"aws:sfn/activity:Activity",
"aws:sfn/stateMachine:StateMachine",
"aws:sns/topic:Topic",
"aws:sqs/queue:Queue",
"aws:ssm/activation:Activation",
"aws:ssm/document:Document",
"aws:ssm/maintenanceWindow:MaintenanceWindow",
"aws:ssm/parameter:Parameter",
"aws:ssm/patchBaseline:PatchBaseline",
"aws:storagegateway/cachesIscsiVolume:CachesIscsiVolume",
"aws:storagegateway/gateway:Gateway",
"aws:storagegateway/nfsFileShare:NfsFileShare",
"aws:storagegateway/smbFileShare:SmbFileShare",
"aws:swf/domain:Domain",
"aws:transfer/server:Server",
"aws:transfer/user:User",
"aws:waf/rateBasedRule:RateBasedRule",
"aws:waf/rule:Rule",
"aws:waf/ruleGroup:RuleGroup",
"aws:waf/webAcl:WebAcl",
"aws:wafregional/rateBasedRule:RateBasedRule",
"aws:wafregional/rule:Rule",
"aws:wafregional/ruleGroup:RuleGroup",
"aws:wafregional/webAcl:WebAcl",
"aws:workspaces/directory:Directory",
"aws:workspaces/ipGroup:IpGroup",
];


@ -0,0 +1,20 @@
{
"compilerOptions": {
"strict": true,
"outDir": "bin",
"target": "es2016",
"module": "commonjs",
"moduleResolution": "node",
"sourceMap": true,
"experimentalDecorators": true,
"pretty": true,
"noFallthroughCasesInSwitch": true,
"noImplicitReturns": true,
"forceConsistentCasingInFileNames": true,
"noImplicitAny": false,
"types": ["node"]
},
"files": [
"index.ts"
]
}

depot.json Normal file

@ -0,0 +1 @@
{"id":"du7O4b0e8P"}

docker-compose.dev.yml Normal file

@ -0,0 +1,23 @@
services:
db:
image: postgres:14.4
environment:
POSTGRES_DB: activepieces
POSTGRES_USER: postgres
POSTGRES_PASSWORD: A79Vm5D4p2VQHOp2gd5
volumes:
- postgres_data:/var/lib/postgresql/data
ports:
- "5432:5432"
redis:
image: redis:7.0.7
volumes:
- redis_data:/data
ports:
- "6379:6379"
volumes:
postgres_data:
redis_data:

docker-compose.test.yml Normal file

@ -0,0 +1,30 @@
version: '3.0'
services:
app:
extends:
file: docker-compose.dev.yml
service: app
user: "${UID}:${GID}"
command: /bin/sh -c "npm_config_cache=/usr/src/app/.npm-cache npx nx run-tests backend"
postgres:
extends:
file: docker-compose.dev.yml
service: postgres
ports:
- "5432:5432"
redis:
extends:
file: docker-compose.dev.yml
service: redis
ports:
- "6379:6379"
volumes:
postgres_data_dev:
redis_data_dev:
networks:
activepieces_dev:

docker-compose.yml Normal file

@ -0,0 +1,43 @@
version: '3.0'
services:
activepieces:
image: ghcr.io/activepieces/activepieces:0.64.2
container_name: activepieces
restart: unless-stopped
## Enable the following line if you already use AP_EXECUTION_MODE with SANDBOXED or an older version of activepieces; check the breaking changes documentation for more info.
## privileged: true
ports:
- '8080:80'
depends_on:
- postgres
- redis
env_file: .env
volumes:
- ./cache:/usr/src/app/cache
networks:
- activepieces
postgres:
image: 'postgres:14.4'
container_name: postgres
restart: unless-stopped
environment:
- 'POSTGRES_DB=${AP_POSTGRES_DATABASE}'
- 'POSTGRES_PASSWORD=${AP_POSTGRES_PASSWORD}'
- 'POSTGRES_USER=${AP_POSTGRES_USERNAME}'
volumes:
- postgres_data:/var/lib/postgresql/data
networks:
- activepieces
redis:
image: 'redis:7.0.7'
container_name: redis
restart: unless-stopped
volumes:
- 'redis_data:/data'
networks:
- activepieces
volumes:
postgres_data:
redis_data:
networks:
activepieces:
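
For reference, the `env_file: .env` consumed above needs at least the Postgres variables the compose file interpolates; the sketch below is not exhaustive and uses placeholder values (the two keys follow the `openssl rand -hex` generation used elsewhere in this repository):

```sh
# .env (placeholders - not an exhaustive list of Activepieces settings)
AP_POSTGRES_DATABASE=activepieces
AP_POSTGRES_USERNAME=postgres
AP_POSTGRES_PASSWORD=change-me

# Generated locally, e.g. with `openssl rand -hex 16` and `openssl rand -hex 32`
AP_ENCRYPTION_KEY=replace-with-hex-32
AP_JWT_SECRET=replace-with-hex-64
```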

docker-entrypoint.sh Normal file

@ -0,0 +1,7 @@
#!/bin/sh
# Start Nginx server
nginx -g "daemon off;" &
# Start backend server
node --enable-source-maps dist/packages/server/api/main.js


@ -0,0 +1,3 @@
<Tip>
This feature is available in our paid editions. Contact us [here](https://www.activepieces.com/sales), and we'll be delighted to assist you!
</Tip>


@ -0,0 +1,5 @@
| Name | Supports NPM in Code Piece | Requires Docker to be Privileged | Performance | Secure for Multi Tenant | Environment Variable |
|-------------------------------|----------------------------|----------------------------------|-----------------------|-------------------------|-------------------------------------------|
| V8/Code Sandboxing | ❌ | No | Fast & Lightweight | ✅ | Set `AP_EXECUTION_MODE` to `SANDBOX_CODE_ONLY` |
| No Sandboxing | ✅ | No | Fast & Lightweight | ❌ | Set `AP_EXECUTION_MODE` to `UNSANDBOXED` |
| Kernel Namespaces Sandboxing | ✅ | Yes | Slow & CPU Intensive | ✅ | Set `AP_EXECUTION_MODE` to `SANDBOXED` |
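
As a minimal sketch, assuming the variable is supplied through the `.env` file used by the Docker Compose setup in this repository, picking a mode looks like this:

```sh
# .env - choose exactly one of the modes from the table above
AP_EXECUTION_MODE=SANDBOX_CODE_ONLY
# AP_EXECUTION_MODE=UNSANDBOXED
# AP_EXECUTION_MODE=SANDBOXED   # remember to run the container as privileged
```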


@ -0,0 +1,5 @@
<Card title="Abdul Rahman Yasir (@Abdulyki)">
Product Engineer
<img src="/resources/profile/abdulyki.png" width="270px" height="270px"/>
<div style={{marginTop: "10px"}}>A stoic software engineer, looking to make the world a better place</div>
</Card>


@ -0,0 +1,5 @@
<Card title="Abdullah Alwarawreh (@abood)">
**👋 Former** Product Engineer (Intern)
<img src="/resources/profile/abood.webp" width="270px" height="270px"/>
<div style={{marginTop: "10px"}}>Speed isn't everything... But my code compiles before I finish my coffee</div>
</Card>


@ -0,0 +1,5 @@
<Card title="Abdulrahaman Zein (@aboodzein)">
Product Engineer (Part Time)
<img src="/resources/profile/aboodzein.png" width="270px" height="270px"/>
<div style={{marginTop: "10px"}}>Quiet but lethal, building in stealth like an Arctic fox in the snow</div>
</Card>


@ -0,0 +1,5 @@
<Card title="Amr Elmohamady (@Amr)">
Product Engineer
<img src="/resources/profile/amr.png" width="270px" height="270px"/>
<div style={{marginTop: "10px"}}>Mo calls me Amr Database... He doesn't know that after one byte, I'm already full!</div>
</Card>


@ -0,0 +1,5 @@
<Card title="Ashraf Samhouri (@Ash)">
CEO
<img src="/resources/profile/ash.png" width="270px" height="270px"/>
<div style={{marginTop: "10px"}}>On a mission to democratize automation for everyone.⚡</div>
</Card>


@ -0,0 +1,5 @@
<Card title="Ginikachukwu Nwibe (@ginika)">
Content Marketing
<img src="/resources/profile/ginika.png" width="270px" height="270px"/>
<div style={{marginTop: "10px"}}>Coming soon!</div>
</Card>


@ -0,0 +1,5 @@
<Card title="Hazem Adel (@hazem)">
Product Engineer
<img src="/resources/profile/hazem.jpg" width="270px" height="270px"/>
<div style={{marginTop: "10px"}}>Building the future of automation, one piece at a time 🚀</div>
</Card>


@ -0,0 +1,5 @@
<Card title="Mohammad Issa (@issa)">
Product Designer (Part Time)
<img src="/resources/profile/issa.png" width="270px" height="270px"/>
<div style={{marginTop: "10px"}}>Coming soon!</div>
</Card>


@ -0,0 +1,5 @@
<Card title="Kareem Nofal (@kareem)">
Content
<img src="/resources/profile/kareem.png" width="270px" height="270px"/>
<div style={{marginTop: "10px"}}>Coming soon!</div>
</Card>


@ -0,0 +1,5 @@
<Card title="Kishan Parmer (@kishan)">
Community & Piece Manager
<img src="/resources/profile/kishan.png" width="270px" height="270px"/>
<div style={{marginTop: "10px"}}>Coming soon!</div>
</Card>


@ -0,0 +1,5 @@
<Card title="Mohammad AbuAboud (@Mo)">
Product Engineer (CTO)
<img src="/resources/profile/mo.png" width="270px" height="270px"/>
<div style={{marginTop: "10px"}}>Former beaver, now human (major career change). Still can't resist building things, but traded wood for code 🌳 💻</div>
</Card>


@ -0,0 +1,5 @@
<Card title="Sanad Jawabreh (@sanad)">
Content
<img src="/resources/profile/sanad.png" width="270px" height="270px"/>
<div style={{marginTop: "10px"}}>Fox of all trades, adaptable, and always with a paw in every task. If it smells like a problem, you can bet he'll sneak in and figure it out!</div>
</Card>


@ -0,0 +1,3 @@
<Tip>
If you would like your users to use your own OAuth2 apps, we recommend you check [this](/admin-console/manage-oauth2).
</Tip>


@ -0,0 +1,105 @@
---
title: "Breaking Changes"
description: "This list shows all versions that include breaking changes and how to upgrade."
icon: "hammer"
---
## 0.64.0
### What has changed?
- MCP management is removed from the embedding SDK.
## 0.63.0
### What has changed?
- Replicate provider's text models have been removed.
### When is action necessary?
- If you are using one of Replicate's text models, you should replace it with another model from another provider.
## 0.46.0
### What has changed?
- The UI for "Array of Properties" inputs in the pieces has been updated, particularly affecting the "Dynamic Value" toggle functionality.
### When is action necessary?
- No action is required for this change.
- Your published flows will continue to work without interruption.
- When editing existing flows that use the "Dynamic Value" toggle on "Array of Properties" inputs (such as the "files" parameter in the "Extract Structured Data" action of the "Utility AI" piece), the end user will need to remap the values again.
- For details on the new UI implementation, refer to this [announcement](https://community.activepieces.com/t/inline-items/8964).
## 0.38.6
### What has changed?
- Workers no longer rely on the `AP_FLOW_WORKER_CONCURRENCY` and `AP_SCHEDULED_WORKER_CONCURRENCY` environment variables. These values are now retrieved from the app server.
### When is action necessary?
- If `AP_CONTAINER_TYPE` is set to `WORKER` on the worker machine, and `AP_SCHEDULED_WORKER_CONCURRENCY` or `AP_FLOW_WORKER_CONCURRENCY` are set to zero on the app server, workers will stop processing the queues. To fix this, check the [Separate Worker from App](https://www.activepieces.com/docs/install/configuration/separate-workers) documentation and set `AP_CONTAINER_TYPE` so the worker fetches the necessary values from the app server, as sketched below. If no container type is set on the worker machine, this is not a breaking change.
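
A minimal sketch of the resulting split (the concurrency values are illustrative only):

```sh
# App server environment - worker concurrency is now configured here
AP_FLOW_WORKER_CONCURRENCY=10
AP_SCHEDULED_WORKER_CONCURRENCY=10

# Dedicated worker machine - declares its type and no longer reads the concurrency variables
AP_CONTAINER_TYPE=WORKER
```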
## 0.35.1
### What has changed?
- The 'name' attribute has been renamed to 'externalId' in the `AppConnection` entity.
- The 'displayName' attribute has been added to the `AppConnection` entity.
### When is action necessary?
- If you are using the connections API, you should update the `name` attribute to `externalId` and add the `displayName` attribute, as sketched below.
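
A rough sketch of the change, assuming a `curl` call against the connections endpoint; the endpoint path, auth header, and anything beyond the renamed fields are assumptions here, so only the `name` → `externalId` / `displayName` rename is authoritative:

```sh
# Hypothetical request - endpoint and auth are assumptions, other body fields omitted.
# Before 0.35.1 the payload identified a connection with "name":
#   { "name": "my-connection", ... }
# From 0.35.1 onwards, send "externalId" and add "displayName":
curl -X POST "$ACTIVEPIECES_URL/api/v1/app-connections" \
  -H "Authorization: Bearer $AP_API_KEY" \
  -H "Content-Type: application/json" \
  -d '{ "externalId": "my-connection", "displayName": "My Connection" }'
```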
## 0.35.0
### What has changed?
- All branches are now converted to routers, and downgrade is not supported.
## 0.33.0
### What has changed?
- Files from actions or triggers are now stored in the database / S3 to support retries from certain steps, and the size of files from actions is now subject to the limit of `AP_MAX_FILE_SIZE_MB`.
- Files in triggers were previously passed as base64 encoded strings; now they are passed as file paths in the database / S3. Paused flows that have triggers from version 0.29.0 or earlier will no longer work.
### When is action necessary?
- If you are dealing with large files in the actions, consider increasing the `AP_MAX_FILE_SIZE_MB` to a higher value, and make sure the storage system (database/S3) has enough capacity for the files.
## 0.30.0
### What has changed?
- `AP_SANDBOX_RUN_TIME_SECONDS` is now deprecated and replaced with `AP_FLOW_TIMEOUT_SECONDS`
- `AP_CODE_SANDBOX_TYPE` is now deprecated and replaced with a new mode in `AP_EXECUTION_MODE`
### When is action necessary?
- If you have `AP_CODE_SANDBOX_TYPE` set to `V8_ISOLATE`, you should set `AP_EXECUTION_MODE` to `SANDBOX_CODE_ONLY` instead
- If you are using `AP_SANDBOX_RUN_TIME_SECONDS` to set the sandbox run time limit, you should switch to `AP_FLOW_TIMEOUT_SECONDS`, as sketched below
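
A minimal before/after sketch of the rename (the `600` value is only illustrative):

```sh
# Before 0.30.0
AP_CODE_SANDBOX_TYPE=V8_ISOLATE
AP_SANDBOX_RUN_TIME_SECONDS=600

# From 0.30.0 onwards
AP_EXECUTION_MODE=SANDBOX_CODE_ONLY
AP_FLOW_TIMEOUT_SECONDS=600
```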
## 0.28.0
### What has changed?
- **Project Members:**
- The `EXTERNAL_CUSTOMER` role has been deprecated and replaced with the `OPERATOR` role. Please check the permissions page for more details.
- All pending invitations will be removed.
- The User Invitation entity has been introduced to send invitations. You can still use the Project Member API to add roles for a user, but it requires the user to exist. If you want to send an email, use the User Invitation; a project member record will be created later, once the user accepts the invitation and registers an account.
- **Authentication:**
- The `SIGN_UP_ENABLED` environment variable, which allowed multiple users to sign up for different platforms/projects, has been removed. It has been replaced with inviting users to the same platform/project. All old users should continue to work normally.
### When is action necessary?
- **Project Members:**
If you use the embedding SDK or the create project member API with the `EXTERNAL_CUSTOMER` role, you should start using the `OPERATOR` role instead.
- **Authentication:**
Multiple platforms/projects are no longer supported in the community edition. Technically, everything is still there, but you would have to work around it through the API, as the authentication system has changed. If you have already created the users/platforms, they should continue to work, and no action is required.

docs/about/changelog.mdx Executable file

@ -0,0 +1,6 @@
---
title: "Changelog"
description: "A log of all notable changes to Activepieces"
icon: "code-commit"
url: "https://github.com/activepieces/activepieces/releases"
---

docs/about/editions.mdx Normal file

@ -0,0 +1,43 @@
---
title: "Editions"
description: ""
icon: "code-compare"
---
Activepieces operates on an open-core model, providing a core software platform as open source licensed under the permissive **MIT** license while offering additional features as proprietary add-ons in the cloud.
### Community / Open Source Edition
The Community edition is free and open source. It has all the pieces and features to build and run flows without any limitations.
### Commercial Editions
Learn more at: [https://www.activepieces.com/pricing](https://www.activepieces.com/pricing)
## Feature Comparison
| Feature | Community | Enterprise | Embed |
| ----------------------- | ----------- | -------- | -------|
| Flow History | ✅ | ✅ | ✅ |
| All Pieces | ✅ | ✅ | ✅ |
| Flow Runs | ✅ | ✅ | ✅ |
| Unlimited Flows | ✅ | ✅ | ✅ |
| Unlimited Connections | ✅ | ✅ | ✅ |
| Unlimited Flow steps | ✅ | ✅ | ✅ |
| Custom Pieces | ✅ | ✅ | ✅ |
| On Premise | ✅ | ✅ | ✅ |
| Cloud | ❌ | ✅ | ✅ |
| Project Team Members | ❌ | ✅ | ✅ |
| Manage Multiple Projects| ❌ | ✅ | ✅ |
| Limits Per Project | ❌ | ✅ | ✅ |
| Pieces Management | ❌ | ✅ | ✅ |
| Templates Management | ❌ | ✅ | ✅ |
| Custom Domain | ❌ | ✅ | ✅ |
| All Languages | ✅ | ✅ | ✅ |
| JWT Single Sign On | ❌ | ❌ | ✅ |
| Embed SDK | ❌ | ❌ | ✅ |
| Audit Logs | ❌ | ✅ | ❌ |
| Git Sync | ❌ | ✅ | ❌ |
| Private Pieces | ❌ | <b>5</b> | <b>2</b> |
| Custom Email Branding | ❌ | ✅ | ✅ |
| Custom Branding | ❌ | ✅ | ✅ |

docs/about/i18n.mdx Normal file

@ -0,0 +1,29 @@
---
title: "i18n Translations"
description: ""
icon: "language"
---
This guide helps you understand how to change or add new translations.
Activepieces uses Crowdin because it helps translators who don't know how to code. It also makes the approval process easier. Activepieces automatically syncs new text from the code to Crowdin, and translations back into the code.
## Contribute to existing translations
1. Create a Crowdin account
2. Join the project at https://crowdin.com/project/activepieces
![Join Project](/resources/crowdin.png)
3. Click on the language you want to translate
4. Click on "Translate All"
![Translate All](/resources/crowdin-translate-all.png)
5. Select the strings you want to translate and click the "Save" button
## Adding a new language
- Please contact us (support@activepieces.com) if you want to add a new language. We will add it to the project and you can start translating.

docs/about/license.mdx Executable file

@ -0,0 +1,22 @@
---
title: "License"
description: ""
icon: 'file-contract'
---
Activepieces' **core** is released as open source under the [MIT license](https://github.com/activepieces/activepieces/blob/main/LICENSE), and the enterprise / cloud edition features are released under a [Commercial License](https://github.com/activepieces/activepieces/blob/main/packages/ee/LICENSE).
The MIT license is a permissive license that grants users the freedom to use, modify, or distribute the software without any significant restrictions. The only requirement is that you include the license notice along with the software when distributing it.
Using the enterprise features (under the packages/ee and packages/server/api/src/app/ee folder) with a self-hosted instance requires an Activepieces license. If you are looking for these features, contact us at [sales@activepieces.com](mailto:sales@activepieces.com).
**Benefits of Dual Licensing Repo**
- **Transparency** - Everyone can see what we are doing and contribute to the project.
- **Clarity** - Everyone can see what the difference is between the open source and commercial versions of our software.
- **Audit** - Everyone can audit our code and see what we are doing.
- **Faster Development** - We can develop faster and more efficiently.
<Tip>
If you are still confused or have feedback, please open an issue on GitHub or send a message in the #contribution channel on Discord.
</Tip>

docs/about/telemetry.mdx Normal file

@ -0,0 +1,32 @@
---
title: "Telemetry"
description: ""
icon: 'calculator'
---
# Why Does Activepieces Need Data?
Because Activepieces is a self-hosted product, gathering usage metrics and insights can be difficult for us. However, these analytics are essential in helping us understand key behaviors and deliver a higher quality experience that meets your needs.
To ensure we can continue to improve our product, we have decided to track certain basic behaviors and metrics that are vital for understanding the usage of Activepieces.
We have implemented a minimal tracking plan and provide a detailed list of the metrics collected in a separate section.
# What Does Activepieces Collect?
We value transparency in data collection and assure you that we do not collect any personal information. The following events are currently being collected:
[Exact Code](https://github.com/activepieces/activepieces/blob/main/packages/shared/src/lib/common/telemetry.ts)
1. `flow.published`: Event fired when a flow is published
2. `signed.up`: Event fired when a user signs up
3. `flow.test`: Event fired when a flow is tested
4. `flow.created`: Event fired when a flow is created
5. `start.building`: Event fired when a user starts building
6. `demo.imported`: Event fired when a demo is imported
7. `flow.imported`: Event fired when a flow template is imported
# Opting out?
To opt out, set the environment variable `AP_TELEMETRY_ENABLED=false`


@ -0,0 +1,19 @@
---
title: "Appearance"
description: ""
icon: "palette"
---
<Snippet file="enterprise-feature.mdx" />
Customize the brand by going to the **Platform Admin -> Setup -> Branding**. Here, you can customize:
- Logo / FavIcon
- Primary color
- Platform Name
![Branding Platform](/resources/screenshots/branding.png)
<video controls autoplay muted loop playsinline className="w-full aspect-video" src="https://cdn.activepieces.com/videos/showcase/appearance.mp4">
</video>


@ -0,0 +1,13 @@
---
title: "Custom Domains"
description: ""
icon: "globe"
---
<Snippet file="enterprise-feature.mdx" />
You can set up a unique domain for your platform, like app.example.com.<br></br>
This is also used to determine the theme and branding on the authentication pages when a user is not logged in.
**Platform Admin -> Setup -> Branding**
![Manage Projects](/resources/screenshots/custom-domain.png)


@ -0,0 +1,11 @@
---
title: "Customize Emails"
description: ""
icon: "envelope"
---
<Snippet file="enterprise-feature.mdx" />
You can add your own mail server to Activepieces, or override the default one if you are on the cloud. From the platform, all email templates are automatically whitelabeled according to the [appearance settings](./appearance).
![Manage SMTP](/resources/screenshots/manage-smtp.png)


@ -0,0 +1,35 @@
---
title: "Manage AI Providers"
description: ""
icon: "sparkles"
---
Set your AI providers so your users enjoy a seamless building experience with our universal AI pieces like [Text AI](https://www.activepieces.com/pieces/text-ai).
## Manage AI Providers
You can manage the AI providers that you want to use in your flows. To do this, go to the **AI** page in the **Admin Console**.
You can define the provider's base URL and the API key.
These settings will be used for all the projects for every request to the AI provider.
![Manage AI Providers](/resources/screenshots/configure-ai-provider.png)
## Configure AI Credits Limits Per Project
You can configure the token limits per project. To do this, go to the project general settings and change the **AI Credits** field to the desired value.
<Note>
This limit is per project and is an accumulation of all the reported usage by the AI piece in the project.
Since only the AI piece goes through the Activepieces API,
using any other piece like the standalone OpenAI, Anthropic or Perplexity pieces will not count towards or respect this limit.
</Note>
![Manage AI Providers](/resources/screenshots/ai-credits-limit.png)
### AI Credits Explained
AI credits are the number of tasks that can be run by any of our universal AI pieces.
So if you have a flow run that contains 5 universal AI piece steps, the AI credits consumed will be 5.


@ -0,0 +1,12 @@
---
title: "Replace OAuth2 Apps"
description: ""
icon: "key"
---
<Snippet file="enterprise-feature.mdx" />
Your project automatically uses Activepieces OAuth2 Apps as the default setting. <br></br>
If you prefer to use your own OAuth2 apps, go to **Platform Admin -> Setup -> Pieces**, then choose a piece that uses OAuth2, such as Google Sheets, and click the open lock icon to configure your own OAuth2 app.
![Manage Oauth2 apps](/resources/screenshots/manage-oauth2.png)


@ -0,0 +1,21 @@
---
title: "Manage Pieces"
description: ""
icon: "puzzle-piece"
---
<Snippet file="enterprise-feature.mdx" />
## Show Specific Pieces in Project
If you go to **Pieces Settings** in your project, you can manage which pieces you would like to be available to your users.
![Manage Pieces](/resources/screenshots/manage-pieces.png)
![Manage Pieces](/resources/screenshots/manage-pieces-2.png)
## Install Piece
- Go to **Platform Admin -> Setup -> Pieces** and hit Install pieces.
- You can choose to install a piece from NPM or upload a tar file directly for private pieces.
- You can check the [Sharing Pieces Doc](/developers/sharing-pieces/overview) for more info.
![Manage Projects](/resources/screenshots/install-piece.png)
