mirror of https://github.com/nocodb/nocodb
աɨռɢӄաօռɢ
9 months ago
94 changed files with 2368 additions and 1412 deletions
@@ -0,0 +1,551 @@
#!/bin/bash
# set -x

# ******************************************************************************
# ***************** HELPER FUNCTIONS START *************************************

# Function to URL encode special characters in a string
urlencode() {
  local string="$1"
  local strlen=${#string}
  local encoded=""
  local pos c o

  for (( pos=0 ; pos<strlen ; pos++ )); do
    c=${string:$pos:1}
    case "$c" in
      [-_.~a-zA-Z0-9] ) o="$c" ;;
      * ) printf -v o '%%%02X' "'$c" ;;
    esac
    encoded+="$o"
  done
  echo "$encoded"
}
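# Usage example (illustrative): urlencode 'p@ss/word!' prints 'p%40ss%2Fword%21'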

# function to print a message in a box
print_box_message() {
  message=("$@") # Store all arguments in the array "message"
  edge="======================================"
  padding="  "

  echo "$edge"
  for element in "${message[@]}"; do
    echo "${padding}${element}"
  done
  echo "$edge"
}

# check whether a command exists
command_exists() {
  command -v "$1" >/dev/null 2>&1
}

# install a package based on the platform
install_package() {
  if command_exists yum; then
    sudo yum install -y "$1"
  elif command_exists apt; then
    sudo apt install -y "$1"
  elif command_exists brew; then
    brew install "$1"
  else
    echo "Package manager not found. Please install $1 manually."
  fi
}

# Function to check if sudo is required for the Docker Compose command
check_for_docker_compose_sudo() {
  if docker-compose ps >/dev/null 2>&1; then
    echo "n"
  else
    echo "y"
  fi
}
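# Usage (illustrative): mirrors the call site later in this script:
#   IS_DOCKER_COMPOSE_REQUIRE_SUDO=$(check_for_docker_compose_sudo)  # prints "y" or "n"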

# ***************** HELPER FUNCTIONS END ***************************************
# ******************************************************************************


# ******************************************************************************
# ******************** SYSTEM REQUIREMENTS CHECK START *************************

# Check if the following requirements are met:
# a. docker, docker-compose, lsof, openssl installed
# b. port mapping check: 80 and 443 are free or being used by the nginx container

REQUIRED_PORTS=(80 443)

echo "** Performing NocoDB system check and setup. This step may require sudo permissions"

# pre-install wget if not found
if ! command_exists wget; then
  echo "wget is not installed. Setting up for installation..."
  install_package wget
fi

# a. Check if the required tools are installed
echo " | Checking if required tools (docker, docker-compose, lsof, openssl) are installed..."
for tool in docker docker-compose lsof openssl; do
  if ! command_exists "$tool"; then
    echo "$tool is not installed. Setting up for installation..."
    if [ "$tool" = "docker-compose" ]; then
      sudo -E curl -L "https://github.com/docker/compose/releases/download/1.29.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
      sudo chmod +x /usr/local/bin/docker-compose
    elif [ "$tool" = "docker" ]; then
      wget -qO- https://get.docker.com/ | sh
    elif [ "$tool" = "lsof" ] || [ "$tool" = "openssl" ]; then
      install_package "$tool"
    fi
  fi
done

# (optional) Check if NocoDB is already installed and its expected version
# echo "Checking if NocoDB is already installed and its expected version..."
# Replace the following command with the actual command to check the NocoDB installation and version
# Example: nocodb_version=$(command_to_get_nocodb_version)
# echo "NocoDB version: $nocodb_version"

# b. Port mapping check
echo " | Checking port accessibility..."
for port in "${REQUIRED_PORTS[@]}"; do
  if lsof -Pi :$port -sTCP:LISTEN -t >/dev/null; then
    echo " | WARNING: Port $port is in use. Please make sure it is free." >&2
  else
    echo " | Port $port is free."
  fi
done

echo "** System check completed successfully. **"

# Define an array to store the messages to be printed at the end
message_arr=()

# extract the public ip address
PUBLIC_IP=$(dig +short myip.opendns.com @resolver1.opendns.com)

# Check if the public IP address is not empty; if empty, fall back to localhost
if [ -z "$PUBLIC_IP" ]; then
  PUBLIC_IP="localhost"
fi

# generate a fresh folder for the docker-compose file and do the setup within it
# Define the folder name
FOLDER_NAME="nocodb_$(date +"%Y%m%d_%H%M%S")"

# prompt for a custom folder name; if left empty, keep the default
#echo "Enter a custom folder name or press Enter to use the default folder name ($FOLDER_NAME): "
#read CUSTOM_FOLDER_NAME

if [ -n "$CUSTOM_FOLDER_NAME" ]; then
  FOLDER_NAME="$CUSTOM_FOLDER_NAME"
fi

message_arr+=("Setup folder: $FOLDER_NAME")

# Create the folder
mkdir -p "$FOLDER_NAME"

# Navigate into the folder
cd "$FOLDER_NAME" || exit

# ******************** SYSTEM REQUIREMENTS CHECK END ***************************
# ******************************************************************************


# ******************** INPUTS FROM USER START **********************************
# ******************************************************************************

echo "Choose Community or Enterprise Edition [CE/EE] (default: CE): "
read EDITION

echo "Do you want to configure SSL [Y/N] (default: N): "
read SSL_ENABLED

if [ -n "$SSL_ENABLED" ] && { [ "$SSL_ENABLED" = "Y" ] || [ "$SSL_ENABLED" = "y" ]; }; then
  SSL_ENABLED='y'
  echo "Enter the domain name for the SSL certificate: "
  read DOMAIN_NAME
  if [ -z "$DOMAIN_NAME" ]; then
    echo "Domain name is required for SSL configuration"
    exit 1
  fi
  message_arr+=("Domain: $DOMAIN_NAME")
else
  # prompt for an IP address; if left empty, use the extracted public IP
  echo "Enter the IP address or domain name for the NocoDB instance (default: $PUBLIC_IP): "
  read DOMAIN_NAME
  if [ -z "$DOMAIN_NAME" ]; then
    DOMAIN_NAME="$PUBLIC_IP"
  fi
fi

if [ -n "$EDITION" ] && { [ "$EDITION" = "EE" ] || [ "$EDITION" = "ee" ]; }; then
  echo "Enter the NocoDB license key: "
  read LICENSE_KEY
  if [ -z "$LICENSE_KEY" ]; then
    echo "License key is required for Enterprise Edition installation"
    exit 1
  fi
fi

echo "Do you want to enable Redis for caching [Y/N] (default: Y): "
read REDIS_ENABLED

if [ -z "$REDIS_ENABLED" ] || { [ "$REDIS_ENABLED" != "N" ] && [ "$REDIS_ENABLED" != "n" ]; }; then
  message_arr+=("Redis: Enabled")
else
  message_arr+=("Redis: Disabled")
fi

echo "Do you want to enable Watchtower for automatic updates [Y/N] (default: Y): "
read WATCHTOWER_ENABLED

if [ -z "$WATCHTOWER_ENABLED" ] || { [ "$WATCHTOWER_ENABLED" != "N" ] && [ "$WATCHTOWER_ENABLED" != "n" ]; }; then
  message_arr+=("Watchtower: Enabled")
else
  message_arr+=("Watchtower: Disabled")
fi

# ******************************************************************************
# *********************** INPUTS FROM USER END *********************************


# ******************************************************************************
# *************************** SETUP START **************************************

# Generate strong random passwords for PostgreSQL and Redis
STRONG_PASSWORD=$(openssl rand -base64 48 | tr -dc 'a-zA-Z0-9!@#$%^&*()-_+=' | head -c 32)
REDIS_PASSWORD=$(openssl rand -base64 48 | tr -dc 'a-zA-Z0-9' | head -c 24)
# Encode special characters in the password for connection-URL usage
ENCODED_PASSWORD=$(urlencode "$STRONG_PASSWORD")

IMAGE="nocodb/nocodb:latest"

# Determine the Docker image to use based on the edition
if [ -n "$EDITION" ] && { [ "$EDITION" = "EE" ] || [ "$EDITION" = "ee" ]; }; then
  IMAGE="nocodb/nocodb-ee:latest"
  DATABASE_URL="DATABASE_URL=postgres://postgres:${ENCODED_PASSWORD}@db:5432/nocodb"
else
  # use the NC_DB url until the encoding issue with DATABASE_URL is resolved
  DATABASE_URL="NC_DB=pg://db:5432?d=nocodb&user=postgres&password=${ENCODED_PASSWORD}"
fi

message_arr+=("Docker image: $IMAGE")

DEPENDS_ON=""

# Add the Redis service if enabled
if [ -z "$REDIS_ENABLED" ] || { [ "$REDIS_ENABLED" != "N" ] && [ "$REDIS_ENABLED" != "n" ]; }; then
  DEPENDS_ON="- redis"
fi
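# Illustrative forms with a hypothetical encoded password "p%40ss":
#   EE: DATABASE_URL=postgres://postgres:p%40ss@db:5432/nocodb
#   CE: NC_DB=pg://db:5432?d=nocodb&user=postgres&password=p%40ss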

# Write the Docker Compose file with the updated password
cat <<EOF > docker-compose.yml
version: '3'

services:
  nocodb:
    image: ${IMAGE}
    env_file: docker.env
    depends_on:
      - db
      ${DEPENDS_ON}
    restart: unless-stopped
    volumes:
      - ./nocodb:/usr/app/data
    labels:
      - "com.centurylinklabs.watchtower.enable=true"
    networks:
      - nocodb-network
  db:
    image: postgres:16.1
    env_file: docker.env
    volumes:
      - ./postgres:/var/lib/postgresql/data
    restart: unless-stopped
    healthcheck:
      interval: 10s
      retries: 10
      test: "pg_isready -U \"\$\$POSTGRES_USER\" -d \"\$\$POSTGRES_DB\""
      timeout: 2s
    networks:
      - nocodb-network

  nginx:
    image: nginx:latest
    volumes:
      - ./nginx:/etc/nginx/conf.d
EOF

if [ "$SSL_ENABLED" = 'y' ] || [ "$SSL_ENABLED" = 'Y' ]; then
  cat <<EOF >> docker-compose.yml
      - webroot:/var/www/certbot
      - ./letsencrypt:/etc/letsencrypt
      - letsencrypt-lib:/var/lib/letsencrypt
EOF
fi
cat <<EOF >> docker-compose.yml
    ports:
      - "80:80"
      - "443:443"
    depends_on:
      - nocodb
    restart: unless-stopped
    networks:
      - nocodb-network
EOF

if [ "$SSL_ENABLED" = 'y' ] || [ "$SSL_ENABLED" = 'Y' ]; then
  cat <<EOF >> docker-compose.yml
  certbot:
    image: certbot/certbot
    volumes:
      - ./letsencrypt:/etc/letsencrypt
      - letsencrypt-lib:/var/lib/letsencrypt
      - webroot:/var/www/certbot
    entrypoint: "/bin/sh -c 'trap exit TERM; while :; do certbot renew; sleep 12h & wait \$\${!}; done;'"
    depends_on:
      - nginx
    restart: unless-stopped
    networks:
      - nocodb-network
EOF
fi

if [ -z "$REDIS_ENABLED" ] || { [ "$REDIS_ENABLED" != "N" ] && [ "$REDIS_ENABLED" != "n" ]; }; then
  cat <<EOF >> docker-compose.yml
  redis:
    image: redis:latest
    restart: unless-stopped
    env_file: docker.env
    command:
      - /bin/sh
      - -c
      - redis-server --requirepass "\$\${REDIS_PASSWORD}"
    volumes:
      - redis:/data
    healthcheck:
      test: [ "CMD", "redis-cli", "-a", "\$\${REDIS_PASSWORD}", "--raw", "incr", "ping" ]
    networks:
      - nocodb-network
EOF
fi

if [ -z "$WATCHTOWER_ENABLED" ] || { [ "$WATCHTOWER_ENABLED" != "N" ] && [ "$WATCHTOWER_ENABLED" != "n" ]; }; then
  cat <<EOF >> docker-compose.yml
  watchtower:
    image: containrrr/watchtower
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    command: --schedule "0 2 * * 6" --cleanup
    restart: unless-stopped
    networks:
      - nocodb-network
EOF
fi

if [ "$SSL_ENABLED" = 'y' ] || [ "$SSL_ENABLED" = 'Y' ]; then
  cat <<EOF >> docker-compose.yml
volumes:
  letsencrypt-lib:
  webroot:
EOF
fi

# add the cache volume
if [ -z "$REDIS_ENABLED" ] || { [ "$REDIS_ENABLED" != "N" ] && [ "$REDIS_ENABLED" != "n" ]; }; then
  # if SSL is enabled, a top-level "volumes:" section has already been written above
  if [ "$SSL_ENABLED" = 'y' ] || [ "$SSL_ENABLED" = 'Y' ]; then
    cat <<EOF >> docker-compose.yml
  redis:
EOF
  else
    cat <<EOF >> docker-compose.yml
volumes:
  redis:
EOF
  fi
fi

# Create the network
cat <<EOF >> docker-compose.yml
networks:
  nocodb-network:
    driver: bridge
EOF

# Write the docker.env file
cat <<EOF > docker.env
POSTGRES_DB=nocodb
POSTGRES_USER=postgres
POSTGRES_PASSWORD=${STRONG_PASSWORD}
$DATABASE_URL
NC_LICENSE_KEY=${LICENSE_KEY}
EOF

# add the redis env if enabled
if [ -z "$REDIS_ENABLED" ] || { [ "$REDIS_ENABLED" != "N" ] && [ "$REDIS_ENABLED" != "n" ]; }; then
  cat <<EOF >> docker.env
REDIS_PASSWORD=${REDIS_PASSWORD}
NC_REDIS_URL=redis://:${REDIS_PASSWORD}@redis:6379/0
EOF
fi
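# Illustrative docker.env for CE with hypothetical generated secrets
# (NC_LICENSE_KEY stays empty unless EE was chosen):
#   POSTGRES_DB=nocodb
#   POSTGRES_USER=postgres
#   POSTGRES_PASSWORD=Xy9!abc
#   REDIS_PASSWORD=q1w2e3r4
#   NC_REDIS_URL=redis://:q1w2e3r4@redis:6379/0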

mkdir -p ./nginx

# Create the nginx config with the provided domain name
cat > ./nginx/default.conf <<EOF
server {
    listen 80;
EOF

if [ "$SSL_ENABLED" = 'y' ] || [ "$SSL_ENABLED" = 'Y' ]; then
  cat >> ./nginx/default.conf <<EOF
    server_name $DOMAIN_NAME;
EOF
fi

cat >> ./nginx/default.conf <<EOF
    location / {
        proxy_pass http://nocodb:8080;
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
    }
EOF

if [ "$SSL_ENABLED" = 'y' ] || [ "$SSL_ENABLED" = 'Y' ]; then
  cat >> ./nginx/default.conf <<EOF
    location /.well-known/acme-challenge/ {
        root /var/www/certbot;
    }
EOF
fi
cat >> ./nginx/default.conf <<EOF
}
EOF

if [ "$SSL_ENABLED" = 'y' ] || [ "$SSL_ENABLED" = 'Y' ]; then

  mkdir -p ./nginx-post-config

  # Create the post-certificate nginx config with the provided domain name
  cat > ./nginx-post-config/default.conf <<EOF
server {
    listen 80;
    server_name $DOMAIN_NAME;

    location / {
        return 301 https://\$host\$request_uri;
    }

    location /.well-known/acme-challenge/ {
        root /var/www/certbot;
    }
}


server {
    listen 443 ssl;
    server_name $DOMAIN_NAME;

    ssl_certificate /etc/letsencrypt/live/$DOMAIN_NAME/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/$DOMAIN_NAME/privkey.pem;

    location / {
        proxy_pass http://nocodb:8080;
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
    }
}
EOF
fi

IS_DOCKER_COMPOSE_REQUIRE_SUDO=$(check_for_docker_compose_sudo)

# Generate the update.sh file for upgrading images
if [ "$IS_DOCKER_COMPOSE_REQUIRE_SUDO" = "y" ]; then
  cat > ./update.sh <<EOF
sudo docker-compose pull
sudo docker-compose up -d --force-recreate
sudo docker image prune -a -f
EOF
else
  cat > ./update.sh <<EOF
docker-compose pull
docker-compose up -d --force-recreate
docker image prune -a -f
EOF
fi

message_arr+=("Update script: update.sh")

# Pull the latest images and start the docker-compose setup
if [ "$IS_DOCKER_COMPOSE_REQUIRE_SUDO" = "y" ]; then
  echo "Docker Compose requires sudo. Running the docker-compose setup with sudo."
  sudo docker-compose pull
  sudo docker-compose up -d
else
  docker-compose pull
  docker-compose up -d
fi

echo 'Waiting for Nginx to start...'

sleep 5

if [ "$SSL_ENABLED" = 'y' ] || [ "$SSL_ENABLED" = 'Y' ]; then
  # Initial Let's Encrypt certificate request
  echo "Starting Let's Encrypt certificate request..."

  if [ "$IS_DOCKER_COMPOSE_REQUIRE_SUDO" = "y" ]; then
    sudo docker-compose exec certbot certbot certonly --webroot --webroot-path=/var/www/certbot -d "$DOMAIN_NAME" --email "contact@$DOMAIN_NAME" --agree-tos --no-eff-email && echo "Certificate request successful" || echo "Certificate request failed"
  else
    docker-compose exec certbot certbot certonly --webroot --webroot-path=/var/www/certbot -d "$DOMAIN_NAME" --email "contact@$DOMAIN_NAME" --agree-tos --no-eff-email && echo "Certificate request successful" || echo "Certificate request failed"
  fi

  # Update the nginx config to use the new certificates
  rm -f ./nginx/default.conf
  mv ./nginx-post-config/default.conf ./nginx/
  rm -r ./nginx-post-config

  echo "Restarting nginx to apply the new certificates"
  # Reload nginx to apply the new certificates
  if [ "$IS_DOCKER_COMPOSE_REQUIRE_SUDO" = "y" ]; then
    sudo docker-compose exec nginx nginx -s reload
  else
    docker-compose exec nginx nginx -s reload
  fi

  message_arr+=("NocoDB is now available at https://$DOMAIN_NAME")

elif [ -n "$DOMAIN_NAME" ]; then
  message_arr+=("NocoDB is now available at http://$DOMAIN_NAME")
else
  message_arr+=("NocoDB is now available at http://localhost")
fi

print_box_message "${message_arr[@]}"

# *************************** SETUP END *****************************************
# ******************************************************************************
(6 binary image files added; sizes: 77 KiB, 743 KiB, 64 KiB, 122 KiB, 1.0 MiB, 161 KiB)
@@ -1,36 +1,620 @@
import debug from 'debug';
import { Logger } from '@nestjs/common';
import type IORedis from 'ioredis';
import { CacheDelDirection, CacheGetType } from '~/utils/globals';

const log = debug('nc:cache');
const logger = new Logger('CacheMgr');

/*
  - keys are stored as follows:
    - simple key: nc:<orgs>:<scope>:<model_id_1>
      - value: { value: { ... }, parentKeys: [ "nc:<orgs>:<scope>:<model_id_1>:list" ], timestamp: 1234567890 }
      - stored as stringified JSON
    - list key: nc:<orgs>:<scope>:<model_id_1>:list
      - stored as a SET
  - get returns `value` only
  - getRaw returns the whole cache object with metadata
*/
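// Illustrative example of the scheme above, with hypothetical IDs:
//   GET nc:noco:view:vw_abc
//     -> '{"value":{...},"parentKeys":["nc:noco:view:md_xyz:list"],"timestamp":1700000000000}'
//   SMEMBERS nc:noco:view:md_xyz:list
//     -> ["nc:noco:view:vw_abc", ...]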

const NC_REDIS_TTL = +process.env.NC_REDIS_TTL || 60 * 60 * 24 * 3; // 3 days
const NC_REDIS_GRACE_TTL = +process.env.NC_REDIS_GRACE_TTL || 60 * 60 * 24 * 1; // 1 day

export default abstract class CacheMgr {
  client: IORedis;
  prefix: string;
  context: string;

  // avoid circular structure to JSON
  getCircularReplacer = () => {
    const seen = new WeakSet();
    return (_, value) => {
      if (typeof value === 'object' && value !== null) {
        if (seen.has(value)) {
          return;
        }
        seen.add(value);
      }
      return value;
    };
  };
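  // Usage (illustrative): JSON.stringify(obj, this.getCircularReplacer())
  // serializes already-seen objects as undefined instead of throwing a
  // "circular structure" TypeError.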

  // @ts-ignore
  async del(key: string[] | string): Promise<any> {
    log(`${this.context}::del: deleting key ${key}`);
    if (Array.isArray(key)) {
      if (key.length) {
        return this.client.del(key);
      }
    } else if (key) {
      return this.client.del(key);
    }
  }

  // @ts-ignore
  private async getRaw(
    key: string,
    type?: string,
    skipTTL = false,
  ): Promise<any> {
    log(`${this.context}::getRaw: getting key ${key} with type ${type}`);
    if (type === CacheGetType.TYPE_ARRAY) {
      return this.client.smembers(key);
    } else {
      const res = await this.client.get(key);
      if (res) {
        try {
          const o = JSON.parse(res);
          if (typeof o === 'object') {
            if (
              o &&
              Object.keys(o).length === 0 &&
              Object.getPrototypeOf(o) === Object.prototype
            ) {
              log(`${this.context}::get: object is empty!`);
            }

            if (!skipTTL && o.timestamp) {
              const diff = Date.now() - o.timestamp;
              if (diff > NC_REDIS_GRACE_TTL * 1000) {
                await this.refreshTTL(key);
              }
            }

            return Promise.resolve(o);
          }
        } catch (e) {
          logger.error(`Bad value stored for key ${key} : ${res}`);
          return Promise.resolve(res);
        }
      }
      return Promise.resolve(res);
    }
  }
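  // Note: reads refresh TTLs lazily; when a cached entry's timestamp is older
  // than NC_REDIS_GRACE_TTL, getRaw() calls refreshTTL() to push the expiry
  // of the key (and, transitively, its parent lists) out again.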

  // @ts-ignore
  async get(key: string, type: string): Promise<any> {
    return this.getRaw(key, type).then((res) => {
      if (res && res.value) {
        return res.value;
      }
      return res;
    });
  }

  // @ts-ignore
  async set(
    key: string,
    value: any,
    options: {
      // when we prepare beforehand, we don't need to prepare again
      skipPrepare?: boolean;
      // timestamp for the value; if not provided, it will be set to the current time
      timestamp?: number;
    } = {
      skipPrepare: false,
    },
  ): Promise<any> {
    const { skipPrepare, timestamp } = options;

    if (typeof value !== 'undefined' && value) {
      log(`${this.context}::set: setting key ${key} with value ${value}`);

      // if the provided value is an array, store it as a set
      if (Array.isArray(value) && value.length) {
        return new Promise((resolve) => {
          this.client
            .pipeline()
            .sadd(key, value)
            // - 60 seconds to avoid expiring the list before any of its children
            .expire(key, NC_REDIS_TTL - 60)
            .exec((err) => {
              if (err) {
                logger.error(
                  `${this.context}::set: error setting key ${key} with value ${value}`,
                );
              }
              resolve(true);
            });
        });
      }

      if (!skipPrepare) {
        // try to get the old key value
        const keyValue = await this.getRaw(key);
        // prepare the new key value
        value = this.prepareValue({
          value,
          parentKeys: this.getParents(keyValue),
          timestamp,
        });
      }

      return this.client
        .set(
          key,
          JSON.stringify(value, this.getCircularReplacer()),
          'EX',
          NC_REDIS_TTL,
        )
        .then(async () => {
          await this.refreshTTL(key, timestamp);
          return true;
        });
    } else {
      log(`${this.context}::set: value is empty for ${key}. Skipping ...`);
      return Promise.resolve(true);
    }
  }

  // @ts-ignore
  async setExpiring(
    key: string,
    value: any,
    seconds: number,
    options: {
      // when we prepare beforehand, we don't need to prepare again
      skipPrepare?: boolean;
      // timestamp for the value; if not provided, it will be set to the current time
      timestamp?: number;
    } = {
      skipPrepare: false,
    },
  ): Promise<any> {
    const { skipPrepare, timestamp } = options;

    if (typeof value !== 'undefined' && value) {
      log(
        `${this.context}::setExpiring: setting key ${key} with value ${value}`,
      );

      if (Array.isArray(value) && value.length) {
        return new Promise((resolve) => {
          this.client
            .pipeline()
            .sadd(key, value)
            .expire(key, seconds)
            .exec((err) => {
              if (err) {
                logger.error(
                  `${this.context}::setExpiring: error setting key ${key} with value ${value}`,
                );
              }
              resolve(true);
            });
        });
      }

      if (!skipPrepare) {
        // try to get the old key value
        const keyValue = await this.getRaw(key);
        // prepare the new key value
        value = this.prepareValue({
          value,
          parentKeys: this.getParents(keyValue),
          timestamp,
        });
      }

      return this.client.set(
        key,
        JSON.stringify(value, this.getCircularReplacer()),
        'EX',
        seconds,
      );
    } else {
      log(
        `${this.context}::setExpiring: value is empty for ${key}. Skipping ...`,
      );
      return Promise.resolve(true);
    }
  }

  // @ts-ignore
  async incrby(key: string, value = 1): Promise<any> {
    return this.client.incrby(key, value);
  }

  async getList(
    scope: string,
    subKeys: string[],
  ): Promise<{
    list: any[];
    isNoneList: boolean;
  }> {
    // remove null from arrays
    subKeys = subKeys.filter((k) => k);
    // e.g. key = nc:<orgs>:<scope>:<project_id_1>:<source_id_1>:list
    const key =
      subKeys.length === 0
        ? `${this.prefix}:${scope}:list`
        : `${this.prefix}:${scope}:${subKeys.join(':')}:list`;
    // e.g. arr = ["nc:<orgs>:<scope>:<model_id_1>", "nc:<orgs>:<scope>:<model_id_2>"]
    const arr = (await this.get(key, CacheGetType.TYPE_ARRAY)) || [];
    log(`${this.context}::getList: getting list with key ${key}`);
    const isNoneList = arr.length && arr.includes('NONE');

    if (isNoneList || !arr.length) {
      return Promise.resolve({
        list: [],
        isNoneList,
      });
    }

    log(`${this.context}::getList: getting list with keys ${arr}`);
    const values = await this.client.mget(arr);

    if (values.some((v) => v === null)) {
      // FALLBACK: a key is missing from the list; this should never happen
      logger.error(`${this.context}::getList: missing value for ${key}`);
      const allParents = [];
      // get all parents from the children
      values.forEach((v) => {
        if (v) {
          try {
            const o = JSON.parse(v);
            if (typeof o === 'object') {
              allParents.push(...this.getParents(o));
            }
          } catch (e) {
            logger.error(
              `${this.context}::getList: Bad value stored for key ${arr[0]} : ${v}`,
            );
          }
        }
      });
      // remove duplicates
      const uniqueParents = [...new Set(allParents)];
      // delete all parents and children
      await Promise.all(
        uniqueParents.map(async (p) => {
          await this.deepDel(p, CacheDelDirection.PARENT_TO_CHILD);
        }),
      );
      return Promise.resolve({
        list: [],
        isNoneList,
      });
    }

    if (values.length) {
      try {
        const o = JSON.parse(values[0]);
        if (typeof o === 'object') {
          const diff = Date.now() - o.timestamp;
          if (diff > NC_REDIS_GRACE_TTL * 1000) {
            await this.refreshTTL(key);
          }
        }
      } catch (e) {
        logger.error(
          `${this.context}::getList: Bad value stored for key ${arr[0]} : ${values[0]}`,
        );
      }
    }

    return {
      list: values.map((res) => {
        try {
          const o = JSON.parse(res);
          if (typeof o === 'object') {
            return o.value;
          }
        } catch (e) {
          return res;
        }
        return res;
      }),
      isNoneList,
    };
  }
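  // Usage sketch with a hypothetical receiver, scope, and IDs:
  //   const { list, isNoneList } = await cache.getList('view', ['md_xyz']);
  //   // reads nc:<orgs>:view:md_xyz:list, then MGETs every member key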

  async setList(
    scope: string,
    subListKeys: string[],
    list: any[],
    props: string[] = [],
  ): Promise<boolean> {
    // remove null from arrays
    subListKeys = subListKeys.filter((k) => k);
    // construct the key for the list
    // e.g. nc:<orgs>:<scope>:<project_id_1>:<source_id_1>:list
    const listKey =
      subListKeys.length === 0
        ? `${this.prefix}:${scope}:list`
        : `${this.prefix}:${scope}:${subListKeys.join(':')}:list`;
    if (!list.length) {
      // Set NONE here so that it won't hit the DB on each page load
      return this.set(listKey, ['NONE']);
    }

    // timestamp for the list
    const timestamp = Date.now();

    // remove the existing list
    await this.deepDel(listKey, CacheDelDirection.PARENT_TO_CHILD);
    const listOfGetKeys = [];

    for (const o of list) {
      // construct the key for the get
      let getKey = `${this.prefix}:${scope}:${o.id}`;
      if (props.length) {
        const propValues = props.map((p) => o[p]);
        // e.g. nc:<orgs>:<scope>:<prop_value_1>:<prop_value_2>
        getKey = `${this.prefix}:${scope}:${propValues.join(':')}`;
      }
      log(`${this.context}::setList: get key ${getKey}`);
      // get the key
      let rawValue = await this.getRaw(getKey, CacheGetType.TYPE_OBJECT);
      if (rawValue) {
        log(`${this.context}::setList: preparing key ${getKey}`);
        // prepare the key
        rawValue = this.prepareValue({
          value: o,
          parentKeys: this.getParents(rawValue),
          newKey: listKey,
          timestamp,
        });
      } else {
        rawValue = this.prepareValue({
          value: o,
          parentKeys: [listKey],
          timestamp,
        });
      }
      // set the key
      log(`${this.context}::setList: setting key ${getKey}`);
      await this.set(getKey, rawValue, {
        skipPrepare: true,
        timestamp,
      });
      // push the key to the list
      listOfGetKeys.push(getKey);
    }
    // set the list
    log(`${this.context}::setList: setting list with key ${listKey}`);
    return this.set(listKey, listOfGetKeys);
  }

  async deepDel(key: string, direction: string): Promise<boolean> {
    log(`${this.context}::deepDel: choose direction ${direction}`);
    if (direction === CacheDelDirection.CHILD_TO_PARENT) {
      const childKey = await this.getRaw(key, CacheGetType.TYPE_OBJECT);
      // given a child key, delete all keys in the corresponding parent lists
      const scopeList = this.getParents(childKey);
      for (const listKey of scopeList) {
        // get the target list
        let list = (await this.get(listKey, CacheGetType.TYPE_ARRAY)) || [];
        if (!list.length) {
          continue;
        }
        // remove the target key
        list = list.filter((k) => k !== key);
        // delete the list
        log(`${this.context}::deepDel: remove listKey ${listKey}`);
        await this.del(listKey);
        if (list.length) {
          // set the target list
          log(`${this.context}::deepDel: set key ${listKey}`);
          await this.set(listKey, list);
        }
      }
      log(`${this.context}::deepDel: remove key ${key}`);
      return await this.del(key);
    } else if (direction === CacheDelDirection.PARENT_TO_CHILD) {
      key = /:list$/.test(key) ? key : `${key}:list`;
      // given a list key, delete all the children
      const listOfChildren = await this.get(key, CacheGetType.TYPE_ARRAY);
      // delete each child key
      await this.del(listOfChildren);
      // delete the list key
      return await this.del(key);
    } else {
      log(`Invalid deepDel direction found : ${direction}`);
      return Promise.resolve(false);
    }
  }
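  // Illustrative: CHILD_TO_PARENT removes a single key and plucks it out of
  // every parent list it belongs to; PARENT_TO_CHILD on a ':list' key deletes
  // the list and all of its member keys in one sweep.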

  async appendToList(
    scope: string,
    subListKeys: string[],
    key: string,
  ): Promise<boolean> {
    // remove null from arrays
    subListKeys = subListKeys.filter((k) => k);
    // e.g. key = nc:<orgs>:<scope>:<project_id_1>:<source_id_1>:list
    const listKey =
      subListKeys.length === 0
        ? `${this.prefix}:${scope}:list`
        : `${this.prefix}:${scope}:${subListKeys.join(':')}:list`;
    log(`${this.context}::appendToList: append key ${key} to ${listKey}`);
    let list = await this.get(listKey, CacheGetType.TYPE_ARRAY);

    if (!list || !list.length) {
      return false;
    }

    if (list.includes('NONE')) {
      list = [];
      await this.del(listKey);
    }

    log(`${this.context}::appendToList: get key ${key}`);
    // get the Get key
    const rawValue = await this.getRaw(key, CacheGetType.TYPE_OBJECT);
    log(`${this.context}::appendToList: preparing key ${key}`);
    if (!rawValue) {
      // FALLBACK: drop all keys that would be affected by this (should never happen)
      logger.error(`${this.context}::appendToList: value is empty for ${key}`);
      const allParents = [];
      // get all children
      const listValues = await this.getList(scope, subListKeys);
      // get all parents from the children
      listValues.list.forEach((v) => {
        allParents.push(...this.getParents(v));
      });
      // remove duplicates
      const uniqueParents = [...new Set(allParents)];
      // delete all parents and children
      await Promise.all(
        uniqueParents.map(async (p) => {
          await this.deepDel(p, CacheDelDirection.PARENT_TO_CHILD);
        }),
      );
      return false;
    }
    // prepare the Get key
    const preparedValue = this.prepareValue({
      value: rawValue.value ?? rawValue,
      parentKeys: this.getParents(rawValue),
      newKey: listKey,
    });
    // set the Get key
    log(`${this.context}::appendToList: setting key ${key}`);
    await this.set(key, preparedValue, {
      skipPrepare: true,
    });

    list.push(key);
    return this.set(listKey, list).then(async (res) => {
      await this.refreshTTL(listKey);
      return res;
    });
  }

  // wrap the value with metadata
  prepareValue(args: {
    value: any;
    parentKeys: string[];
    newKey?: string;
    timestamp?: number;
  }) {
    const { value, parentKeys, newKey, timestamp } = args;

    if (newKey && !parentKeys.includes(newKey)) {
      parentKeys.push(newKey);
    }

    const cacheObj = {
      value,
      parentKeys,
      timestamp: timestamp || Date.now(),
    };

    return cacheObj;
  }

  getParents(rawValue) {
    if (rawValue && rawValue.parentKeys) {
      return rawValue.parentKeys;
    } else if (!rawValue) {
      return [];
    } else {
      logger.error(
        `${this.context}::getParents: parentKeys not found ${JSON.stringify(
          rawValue,
        )}`,
      );
      return [];
    }
  }
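  // Illustrative, with hypothetical IDs:
  //   prepareValue({ value: { id: 'vw_abc' }, parentKeys: [], newKey: 'nc:noco:view:md_xyz:list' })
  //   -> { value: { id: 'vw_abc' }, parentKeys: ['nc:noco:view:md_xyz:list'], timestamp: Date.now() }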

  async refreshTTL(key: string, timestamp?: number): Promise<void> {
    log(`${this.context}::refreshTTL: refreshing TTL for ${key}`);
    const isParent = /:list$/.test(key);
    timestamp = timestamp || Date.now();
    if (isParent) {
      const list =
        (await this.getRaw(key, CacheGetType.TYPE_ARRAY, true)) || [];
      if (list && list.length) {
        const listValues = await this.client.mget(list);
        const pipeline = this.client.pipeline();
        for (const [i, v] of listValues.entries()) {
          const key = list[i];
          if (v) {
            try {
              const o = JSON.parse(v);
              if (typeof o === 'object') {
                if (o.timestamp !== timestamp) {
                  o.timestamp = timestamp;
                  pipeline.set(
                    key,
                    JSON.stringify(o, this.getCircularReplacer()),
                    'EX',
                    NC_REDIS_TTL,
                  );
                }
              }
            } catch (e) {
              logger.error(
                `${this.context}::refreshTTL: Bad value stored for key ${key} : ${v}`,
              );
            }
          }
        }
        pipeline.expire(key, NC_REDIS_TTL - 60);
        await pipeline.exec();
      }
    } else {
      const rawValue = await this.getRaw(key, null, true);
      if (rawValue) {
        if (rawValue.parentKeys && rawValue.parentKeys.length) {
          for (const parent of rawValue.parentKeys) {
            await this.refreshTTL(parent, timestamp);
          }
        } else {
          if (rawValue.timestamp !== timestamp) {
            rawValue.timestamp = timestamp;
            await this.client.set(
              key,
              JSON.stringify(rawValue, this.getCircularReplacer()),
              'EX',
              NC_REDIS_TTL,
            );
          }
        }
      }
    }
  }

  async destroy(): Promise<boolean> {
    log(`${this.context}::destroy: destroy redis`);
    return this.client.flushdb().then((r) => r === 'OK');
  }

  async export(): Promise<any> {
    log(`${this.context}::export: export data`);
    const data = await this.client.keys('*');
    const res = {};
    return await Promise.all(
      data.map(async (k) => {
        res[k] = await this.get(
          k,
          k.slice(-4) === 'list'
            ? CacheGetType.TYPE_ARRAY
            : CacheGetType.TYPE_OBJECT,
        );
      }),
    ).then(() => {
      return res;
    });
  }
}
@@ -0,0 +1,88 @@
import type { Knex } from 'knex';
import { MetaTable } from '~/utils/globals';

const up = async (knex: Knex) => {
  if (knex.client.config.client === 'sqlite3') {
    // nc_012_alter_colum_data_types.ts
    await knex.schema.alterTable(MetaTable.COLUMNS, (table) => {
      table.text('cdf').alter();
    });
    await knex.schema.alterTable(MetaTable.COLUMNS, (table) => {
      table.text('dtxp').alter();
    });
    await knex.schema.alterTable(MetaTable.COLUMNS, (table) => {
      table.text('cc').alter();
    });
    await knex.schema.alterTable(MetaTable.COLUMNS, (table) => {
      table.text('ct').alter();
    });
    // nc_014_alter_colum_data_types.ts
    await knex.schema.alterTable(MetaTable.FORM_VIEW, (table) => {
      table.text('success_msg').alter();
    });
    await knex.schema.alterTable(MetaTable.FORM_VIEW, (table) => {
      table.text('redirect_url').alter();
    });
    await knex.schema.alterTable(MetaTable.FORM_VIEW, (table) => {
      table.text('banner_image_url').alter();
    });
    await knex.schema.alterTable(MetaTable.FORM_VIEW, (table) => {
      table.text('logo_url').alter();
    });
    await knex.schema.alterTable(MetaTable.FORM_VIEW_COLUMNS, (table) => {
      table.text('description').alter();
    });
    // nc_016_alter_hooklog_payload_types.ts
    await knex.schema.alterTable(MetaTable.HOOK_LOGS, (table) => {
      table.text('payload').alter();
    });
    // nc_029_webhook.ts
    await knex.schema.alterTable(MetaTable.HOOK_LOGS, (table) => {
      table.text('response').alter();
    });
  }
};
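// Assumption (not stated in this diff): the migrations referenced in the
// comments above originally skipped sqlite3, where altering a column type
// requires a table rebuild, so this migration re-applies the text()
// widenings for SQLite-backed meta databases.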

const down = async (knex: Knex) => {
  if (knex.client.config.client === 'sqlite3') {
    // nc_012_alter_colum_data_types.ts
    await knex.schema.alterTable(MetaTable.COLUMNS, (table) => {
      table.string('cdf').alter();
    });
    await knex.schema.alterTable(MetaTable.COLUMNS, (table) => {
      table.string('dtxp').alter();
    });
    await knex.schema.alterTable(MetaTable.COLUMNS, (table) => {
      table.string('cc').alter();
    });
    await knex.schema.alterTable(MetaTable.COLUMNS, (table) => {
      table.string('ct').alter();
    });
    // nc_014_alter_colum_data_types.ts
    await knex.schema.alterTable(MetaTable.FORM_VIEW, (table) => {
      table.string('success_msg').alter();
    });
    await knex.schema.alterTable(MetaTable.FORM_VIEW, (table) => {
      table.string('redirect_url').alter();
    });
    await knex.schema.alterTable(MetaTable.FORM_VIEW, (table) => {
      table.string('banner_image_url').alter();
    });
    await knex.schema.alterTable(MetaTable.FORM_VIEW, (table) => {
      table.string('logo_url').alter();
    });
    await knex.schema.alterTable(MetaTable.FORM_VIEW_COLUMNS, (table) => {
      table.string('description').alter();
    });
    // nc_016_alter_hooklog_payload_types.ts
    await knex.schema.alterTable(MetaTable.HOOK_LOGS, (table) => {
      table.boolean('payload').alter();
    });
    // nc_029_webhook.ts
    await knex.schema.alterTable(MetaTable.HOOK_LOGS, (table) => {
      table.boolean('response').alter();
    });
  }
};

export { up, down };
@@ -0,0 +1,28 @@
import type { Knex } from 'knex';
import { MetaTable } from '~/utils/globals';

const up = async (knex: Knex) => {
  await knex.schema.alterTable(MetaTable.FORM_VIEW, (table) => {
    table.text('subheading').alter();
  });
  await knex.schema.alterTable(MetaTable.FORM_VIEW_COLUMNS, (table) => {
    table.text('label').alter();
  });
  await knex.schema.alterTable(MetaTable.FORM_VIEW_COLUMNS, (table) => {
    table.text('help').alter();
  });
};

const down = async (knex: Knex) => {
  await knex.schema.alterTable(MetaTable.FORM_VIEW, (table) => {
    table.string('subheading').alter();
  });
  await knex.schema.alterTable(MetaTable.FORM_VIEW_COLUMNS, (table) => {
    table.string('label').alter();
  });
  await knex.schema.alterTable(MetaTable.FORM_VIEW_COLUMNS, (table) => {
    table.string('help').alter();
  });
};

export { up, down };