1
0
Fork 0
mirror of https://github.com/anyproto/any-sync-dockercompose.git synced 2025-06-08 05:47:03 +09:00

improved config generation: adaptations, fixes, refactoring

This commit is contained in:
Kirill Shklyaev 2025-02-05 13:55:34 +03:00
parent 5ae9f4a70a
commit ef3158740b
22 changed files with 289 additions and 431 deletions

6
Dockerfile-generate-env Normal file
View file

@ -0,0 +1,6 @@
# syntax=docker/dockerfile:1
FROM python:3.13-alpine
WORKDIR /code
COPY docker-generateconfig/env-requirements.txt .
RUN pip install --no-cache-dir -r env-requirements.txt
ENTRYPOINT ["sh", "-c", "python docker-generateconfig/env.py && python docker-generateconfig/generateTemplate.py"]

19
Dockerfile-generateconfig Normal file
View file

@ -0,0 +1,19 @@
FROM ghcr.io/anyproto/any-sync-tools:latest
WORKDIR /code
RUN apt update && \
apt install -y python3 python3-pip && \
pip3 install --no-cache-dir pyyaml && \
apt clean && \
rm -rf /var/lib/apt/lists/*
ENTRYPOINT ["sh", "-c", \
"if grep -q 'networkId' /code/etc/client.yml && [ ! -f /code/etc/ids.yml ]; then \
python3 /code/docker-generateconfig/persistentAccount.py && \
any-sync-network create --auto --c /code/etc/defaultTemplate.yml && \
python3 /code/docker-generateconfig/persistentAccount.py; \
else \
any-sync-network create --auto --c /code/etc/defaultTemplate.yml && \
python3 /code/docker-generateconfig/persistentAccount.py; \
fi"]

View file

@ -1,6 +0,0 @@
# syntax=docker/dockerfile:1
FROM golang:1.23-alpine
RUN apk add --no-cache bash yq
RUN go install github.com/anyproto/any-sync-tools/anyconf@latest
WORKDIR /code
ENTRYPOINT ["bash", "/code/docker-generateconfig/anyconf.sh"]

View file

@ -1,6 +0,0 @@
# syntax=docker/dockerfile:1
FROM python:3.11-alpine
WORKDIR /code
COPY docker-generateconfig/env-requirements.txt requirements.txt
RUN pip install -r requirements.txt
ENTRYPOINT ["python", "/code/docker-generateconfig/env.py"]

View file

@ -1,5 +0,0 @@
# syntax=docker/dockerfile:1
FROM alpine:3.18.4
RUN apk add --no-cache bash yq perl python3 py3-yaml
WORKDIR /code
ENTRYPOINT ["bash", "/code/docker-generateconfig/processing.sh"]

View file

@ -12,13 +12,11 @@ endif
# targets
generate_env:
ifeq ($(QUIET_MODE),true)
docker buildx build --quiet --load --tag generateconfig-env --file Dockerfile-generateconfig-env . >/dev/null
docker buildx build --quiet --load --tag generate-env --file Dockerfile-generate-env . >/dev/null
else
docker buildx build --load --tag generateconfig-env --file Dockerfile-generateconfig-env .
docker buildx build --load --tag generate-env --file Dockerfile-generate-env .
endif
docker run --rm \
--volume ${CURDIR}/:/code/:Z \
generateconfig-env
docker run --detach --rm --volume ${CURDIR}/:/code/ generate-env
start: generate_env
$(DOCKER_COMPOSE) up --detach --remove-orphans --quiet-pull

View file

@ -1,28 +1,15 @@
services:
# generate configs using the anyconf utility
generateconfig-anyconf:
# generate configs using the any-sync-network tool
generateconfig:
build:
context: .
dockerfile: Dockerfile-generateconfig-anyconf
dockerfile: Dockerfile-generateconfig
volumes:
- ./:/code:Z
- "${STORAGE_DIR}:/code/storage:Z"
# processing any-sync-* configs
generateconfig-processing:
depends_on:
generateconfig-anyconf:
condition: service_completed_successfully
build:
context: .
dockerfile: Dockerfile-generateconfig-processing
volumes:
- ./:/code:Z
- "${STORAGE_DIR}:/code/storage:Z"
mongo-1:
depends_on:
generateconfig-processing:
generateconfig:
condition: service_completed_successfully
image: "mongo:${MONGO_VERSION}"
restart: unless-stopped
@ -38,7 +25,7 @@ services:
redis:
depends_on:
generateconfig-processing:
generateconfig:
condition: service_completed_successfully
image: "redis/redis-stack-server:${REDIS_VERSION}"
restart: unless-stopped
@ -88,7 +75,7 @@ services:
any-sync-coordinator_bootstrap:
image: "ghcr.io/anyproto/any-sync-coordinator:${ANY_SYNC_COORDINATOR_VERSION}"
depends_on:
generateconfig-processing:
generateconfig:
condition: service_completed_successfully
mongo-1:
condition: service_healthy
@ -100,7 +87,7 @@ services:
any-sync-coordinator:
image: "ghcr.io/anyproto/any-sync-coordinator:${ANY_SYNC_COORDINATOR_VERSION}"
depends_on:
generateconfig-processing:
generateconfig:
condition: service_completed_successfully
mongo-1:
condition: service_healthy
@ -134,7 +121,7 @@ services:
- "${ANY_SYNC_FILENODE_METRIC_ADDR}:8000"
volumes:
- ./etc/any-sync-filenode:/etc/any-sync-filenode:Z
- ./etc/.aws:/root/.aws:ro
- ./etc/awsCredentials:/root/.aws/credentials:ro
- "${STORAGE_DIR}/networkStore/any-sync-filenode:/networkStore:Z"
deploy:
resources:
@ -202,6 +189,7 @@ services:
memory: ${ANY_SYNC_DAEMONS_MEMORY_LIMIT:-}
restart: unless-stopped
any-sync-consensusnode:
image: "ghcr.io/anyproto/any-sync-consensusnode:${ANY_SYNC_CONSENSUSNODE_VERSION}"
depends_on:
@ -233,11 +221,11 @@ services:
- any-sync-node-2
- any-sync-node-3
volumes:
- "${STORAGE_DIR}:/code/storage:Z"
- ./etc/client.yml:/client.yml:Z
command: ["tail", "-f", "/dev/null"]
stop_signal: SIGKILL
tty: true
healthcheck:
test: any-sync-netcheck -c /code/storage/docker-generateconfig/nodes.yml 2>&1| grep -E 'netcheck\s+success'
test: any-sync-netcheck -c /client.yml 2>&1| grep -E 'netcheck\s+success'
interval: 10s
start_period: 5s
start_period: 5s

View file

@ -1,58 +0,0 @@
#!/bin/bash
echo "INFO: $0 start"
echo "INFO: loading .env file"
source .env
echo "INFO: create persistent config dir='./storage/docker-generateconfig'"
install -d ./storage/docker-generateconfig
cd ./storage/docker-generateconfig
# generate networkId
if [[ -s .networkId ]]; then
echo "INFO: saved networkId found, skipping"
else
echo "INFO: saved networkId not found, creating"
anyconf create-network
cat nodes.yml | grep '^networkId:' | awk '{print $NF}' > .networkId
cat account.yml | yq '.account.signingKey' > .networkSigningKey
if [ $? -ne 0 ]; then
echo "ERROR: Failed network creations!"
exit 1
fi
fi
NETWORK_ID=$( cat .networkId)
NETWORK_SIGNING_KEY=$( cat .networkSigningKey )
if [[ -s account0.yml ]]; then
echo "INFO: saved nodes and accounts configuration found, skipping"
else
echo "INFO: save nodes and accounts not found, createing"
anyconf generate-nodes \
--t tree \
--t tree \
--t tree \
--t coordinator \
--t file \
--t consensus \
--addresses ${ANY_SYNC_NODE_1_ADDRESSES} \
--addresses ${ANY_SYNC_NODE_2_ADDRESSES} \
--addresses ${ANY_SYNC_NODE_3_ADDRESSES} \
--addresses ${ANY_SYNC_COORDINATOR_ADDRESSES} \
--addresses ${ANY_SYNC_FILENODE_ADDRESSES} \
--addresses ${ANY_SYNC_CONSENSUSNODE_ADDRESSES} \
if [ $? -ne 0 ]; then
echo "ERROR: Failed to generate nodes and accounts!"
exit 1
fi
fi
echo "INFO: yq processing yml files"
yq --indent 2 --inplace 'del(.creationTime)' nodes.yml
yq --indent 2 --inplace ".networkId |= \"${NETWORK_ID}\"" nodes.yml
yq --indent 2 --inplace ".account.signingKey |= \"${NETWORK_SIGNING_KEY}\"" account3.yml
yq --indent 2 --inplace ".account.signingKey |= \"${NETWORK_SIGNING_KEY}\"" account5.yml
echo "INFO: $0 done"

View file

@ -1 +1,2 @@
requests==2.32.2
python-dotenv

View file

@ -1,4 +0,0 @@
[default]
aws_access_key_id=%AWS_ACCESS_KEY_ID%
aws_secret_access_key=%AWS_SECRET_ACCESS_KEY%

View file

@ -1,9 +0,0 @@
metric:
addr: 0.0.0.0:8000
log:
defaultLevel: ''
namedLevels: {}
production: false
networkStorePath: "/networkStore"

View file

@ -1,21 +0,0 @@
mongo:
connect: %MONGO_CONNECT%/?w=majority
database: consensus
logCollection: log
drpc:
stream:
timeoutMilliseconds: 1000
maxMsgSizeMb: 256
yamux:
listenAddrs:
- %ANY_SYNC_CONSENSUSNODE_ADDRESSES%
writeTimeoutSec: 10
dialTimeoutSec: 10
quic:
listenAddrs:
- %ANY_SYNC_CONSENSUSNODE_QUIC_ADDRESSES%
writeTimeoutSec: 10
dialTimeoutSec: 10

View file

@ -1,31 +0,0 @@
mongo:
connect: %MONGO_CONNECT%
database: coordinator
log: log
spaces: spaces
spaceStatus:
runSeconds: 5
deletionPeriodDays: 0
drpc:
stream:
timeoutMilliseconds: 1000
maxMsgSizeMb: 256
yamux:
listenAddrs:
- %ANY_SYNC_COORDINATOR_ADDRESSES%
writeTimeoutSec: 10
dialTimeoutSec: 10
quic:
listenAddrs:
- %ANY_SYNC_COORDINATOR_QUIC_ADDRESSES%
writeTimeoutSec: 10
dialTimeoutSec: 10
defaultLimits:
spaceMembersRead: %ANY_SYNC_COORDINATOR_DEFAULT_LIMITS_SPACE_MEMBERS_READ%
spaceMembersWrite: %ANY_SYNC_COORDINATOR_DEFAULT_LIMITS_SPACE_MEMBERS_WRITE%
sharedSpacesLimit: %ANY_SYNC_COORDINATOR_DEFAULT_LIMITS_SHARED_SPACES_LIMIT%

View file

@ -1,31 +0,0 @@
s3Store:
bucket: %MINIO_BUCKET%
indexBucket: %MINIO_BUCKET%
maxThreads: 16
profile: default
region: us-east-1
endpoint: http://minio:%MINIO_PORT%
forcePathStyle: true # 'true' for self-hosted S3 Object Storage
redis:
isCluster: false
url: %REDIS_URL%
drpc:
stream:
timeoutMilliseconds: 1000
maxMsgSizeMb: 256
yamux:
listenAddrs:
- %ANY_SYNC_FILENODE_ADDRESSES%
writeTimeoutSec: 10
dialTimeoutSec: 10
quic:
listenAddrs:
- %ANY_SYNC_FILENODE_QUIC_ADDRESSES%
writeTimeoutSec: 10
dialTimeoutSec: 10
defaultLimit: %ANY_SYNC_FILENODE_DEFAULT_LIMIT%

View file

@ -1,29 +0,0 @@
apiServer:
listenAddr: 0.0.0.0:8080
drpc:
stream:
timeoutMilliseconds: 1000
maxMsgSizeMb: 256
yamux:
listenAddrs:
- %ANY_SYNC_NODE_1_ADDRESSES%
writeTimeoutSec: 10
dialTimeoutSec: 10
quic:
listenAddrs:
- %ANY_SYNC_NODE_1_QUIC_ADDRESSES%
writeTimeoutSec: 10
dialTimeoutSec: 10
space:
gcTTL: 60
syncPeriod: 600
storage:
path: /storage
nodeSync:
periodicSyncHours: 2
syncOnStart: true

View file

@ -1,29 +0,0 @@
apiServer:
listenAddr: 0.0.0.0:8080
drpc:
stream:
timeoutMilliseconds: 1000
maxMsgSizeMb: 256
yamux:
listenAddrs:
- %ANY_SYNC_NODE_2_ADDRESSES%
writeTimeoutSec: 10
dialTimeoutSec: 10
quic:
listenAddrs:
- %ANY_SYNC_NODE_2_QUIC_ADDRESSES%
writeTimeoutSec: 10
dialTimeoutSec: 10
space:
gcTTL: 60
syncPeriod: 600
storage:
path: /storage
nodeSync:
periodicSyncHours: 2
syncOnStart: true

View file

@ -1,29 +0,0 @@
apiServer:
listenAddr: 0.0.0.0:8080
drpc:
stream:
timeoutMilliseconds: 1000
maxMsgSizeMb: 256
yamux:
listenAddrs:
- %ANY_SYNC_NODE_3_ADDRESSES%
writeTimeoutSec: 10
dialTimeoutSec: 10
quic:
listenAddrs:
- %ANY_SYNC_NODE_3_QUIC_ADDRESSES%
writeTimeoutSec: 10
dialTimeoutSec: 10
space:
gcTTL: 60
syncPeriod: 600
storage:
path: /storage
nodeSync:
periodicSyncHours: 2
syncOnStart: true

View file

@ -0,0 +1,134 @@
import os
from dotenv import load_dotenv
import logging
import time
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
load_dotenv()
def get_env_var(key, retries=3, delay=1):
    """Look up an environment variable, logging an error when it is absent.

    Args:
        key: Name of the environment variable to read.
        retries, delay: Retained for backward compatibility only. The old
            implementation re-read os.getenv() up to `retries` times with
            time.sleep(delay) between attempts, but load_dotenv() runs once
            at module import and os.environ cannot change between iterations,
            so the loop merely delayed the inevitable failure. It has been
            removed; the parameters are now no-ops.

    Returns:
        The variable's string value, or None (after logging an error) when
        the variable is not set.
    """
    value = os.getenv(key)
    if value is None:
        logging.error(f"Environment variable {key} is not set.")
    return value
env_vars = {
'AWS_ACCESS_KEY_ID': os.getenv('AWS_ACCESS_KEY_ID'),
'AWS_SECRET_ACCESS_KEY': os.getenv('AWS_SECRET_ACCESS_KEY'),
'EXTERNAL_LISTEN_HOSTS': os.getenv('EXTERNAL_LISTEN_HOSTS', ''),
'EXTERNAL_LISTEN_HOST': os.getenv('EXTERNAL_LISTEN_HOST', ''),
'ANY_SYNC_COORDINATOR_HOST': os.getenv('ANY_SYNC_COORDINATOR_HOST'),
'ANY_SYNC_COORDINATOR_PORT': os.getenv('ANY_SYNC_COORDINATOR_PORT'),
'ANY_SYNC_COORDINATOR_QUIC_PORT': os.getenv('ANY_SYNC_COORDINATOR_QUIC_PORT'),
'MONGO_CONNECT': os.getenv('MONGO_CONNECT'),
'ANY_SYNC_COORDINATOR_DEFAULT_LIMITS_SPACE_MEMBERS_READ': os.getenv('ANY_SYNC_COORDINATOR_DEFAULT_LIMITS_SPACE_MEMBERS_READ'),
'ANY_SYNC_COORDINATOR_DEFAULT_LIMITS_SPACE_MEMBERS_WRITE': os.getenv('ANY_SYNC_COORDINATOR_DEFAULT_LIMITS_SPACE_MEMBERS_WRITE'),
'ANY_SYNC_COORDINATOR_DEFAULT_LIMITS_SHARED_SPACES_LIMIT': os.getenv('ANY_SYNC_COORDINATOR_DEFAULT_LIMITS_SHARED_SPACES_LIMIT'),
'ANY_SYNC_CONSENSUSNODE_HOST': os.getenv('ANY_SYNC_CONSENSUSNODE_HOST'),
'ANY_SYNC_CONSENSUSNODE_PORT': os.getenv('ANY_SYNC_CONSENSUSNODE_PORT'),
'ANY_SYNC_CONSENSUSNODE_QUIC_PORT': os.getenv('ANY_SYNC_CONSENSUSNODE_QUIC_PORT'),
'ANY_SYNC_FILENODE_HOST': os.getenv('ANY_SYNC_FILENODE_HOST'),
'ANY_SYNC_FILENODE_PORT': os.getenv('ANY_SYNC_FILENODE_PORT'),
'ANY_SYNC_FILENODE_QUIC_PORT': os.getenv('ANY_SYNC_FILENODE_QUIC_PORT'),
'MINIO_BUCKET': os.getenv('MINIO_BUCKET'),
'REDIS_URL': os.getenv('REDIS_URL'),
'ANY_SYNC_FILENODE_DEFAULT_LIMIT': os.getenv('ANY_SYNC_FILENODE_DEFAULT_LIMIT'),
'ANY_SYNC_NODE_1_HOST': os.getenv('ANY_SYNC_NODE_1_HOST'),
'ANY_SYNC_NODE_1_PORT': os.getenv('ANY_SYNC_NODE_1_PORT'),
'ANY_SYNC_NODE_1_QUIC_PORT': os.getenv('ANY_SYNC_NODE_1_QUIC_PORT'),
'ANY_SYNC_NODE_2_HOST': os.getenv('ANY_SYNC_NODE_2_HOST'),
'ANY_SYNC_NODE_2_PORT': os.getenv('ANY_SYNC_NODE_2_PORT'),
'ANY_SYNC_NODE_2_QUIC_PORT': os.getenv('ANY_SYNC_NODE_2_QUIC_PORT'),
'ANY_SYNC_NODE_3_HOST': os.getenv('ANY_SYNC_NODE_3_HOST'),
'ANY_SYNC_NODE_3_PORT': os.getenv('ANY_SYNC_NODE_3_PORT'),
'ANY_SYNC_NODE_3_QUIC_PORT': os.getenv('ANY_SYNC_NODE_3_QUIC_PORT'),
}
external_hosts = []
if env_vars['EXTERNAL_LISTEN_HOST']:
external_hosts.append(env_vars['EXTERNAL_LISTEN_HOST'])
if env_vars['EXTERNAL_LISTEN_HOSTS']:
external_hosts.extend(env_vars['EXTERNAL_LISTEN_HOSTS'].split(' '))
external_hosts = [host.strip() for host in external_hosts if host.strip()]
template = f"""
external-addresses:
"""
for host in external_hosts:
template += f" - {host}\n"
template += f"""
any-sync-coordinator:
listen: {env_vars['ANY_SYNC_COORDINATOR_HOST']}
yamuxPort: {env_vars['ANY_SYNC_COORDINATOR_PORT']}
quicPort: {env_vars['ANY_SYNC_COORDINATOR_QUIC_PORT']}
mongo:
connect: {env_vars['MONGO_CONNECT']}
database: coordinator
defaultLimits:
spaceMembersRead: {env_vars['ANY_SYNC_COORDINATOR_DEFAULT_LIMITS_SPACE_MEMBERS_READ']}
spaceMembersWrite: {env_vars['ANY_SYNC_COORDINATOR_DEFAULT_LIMITS_SPACE_MEMBERS_WRITE']}
sharedSpacesLimit: {env_vars['ANY_SYNC_COORDINATOR_DEFAULT_LIMITS_SHARED_SPACES_LIMIT']}
any-sync-consensusnode:
listen: {env_vars['ANY_SYNC_CONSENSUSNODE_HOST']}
yamuxPort: {env_vars['ANY_SYNC_CONSENSUSNODE_PORT']}
quicPort: {env_vars['ANY_SYNC_CONSENSUSNODE_QUIC_PORT']}
mongo:
connect: {env_vars['MONGO_CONNECT']}/?w=majority
database: consensus
any-sync-filenode:
listen: {env_vars['ANY_SYNC_FILENODE_HOST']}
yamuxPort: {env_vars['ANY_SYNC_FILENODE_PORT']}
quicPort: {env_vars['ANY_SYNC_FILENODE_QUIC_PORT']}
s3Store:
endpoint: http://minio:9000
bucket: {env_vars['MINIO_BUCKET']}
indexBucket: {env_vars['MINIO_BUCKET']}
region: us-east-1
profile: default
forcePathStyle: true
redis:
url: {env_vars['REDIS_URL']}
defaultLimit: {env_vars['ANY_SYNC_FILENODE_DEFAULT_LIMIT']}
any-sync-node:
listen:
- {env_vars['ANY_SYNC_NODE_1_HOST']}
- {env_vars['ANY_SYNC_NODE_2_HOST']}
- {env_vars['ANY_SYNC_NODE_3_HOST']}
yamuxPort:
- {env_vars['ANY_SYNC_NODE_1_PORT']}
- {env_vars['ANY_SYNC_NODE_2_PORT']}
- {env_vars['ANY_SYNC_NODE_3_PORT']}
quicPort:
- {env_vars['ANY_SYNC_NODE_1_QUIC_PORT']}
- {env_vars['ANY_SYNC_NODE_2_QUIC_PORT']}
- {env_vars['ANY_SYNC_NODE_3_QUIC_PORT']}
"""
missing_vars = [key for key, value in env_vars.items() if value is None]
if missing_vars:
logging.critical(f"Missing environment variables: {', '.join(missing_vars)}")
raise SystemExit(1)
logging.info("All environment variables loaded successfully.")
os.makedirs("etc", exist_ok=True)
with open("etc/defaultTemplate.yml", "w") as file:
file.write(template)
logging.info("Template for `any-sync-network` written to ./etc/defaultTemplate.yml")
with open("etc/awsCredentials", "w") as aws_file:
aws_file.write(f"[default]\naws_access_key_id={get_env_var('AWS_ACCESS_KEY_ID')}\naws_secret_access_key={get_env_var('AWS_SECRET_ACCESS_KEY')}\n")
logging.info("AWS credentials file written to ./etc/awsCredentials")

View file

@ -0,0 +1,114 @@
import yaml
import os
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
BASE_DIR = 'etc'
IDS_FILE = os.path.join(BASE_DIR, 'ids.yml')
def load_yaml(file_path):
    """Read a YAML file and return its contents as a dict.

    Any failure (missing file, parse error) and any falsy document
    (empty file, explicit null) collapse to an empty dict, so callers
    never have to handle None or an exception from this helper.
    """
    try:
        with open(file_path, 'r') as handle:
            document = yaml.safe_load(handle)
        return document or {}
    except Exception as e:
        logging.error(f'Error loading YAML file {file_path}: {e}')
        return {}
def save_yaml(data, file_path):
    """Serialize `data` as YAML to `file_path`, logging success or failure.

    Errors are logged rather than raised, mirroring the best-effort
    style of load_yaml().
    """
    try:
        with open(file_path, 'w') as handle:
            yaml.dump(data, handle)
    except Exception as e:
        logging.error(f'Error saving to {file_path}: {e}')
    else:
        logging.info(f'Data saved to {file_path}')
def process_config_file(config_path, output_data):
    """Record one service's identity from its config.yml into `output_data`.

    The service name is taken from the directory containing the config file
    (e.g. etc/any-sync-node-1/config.yml -> 'any-sync-node-1'). For the
    coordinator, the network identifiers are captured as well under the
    'network' key, since that service's config carries them.
    """
    service_name = os.path.basename(os.path.dirname(config_path))
    config_data = load_yaml(config_path)
    account_info = config_data.get('account', {})

    if 'any-sync-coordinator' in config_path:
        # Coordinator config also holds the network section worth persisting.
        network = config_data.get('network', {})
        output_data['any-sync-coordinator'] = account_info
        output_data['network'] = {
            'networkId': network.get('networkId', 'unknown'),
            'id': network.get('id', 'unknown')
        }
    else:
        output_data[service_name] = account_info

    logging.info(f'Processed file: {config_path}')
# Extract account and network information, save the results to ids.yml
def read_config():
    """Harvest account/network identifiers from generated configs into ids.yml.

    Walks BASE_DIR for per-service config.yml files, extracts each one via
    process_config_file(), and persists the combined result to IDS_FILE.
    Raises FileNotFoundError when no config.yml exists anywhere under
    BASE_DIR (i.e. configuration was never generated).
    """
    collected = {}
    found_any = False
    for root, _, files in os.walk(BASE_DIR):
        if 'config.yml' in files:
            found_any = True
            process_config_file(os.path.join(root, 'config.yml'), collected)
    if not found_any:
        raise FileNotFoundError(f'{BASE_DIR}/any-sync-*/config.yml not found. Please generate a configuration first')
    save_yaml(collected, IDS_FILE)
# Handles account, network, and node information updates
def update_config_file(config_path, ids_data, network_info):
    """Re-apply persisted identifiers (from ids.yml) to one config file.

    Handles two file shapes:
      * config.yml   — per-service config: restores the 'account' section for
        this service, merges the saved network ids, and rewrites node peerIds.
      * network.yml / client.yml — flat network descriptions: restores the
        top-level networkId and node peerIds.
    The updated document is written back in place via save_yaml().
    """
    config_data = load_yaml(config_path)
    # The service name is the directory holding the file (e.g. 'any-sync-node-1').
    directory_name = os.path.basename(os.path.dirname(config_path))
    # Update config.yml files
    if os.path.basename(config_path) == 'config.yml':
        if directory_name in ids_data:
            config_data['account'] = ids_data[directory_name]
        if network_info:
            # Merge rather than replace: keep any other keys under 'network'.
            config_data.setdefault('network', {}).update({
                'networkId': network_info.get('networkId', 'unknown'),
                'id': network_info.get('id', 'unknown')
            })
        # Update peerId for nodes if applicable
        if 'nodes' in config_data.get('network', {}):
            for node in config_data['network']['nodes']:
                for address in node.get('addresses', []):
                    # Hostname part of "host:port" doubles as the ids.yml key.
                    node_name = address.split(':')[0]
                    if node_name in ids_data:
                        node['peerId'] = ids_data[node_name].get('peerId', node.get('peerId', 'unknown'))
    # Update network.yml and client.yml files
    elif os.path.basename(config_path) in ['network.yml', 'client.yml']:
        if network_info:
            config_data['networkId'] = network_info.get('networkId', 'unknown')
        if 'nodes' in config_data:
            for node in config_data['nodes']:
                for address in node.get('addresses', []):
                    node_name = address.split(':')[0]
                    if node_name in ids_data:
                        node['peerId'] = ids_data[node_name].get('peerId', node.get('peerId', 'unknown'))
    save_yaml(config_data, config_path)
# Restores account and network information
def write_config():
    """Restore identifiers from ids.yml into every known configuration file.

    Loads the persisted account/network data, then walks BASE_DIR and pushes
    the saved values into each config.yml, network.yml, and client.yml via
    update_config_file().
    """
    ids_data = load_yaml(IDS_FILE)
    network_info = ids_data.get('network', {})
    targets = ('config.yml', 'network.yml', 'client.yml')
    # Traverse the base directory and update relevant configuration files
    for root, _, files in os.walk(BASE_DIR):
        for name in files:
            if name in targets:
                update_config_file(os.path.join(root, name), ids_data, network_info)
if __name__ == "__main__":
    # Check if ids.yml exists and decide whether to read or write configurations:
    # first run (no ids.yml yet) harvests identifiers from freshly generated
    # configs; later runs push the persisted identifiers back into regenerated
    # configs so accounts and network ids survive re-generation.
    if os.path.exists(IDS_FILE):
        logging.info(f'{IDS_FILE} found! Restoring original accounts and network identifiers...')
        write_config()
    else:
        logging.info(f'{IDS_FILE} not found! Reading current accounts and network identifiers...')
        read_config()

View file

@ -1,67 +0,0 @@
#!/bin/bash
echo "INFO: $0 start"
echo "INFO: loading .env file"
source .env
# Set file paths
DEST_PATH="./etc"
NETWORK_FILE="./storage/docker-generateconfig/network.yml"
echo "INFO: Create directories for all node types"
for NODE_TYPE in node-1 node-2 node-3 filenode coordinator consensusnode; do
mkdir -p "${DEST_PATH}/any-sync-${NODE_TYPE}"
done
echo "INFO: Create directory for aws credentials"
mkdir -p "${DEST_PATH}/.aws"
echo "INFO: Configure external listen host"
python ./docker-generateconfig/setListenIp.py "./storage/docker-generateconfig/nodes.yml" "./storage/docker-generateconfig/nodesProcessed.yml"
echo "INFO: Create config for clients"
cp "./storage/docker-generateconfig/nodesProcessed.yml" "${DEST_PATH}/client.yml"
echo "INFO: Generate network file"
yq eval '. as $item | {"network": $item}' --indent 2 ./storage/docker-generateconfig/nodesProcessed.yml > "${NETWORK_FILE}"
echo "INFO: Generate config files for 3 nodes"
for i in {0..2}; do
cat \
"${NETWORK_FILE}" \
docker-generateconfig/etc/common.yml \
storage/docker-generateconfig/account${i}.yml \
docker-generateconfig/etc/node-$((i+1)).yml \
> "${DEST_PATH}/any-sync-node-$((i+1))/config.yml"
done
echo "INFO: Generate config files for coordinator"
cat "${NETWORK_FILE}" docker-generateconfig/etc/common.yml storage/docker-generateconfig/account3.yml docker-generateconfig/etc/coordinator.yml \
> ${DEST_PATH}/any-sync-coordinator/config.yml
echo "INFO: Generate config files for filenode"
cat "${NETWORK_FILE}" docker-generateconfig/etc/common.yml storage/docker-generateconfig/account4.yml docker-generateconfig/etc/filenode.yml \
> ${DEST_PATH}/any-sync-filenode/config.yml
echo "INFO: Generate config files for consensusnode"
cat "${NETWORK_FILE}" docker-generateconfig/etc/common.yml storage/docker-generateconfig/account5.yml docker-generateconfig/etc/consensusnode.yml \
> ${DEST_PATH}/any-sync-consensusnode/config.yml
echo "INFO: Copy network file to coordinator directory"
cp "storage/docker-generateconfig/nodesProcessed.yml" "${DEST_PATH}/any-sync-coordinator/network.yml"
echo "INFO: Copy aws credentials config"
cp "docker-generateconfig/etc/aws-credentials" "${DEST_PATH}/.aws/credentials"
echo "INFO: Replace variables from .env file"
for PLACEHOLDER in $( perl -ne 'print "$1\n" if /^([A-z0-9_-]+)=/' .env ); do
perl -i -pe "s|%${PLACEHOLDER}%|${!PLACEHOLDER}|g" \
"${DEST_PATH}/"/.aws/credentials \
"${NETWORK_FILE}" \
"${DEST_PATH}/"/*/*.yml
done
echo "INFO: fix indent in yml files"
for FILE in $( find ${DEST_PATH}/ -name "*.yml" ); do
yq --inplace --indent=2 $FILE
done
echo "INFO: $0 done"

View file

@ -1,71 +0,0 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import re
import yaml
import json
# load .env vars
envVars = dict()
if os.path.exists('.env') and os.path.getsize('.env') > 0:
with open('.env') as file:
for line in file:
if line.startswith('#') or not line.strip():
continue
key, value = line.strip().split('=', 1)
value = value.replace('"', '')
if key in envVars:
print(f"WARNING: dublicate key={key} in env file='.env'")
envVars[key] = value
else:
print(f"ERROR: file='.env' not found or size=0")
exit(1)
#print(f"DEBUG: envVars={json.dumps(envVars,indent=4)}")
inputYamlFile = sys.argv[1]
outputYamlFile = sys.argv[2]
externalListenHosts = envVars.get('EXTERNAL_LISTEN_HOSTS', '').split()
externalListenHost = envVars.get('EXTERNAL_LISTEN_HOST', None)
if externalListenHost and externalListenHost not in externalListenHosts:
externalListenHosts.append(externalListenHost)
print(f"DEBUG: externalListenHosts={externalListenHosts}")
print(f"DEBUG: externalListenHost={externalListenHost}")
listenHosts = list()
for host in externalListenHosts:
if host not in listenHosts:
listenHosts.append(host)
print(f"DEBUG: listenHosts={listenHosts}")
# read input yaml file
with open(inputYamlFile, 'r') as file:
config = yaml.load(file,Loader=yaml.Loader)
# processing addresses for nodes
for index, nodes in enumerate(config['nodes']):
listenHost = nodes['addresses'][0].split(':')[0]
listenPort = nodes['addresses'][0].split(':')[1]
nodeListenHosts = [listenHost] + listenHosts
for nodeListenHost in nodeListenHosts:
listenAddress = nodeListenHost +':'+ str(listenPort)
if listenAddress not in nodes['addresses']:
nodes['addresses'].append(listenAddress)
# add "quic" listen address
for name,value in envVars.items():
if re.match(r"^(ANY_SYNC_.*_PORT)$", name) and value == listenPort:
if re.match(r"^(ANY_SYNC_.*_QUIC_PORT)$", name):
# skip port, if PORT == QUIC_PORT
continue
quicPortKey = name.replace('_PORT', '_QUIC_PORT')
quicPortValue = envVars[quicPortKey]
quicListenAddress = 'quic://'+ nodeListenHost +':'+ str(quicPortValue)
if ( quicPortValue ) and ( quicListenAddress not in nodes['addresses']):
nodes['addresses'].append(quicListenAddress)
# write output yaml file
with open(outputYamlFile, 'w') as file:
yaml.dump(config, file)

View file

@ -104,12 +104,6 @@ runTest(){
for TEST in $TESTS_DIR/run.d/*.sh; do
echo -e "${YELLOW}Executing test: $TEST ${NC}"
local TEST_FILE_NAME=$(basename $TEST)
if [[ $TEST_FILE_NAME == 'setAnySyncPort.sh' ]]; then
if ! $CLEAN_DATA; then
echo "skipping for exist storage"
continue
fi
fi
# record the start time in seconds since the epoch
local START_TIME=$(date +%s)