Skip to main content

Node Configuration Reference

This document describes how to configure a Pilier validator node after installation.

Audience: All validators (genesis and new)

Prerequisites:


Configuration Overview

Pilier node configuration consists of three layers:

1. Systemd Service
├─ Auto-start on boot
├─ Restart policies
└─ Resource limits

2. Configuration File (optional)
├─ /etc/pilier/config.toml
└─ Network, RPC, telemetry settings

3. Command-Line Flags
├─ --chain, --validator, --name
└─ Override config file settings

Priority (highest to lowest):

Command-line flags > Config file > Defaults

Systemd Service Setup

Create Service File

Location: /etc/systemd/system/pilier.service

Basic template (testnet):

[Unit]
Description=Pilier Validator Node
Documentation=https://docs.pilier.net
After=network-online.target
Wants=network-online.target

[Service]
Type=simple
User=pilier
Group=pilier

# Working directory
WorkingDirectory=/var/lib/pilier

# Environment
Environment="RUST_LOG=info"

# Start command
ExecStart=/usr/local/bin/pilier-node \
--base-path /var/lib/pilier \
--chain /etc/pilier/testnet.json \
--validator \
--name "validator-01" \
--port 30333 \
--rpc-port 9933 \
--ws-port 9944 \
--prometheus-port 9615 \
--prometheus-external \
--telemetry-url "wss://telemetry.pilier.net/submit 0"

# Logging
StandardOutput=journal
StandardError=journal
SyslogIdentifier=pilier

# Restart policy
Restart=always
RestartSec=10s
TimeoutStopSec=300s
KillSignal=SIGTERM

# Security hardening
NoNewPrivileges=true
PrivateTmp=true
ProtectSystem=strict
ProtectHome=true
ReadWritePaths=/var/lib/pilier

# Resource limits
LimitNOFILE=65536
LimitNPROC=4096
CPUQuota=400%
MemoryMax=30G

[Install]
WantedBy=multi-user.target

Production Template (Mainnet)

Differences from testnet:

  • Remove --telemetry-url (optional, for privacy)
  • Add --pruning 256 (state pruning)
  • Adjusted resource limits (higher CPUQuota=700% for mainnet load vs 400% on testnet) and additional kernel hardening directives
[Unit]
Description=Pilier Mainnet Validator Node
Documentation=https://docs.pilier.net
After=network-online.target
Wants=network-online.target

[Service]
Type=simple
User=pilier
Group=pilier

WorkingDirectory=/var/lib/pilier

# Reduced logging for production
Environment="RUST_LOG=warn,pilier_node=info"

ExecStart=/usr/local/bin/pilier-node \
--base-path /var/lib/pilier \
--chain /etc/pilier/mainnet.json \
--validator \
--name "validator-lyon-01" \
--port 30333 \
--rpc-port 9933 \
--ws-port 9944 \
--prometheus-port 9615 \
--prometheus-external \
--pruning 256

StandardOutput=journal
StandardError=journal
SyslogIdentifier=pilier

Restart=always
RestartSec=10s
TimeoutStopSec=300s
KillSignal=SIGTERM

# Production hardening
NoNewPrivileges=true
PrivateTmp=true
ProtectSystem=strict
ProtectHome=true
ReadWritePaths=/var/lib/pilier
ProtectKernelTunables=true
ProtectKernelModules=true
ProtectControlGroups=true

# Production limits
LimitNOFILE=65536
LimitNPROC=4096
CPUQuota=700%
MemoryMax=30G

[Install]
WantedBy=multi-user.target

Install and Enable Service

# 1. Create service file
sudo nano /etc/systemd/system/pilier.service
# (paste template above)

# 2. Set permissions
sudo chmod 644 /etc/systemd/system/pilier.service

# 3. Reload systemd
sudo systemctl daemon-reload

# 4. Enable auto-start on boot
sudo systemctl enable pilier

# 5. Start the service
sudo systemctl start pilier

# 6. Check status
sudo systemctl status pilier

Systemd Management Commands

# Start node
sudo systemctl start pilier

# Stop node
sudo systemctl stop pilier

# Restart node
sudo systemctl restart pilier

# Check status
sudo systemctl status pilier

# View logs (real-time)
sudo journalctl -u pilier -f

# View logs (last 100 lines)
sudo journalctl -u pilier -n 100

# View logs (since boot)
sudo journalctl -u pilier -b

# View logs (specific time range)
sudo journalctl -u pilier --since "2026-02-01 10:00:00" --until "2026-02-01 12:00:00"

# Check if enabled (auto-start)
sudo systemctl is-enabled pilier

Configuration File (Optional)

Location: /etc/pilier/config.toml

Optional

Configuration file is optional. Command-line flags can achieve the same result. Use config file for:

  • Complex setups (many flags)
  • Standardization (same config across validators)
  • Easier management (edit file vs. edit systemd service)

Full Example Config

# Pilier Node Configuration
# /etc/pilier/config.toml

[node]
# Node name (appears in telemetry)
name = "validator-lyon-01"

# Base path for blockchain data
base_path = "/var/lib/pilier"

# Chain specification file
chain = "/etc/pilier/testnet.json"

# Enable validator mode
validator = true

[network]
# P2P listening address
listen_addr = "0.0.0.0:30333"

# Public address (if behind NAT)
# public_addr = "/ip4/51.210.123.45/tcp/30333"

# Reserved nodes (always connect to these peers)
reserved_nodes = [
"/dns/bootnode-1.pilier.net/tcp/30333/p2p/12D3KooWExample1",
"/dns/bootnode-2.pilier.net/tcp/30333/p2p/12D3KooWExample2"
]

# Reserved-only mode (only connect to reserved nodes)
reserved_only = false

# Maximum peer connections
max_peers = 50

[rpc]
# RPC HTTP listening address (localhost only for security!)
http_addr = "127.0.0.1:9933"

# RPC WebSocket listening address
ws_addr = "127.0.0.1:9944"

# Maximum connections
max_connections = 100

# CORS (only if exposing RPC publicly - NOT recommended)
# cors = ["*"]

# RPC methods (Safe = read-only, Unsafe = dangerous)
methods = "Safe"

[telemetry]
# Enable telemetry
enabled = true

# Telemetry endpoints (verbosity: 0-9, higher = more data)
endpoints = [
["wss://telemetry.pilier.net/submit", 0]
]

[prometheus]
# Enable Prometheus metrics
enabled = true

# Metrics listening address
addr = "127.0.0.1:9615"

# Allow external access (for monitoring server)
external = true

[state]
# Pruning mode
# - "archive": Keep all blocks (archive node)
# - "constrained": Keep last N blocks (full node)
pruning = "constrained"

# Number of blocks to keep (if pruning = "constrained")
pruning_blocks = 256

# State cache size (MB)
cache_size = 1024

[log]
# Log level (error, warn, info, debug, trace)
level = "info"

# Module-specific log levels
modules = [
"pilier_node=info",
"sc_consensus_aura=debug",
"sc_finality_grandpa=debug"
]

Using Config File

Option A: Specify via flag

# In systemd service:
ExecStart=/usr/local/bin/pilier-node --config /etc/pilier/config.toml

Option B: Default location

# Pilier node looks for config at:
# 1. /etc/pilier/config.toml
# 2. ~/.pilier/config.toml
# 3. ./config.toml (working directory)

# If file exists, it's loaded automatically
Command-Line Flags Override Config File

If you specify the same option in both config file and command-line flag, the flag wins.

Example:

# config.toml
[node]
name = "validator-01"
# Command line
pilier-node --config config.toml --name "validator-02"

# Result: Node name will be "validator-02" (flag overrides)

Command-Line Flags Reference

Essential Flags

| Flag | Description | Example |
|---|---|---|
| --chain | Chain specification file | --chain /etc/pilier/testnet.json |
| --base-path | Data directory | --base-path /var/lib/pilier |
| --validator | Enable validator mode | --validator |
| --name | Node name (telemetry) | --name "validator-lyon-01" |

Network Flags

| Flag | Description | Example |
|---|---|---|
| --port | P2P listening port | --port 30333 |
| --listen-addr | P2P listen address | --listen-addr /ip4/0.0.0.0/tcp/30333 |
| --public-addr | Public address (if behind NAT) | --public-addr /ip4/51.210.123.45/tcp/30333 |
| --bootnodes | Bootstrap nodes | --bootnodes /dns/bootnode.pilier.net/tcp/30333/p2p/... |
| --reserved-nodes | Always connect to these peers | --reserved-nodes /dns/validator-01.pilier.net/... |
| --reserved-only | Only connect to reserved nodes | --reserved-only |
| --no-mdns | Disable local peer discovery | --no-mdns |

RPC Flags

| Flag | Description | Example |
|---|---|---|
| --rpc-port | HTTP RPC port | --rpc-port 9933 |
| --ws-port | WebSocket RPC port | --ws-port 9944 |
| --rpc-cors | CORS policy | --rpc-cors all (dangerous!) |
| --rpc-methods | Allowed RPC methods | --rpc-methods Safe |
| --ws-external | Allow external WS connections | --ws-external (use with caution) |
| --rpc-external | Allow external HTTP connections | --rpc-external (use with caution) |
Never Expose RPC to Public Internet

DO NOT use --ws-external or --rpc-external without authentication!

If you must expose RPC:

  1. Use reverse proxy (Nginx) with SSL
  2. Add authentication (API keys)
  3. Firewall rules (allow only specific IPs)
  4. Rate limiting

For validators: Keep RPC on localhost (127.0.0.1) only.


Telemetry & Monitoring

| Flag | Description | Example |
|---|---|---|
| --telemetry-url | Send telemetry to endpoint | --telemetry-url "wss://telemetry.pilier.net/submit 0" |
| --no-telemetry | Disable telemetry | --no-telemetry |
| --prometheus-port | Prometheus metrics port | --prometheus-port 9615 |
| --prometheus-external | Allow external Prometheus access | --prometheus-external |

Telemetry verbosity:

  • 0 = Basic (block height, peers)
  • 5 = Detailed (include node details)
  • 9 = Everything (very verbose)

State & Pruning

| Flag | Description | Example |
|---|---|---|
| --pruning | Pruning mode | --pruning archive or --pruning 256 |
| --state-cache-size | State cache size (MB) | --state-cache-size 1024 |

Pruning modes:

  • archive = Keep all blocks (archive node, large disk)
  • 256 = Keep last 256 blocks (full node, moderate disk)
  • constrained = Default pruning (full node)

Logging

| Flag | Description | Example |
|---|---|---|
| --log | Log level | --log info or --log debug |
| -l | Module-specific log level | -l sc_consensus_aura=debug |

Log levels:

  • error = Errors only
  • warn = Warnings + errors
  • info = Informational (default)
  • debug = Verbose debugging
  • trace = Everything (very noisy)

Module-specific logging:

# Debug AURA consensus, warn for everything else
pilier-node --log warn -l sc_consensus_aura=debug

# Multiple modules:
pilier-node \
-l sc_consensus_aura=debug \
-l sc_finality_grandpa=debug \
-l pilier_node=info

Performance & Limits

| Flag | Description | Example |
|---|---|---|
| --execution | Execution strategy | --execution Native or --execution Wasm |
| --wasm-execution | Wasm execution method | --wasm-execution Compiled |
| --max-runtime-instances | Max Wasm instances | --max-runtime-instances 8 |
| --pool-limit | Transaction pool size | --pool-limit 8192 |
Execution Strategy

Native: Faster, but requires exact runtime version compiled in binary Wasm: Slower, but works with any runtime (forkless upgrades)

Default: Wasm (recommended for validators)


Logging Configuration

Log Levels

Set via environment variable:

# In systemd service:
Environment="RUST_LOG=info"

# Or inline:
RUST_LOG=debug pilier-node ...

Levels:

error < warn < info < debug < trace

Examples:

# Everything at info level
RUST_LOG=info

# Everything at warn, but consensus at debug
RUST_LOG=warn,sc_consensus_aura=debug

# Multiple modules
RUST_LOG=warn,pilier_node=info,sc_consensus_aura=debug,sc_finality_grandpa=debug

Log Rotation

Prevent logs from filling disk:

Create logrotate config:

sudo nano /etc/logrotate.d/pilier

Content:

/var/log/pilier/*.log {
daily
rotate 7
compress
delaycompress
missingok
notifempty
create 0640 pilier pilier
sharedscripts
postrotate
systemctl reload pilier > /dev/null 2>&1 || true
endscript
}

Test logrotate:

sudo logrotate -d /etc/logrotate.d/pilier  # Dry run
sudo logrotate -f /etc/logrotate.d/pilier # Force rotation

Journald Configuration

Limit journal size (systemd logs):

sudo nano /etc/systemd/journald.conf

Add/modify:

[Journal]
SystemMaxUse=1G
SystemMaxFileSize=100M
MaxRetentionSec=7day

Restart journald:

sudo systemctl restart systemd-journald

Monitoring Setup

Prometheus Metrics

Enable in node:

# In systemd service or command line:
--prometheus-port 9615
--prometheus-external

Verify metrics endpoint:

curl http://localhost:9615/metrics

# Output:
# substrate_block_height{status="best"} 12345
# substrate_finality_grandpa_round 678
# substrate_peer_count 5
# ...

Health Check Script

Create health check:

sudo nano /usr/local/bin/pilier-health.sh

Content:

#!/bin/bash
# Pilier Node Health Check
#
# Verifies that the local Pilier validator is running, answering RPC,
# connected to peers, and fully synced. Prints a one-line "OK" summary
# and exits 0 on success; prints "ERROR: ..." and exits 1 on hard
# failure. "WARNING" lines do not change the exit code (same contract
# as before).
#
# Requires: curl, jq, systemd; RPC listening on 127.0.0.1:9933.

set -euo pipefail

readonly RPC_URL="http://127.0.0.1:9933/"

# rpc_call METHOD — POST a parameterless JSON-RPC request and print
# the raw JSON response to stdout.
rpc_call() {
  curl -s -H "Content-Type: application/json" \
    -d "{\"id\":1, \"jsonrpc\":\"2.0\", \"method\": \"$1\", \"params\":[]}" \
    "$RPC_URL"
}

# Check if service is running
if ! systemctl is-active --quiet pilier; then
  echo "ERROR: Pilier service is not running"
  exit 1
fi

# Check if RPC is responding. '|| true' keeps a curl failure from
# aborting silently under pipefail; an empty/null BLOCK is reported
# as an explicit error instead.
BLOCK=$(rpc_call chain_getBlock | jq -r '.result.block.header.number' || true)

if [ -z "$BLOCK" ] || [ "$BLOCK" == "null" ]; then
  echo "ERROR: Cannot query block height"
  exit 1
fi

# Convert hex block number (e.g. "0x3039") to decimal; '#0x' strips
# the prefix only if present, unlike the positional ':2' slice.
BLOCK_HEIGHT=$((16#${BLOCK#0x}))

# Fetch system_health ONCE and read both fields from it (the original
# issued two identical RPC calls).
HEALTH=$(rpc_call system_health || true)
PEERS=$(jq -r '.result.peers' <<<"$HEALTH" || true)
IS_SYNCING=$(jq -r '.result.isSyncing' <<<"$HEALTH" || true)

# Guard the integer comparison: an empty or "null" PEERS previously
# produced a confusing "integer expression expected" error from test.
if ! [[ "$PEERS" =~ ^[0-9]+$ ]]; then
  echo "ERROR: Cannot query peer count"
  exit 1
fi

if [ "$PEERS" -lt 2 ]; then
  echo "WARNING: Low peer count ($PEERS)"
fi

if [ "$IS_SYNCING" == "true" ]; then
  echo "WARNING: Node is still syncing"
fi

echo "OK: Block #$BLOCK_HEIGHT, $PEERS peers, syncing=$IS_SYNCING"
exit 0

Make executable:

sudo chmod +x /usr/local/bin/pilier-health.sh

Test:

/usr/local/bin/pilier-health.sh
# Output: OK: Block #12345, 5 peers, syncing=false

Automated Monitoring (Cron)

Add to crontab:

crontab -e

# Check every 5 minutes, send email on failure
*/5 * * * * /usr/local/bin/pilier-health.sh || echo "Pilier node unhealthy!" | mail -s "Alert: Pilier" your-email@example.com

Or use systemd timer (more robust):

# /etc/systemd/system/pilier-health.service
[Unit]
Description=Pilier Health Check

[Service]
Type=oneshot
ExecStart=/usr/local/bin/pilier-health.sh
User=pilier

# /etc/systemd/system/pilier-health.timer
[Unit]
Description=Run Pilier Health Check every 5 minutes

[Timer]
OnBootSec=5min
OnUnitActiveSec=5min

[Install]
WantedBy=timers.target

Enable timer:

sudo systemctl daemon-reload
sudo systemctl enable pilier-health.timer
sudo systemctl start pilier-health.timer

Security Configuration

Firewall Rules

Using UFW (Ubuntu/Debian):

# Default: deny all incoming
sudo ufw default deny incoming
sudo ufw default allow outgoing

# Allow SSH (change port if non-standard)
sudo ufw allow 22/tcp

# Allow P2P (required for validators)
sudo ufw allow 30333/tcp

# Do NOT allow RPC ports publicly!
# sudo ufw allow 9933/tcp ← NO!
# sudo ufw allow 9944/tcp ← NO!

# Enable firewall
sudo ufw enable

# Check status
sudo ufw status verbose

SSH Hardening

sudo nano /etc/ssh/sshd_config

Recommended settings:

# Disable root login
PermitRootLogin no

# Disable password authentication (use keys only)
PasswordAuthentication no
PubkeyAuthentication yes

# Change default port (optional, security by obscurity)
Port 2222

# Disable X11 forwarding
X11Forwarding no

# Limit users
AllowUsers pilier your-admin-user

Restart SSH:

sudo systemctl restart sshd

File Permissions

Ensure correct ownership:

# Data directory
sudo chown -R pilier:pilier /var/lib/pilier
sudo chmod 700 /var/lib/pilier

# Config files
sudo chown pilier:pilier /etc/pilier/*.json
sudo chmod 600 /etc/pilier/*.json

# Keystore (extra sensitive)
sudo chmod 600 /var/lib/pilier/chains/*/keystore/*

Automatic Security Updates

# Install unattended-upgrades
sudo apt install unattended-upgrades

# Enable
sudo dpkg-reconfigure --priority=low unattended-upgrades

# Configure (optional)
sudo nano /etc/apt/apt.conf.d/50unattended-upgrades

# Enable security updates only:
Unattended-Upgrade::Allowed-Origins {
"${distro_id}:${distro_codename}-security";
};

# Auto-reboot if required (with delay)
Unattended-Upgrade::Automatic-Reboot "true";
Unattended-Upgrade::Automatic-Reboot-Time "03:00";

Performance Tuning

System Limits

Increase file descriptor limits:

sudo nano /etc/security/limits.conf

Add:

pilier soft nofile 65536
pilier hard nofile 65536
pilier soft nproc 4096
pilier hard nproc 4096

Network Tuning

sudo nano /etc/sysctl.conf

Add:

# Increase TCP buffer sizes
net.core.rmem_max = 134217728
net.core.wmem_max = 134217728
net.ipv4.tcp_rmem = 4096 87380 67108864
net.ipv4.tcp_wmem = 4096 65536 67108864

# Increase connection backlog
net.core.somaxconn = 4096
net.ipv4.tcp_max_syn_backlog = 4096

# Enable TCP Fast Open
net.ipv4.tcp_fastopen = 3

Apply:

sudo sysctl -p

Disk I/O Optimization

For NVMe SSDs:

# Check current scheduler
cat /sys/block/nvme0n1/queue/scheduler

# Set to none (for NVMe, best performance)
echo none | sudo tee /sys/block/nvme0n1/queue/scheduler

# Make persistent (add to /etc/rc.local or systemd service)

Troubleshooting

Node Won't Start

Check logs:

sudo journalctl -u pilier -n 50 --no-pager

Common errors:

"Address already in use" → Port 30333 occupied
"Permission denied" → Wrong file ownership
"Database corruption" → Delete /var/lib/pilier/chains/*/db/
"Failed to connect" → Firewall blocking port 30333

High CPU Usage

# Check CPU usage
top -u pilier

# Check which process
ps aux | grep pilier-node

# Reduce logging verbosity
# In systemd service: RUST_LOG=warn (instead of debug)

# Check if compiling Wasm unnecessarily
# Ensure --wasm-execution Compiled flag is set

High Memory Usage

# Check memory
free -h

# Pilier node typically uses:
# - 2-4 GB (normal operation)
# - 8-16 GB (sync, heavy load)
# - 20+ GB (memory leak - bug)

# If >20 GB sustained:
# 1. Check for known issues (GitHub)
# 2. Restart node: sudo systemctl restart pilier
# 3. Report bug if persistent

Disk Space Issues

# Check disk usage
df -h /var/lib/pilier
du -sh /var/lib/pilier/chains/*/db/

# Solutions:
# 1. Enable pruning (if running archive mode)
# 2. Purge old chain data (if testnet reset)
# 3. Upgrade disk size

Backup & Recovery

What to Backup

Critical (must backup):

Session keys:
/var/lib/pilier/chains/*/keystore/

Configuration:
/etc/pilier/*.json
/etc/systemd/system/pilier.service

Not critical (can re-sync):

Blockchain state:
/var/lib/pilier/chains/*/db/

Backup Script

#!/bin/bash
# Backup Pilier validator keys and config
#
# Creates a timestamped, GPG-encrypted archive of the session keystore,
# /etc/pilier configuration, and the systemd unit. On success the only
# artifact left behind is "<BACKUP_DIR>.tar.gz.gpg"; plaintext staging
# files are removed on every exit path.
#
# Run as root (or a user able to read the keystore).

# Fail fast: the original script had no error handling, so a failed
# gpg run still deleted the tarball and reported success — losing the
# backup entirely.
set -euo pipefail

BACKUP_DIR="/secure-backup/pilier-$(date +%Y%m%d-%H%M%S)"
KEYSTORE="/var/lib/pilier/chains/pilier_testnet/keystore"
CONFIG="/etc/pilier"

# Plaintext staging files must not be world-readable while they exist.
umask 077

# Remove plaintext staging data on ANY exit (success or failure); the
# encrypted .gpg file is only present if gpg succeeded.
cleanup() { rm -rf -- "$BACKUP_DIR" "$BACKUP_DIR.tar.gz"; }
trap cleanup EXIT

mkdir -p "$BACKUP_DIR"

# Backup keystore (session keys — the critical part)
cp -r -- "$KEYSTORE" "$BACKUP_DIR/"

# Backup config
cp -r -- "$CONFIG" "$BACKUP_DIR/"

# Backup systemd service
cp -- /etc/systemd/system/pilier.service "$BACKUP_DIR/"

# Archive relative to the parent directory so the tarball does not
# embed absolute paths.
tar -czf "$BACKUP_DIR.tar.gz" \
  -C "$(dirname "$BACKUP_DIR")" "$(basename "$BACKUP_DIR")"

# Symmetric (passphrase) encryption; writes "$BACKUP_DIR.tar.gz.gpg".
# Under 'set -e' a gpg failure aborts here, and the EXIT trap removes
# only the plaintext staging files.
gpg --symmetric --cipher-algo AES256 "$BACKUP_DIR.tar.gz"

echo "Backup created: $BACKUP_DIR.tar.gz.gpg"

Next Steps

After configuring your node:

  1. Generate session keys → Session Keys Guide
  2. Register as validator → Validator Registration
  3. Setup monitoring → Monitoring Guide

For genesis validators:

Initial Launch Guide

For new validators:

Join Network Guide


Support

Configuration issues?


Document version: 1.0

Last updated: 2026-01-12