Add multi-site Caddy helpers and document usage

- add startup/shutdown scripts that render a Caddyfile from JSON config and run health checks

- add Python utilities and a sample sites.json for declarative multi-site configuration

- document the workflow and ignore generated Caddy state artifacts

- normalize double-quote style across challenge workflow controllers, nodes, and tests
Joey Yakimowich-Payne 2025-10-15 22:03:56 -06:00
commit 6038fc25f5
No known key found for this signature in database
GPG key ID: 6BFE655FA5ABD1E1
26 changed files with 1018 additions and 168 deletions

View file

@@ -149,5 +149,3 @@ class ChallengeDetailApi(Resource):
db.session.delete(c)
db.session.commit()
return {"result": "success"}, 204

View file

@@ -142,4 +142,3 @@ class RedBluePairingsApi(Resource):
for r in rows
],
}

View file

@@ -22,16 +22,18 @@ class ChallengeListApi(Resource):
select(Site).where(Site.app_id == c.app_id, Site.status == "normal")
).scalar_one_or_none()
site_code = site.code if site else None
items.append({
"id": c.id,
"name": c.name,
"description": c.description,
"goal": c.goal,
"app_id": c.app_id,
"workflow_id": c.workflow_id,
"app_mode": app.mode if app else None,
"app_site_code": site_code,
})
items.append(
{
"id": c.id,
"name": c.name,
"description": c.description,
"goal": c.goal,
"app_id": c.app_id,
"workflow_id": c.workflow_id,
"app_mode": app.mode if app else None,
"app_site_code": site_code,
}
)
return {"result": "success", "data": items}
@@ -73,28 +75,27 @@ class ChallengeLeaderboardApi(Resource):
if not challenge:
return {"result": "not_found"}, 404
scoring_strategy = challenge.scoring_strategy or 'highest_rating'
scoring_strategy = challenge.scoring_strategy or "highest_rating"
# Build query based on scoring strategy
q = db.session.query(ChallengeAttempt).filter(
ChallengeAttempt.challenge_id == str(challenge_id),
ChallengeAttempt.succeeded.is_(True)
ChallengeAttempt.challenge_id == str(challenge_id), ChallengeAttempt.succeeded.is_(True)
)
# Apply sorting based on strategy
if scoring_strategy == 'first':
if scoring_strategy == "first":
# Earliest successful attempt wins
q = q.order_by(ChallengeAttempt.created_at.asc())
elif scoring_strategy == 'fastest':
elif scoring_strategy == "fastest":
# Lowest elapsed_ms wins
q = q.order_by(ChallengeAttempt.elapsed_ms.asc().nullslast(), ChallengeAttempt.created_at.asc())
elif scoring_strategy == 'fewest_tokens':
elif scoring_strategy == "fewest_tokens":
# Lowest tokens_total wins
q = q.order_by(ChallengeAttempt.tokens_total.asc().nullslast(), ChallengeAttempt.created_at.asc())
elif scoring_strategy == 'highest_rating':
elif scoring_strategy == "highest_rating":
# Highest judge_rating wins, ties broken by earliest
q = q.order_by(ChallengeAttempt.judge_rating.desc().nullslast(), ChallengeAttempt.created_at.asc())
elif scoring_strategy == 'custom':
elif scoring_strategy == "custom":
# Custom score field (computed by plugin)
q = q.order_by(ChallengeAttempt.score.desc().nullslast(), ChallengeAttempt.created_at.asc())
else:
@@ -116,5 +117,3 @@ class ChallengeLeaderboardApi(Resource):
for r in rows
]
return {"result": "success", "data": data}

View file

@@ -82,4 +82,3 @@ class RedBlueLeaderboardApi(Resource):
"blue_ratio": (float(blue or 0.0) / total) if total else 0.0,
}
return {"result": "success", "data": data}

View file

@@ -8,15 +8,15 @@ from extensions.ext_database import db
from services.account_service import RegisterService
@web_ns.route('/register')
@web_ns.route("/register")
class WebRegisterApi(Resource):
def post(self):
payload = request.get_json(force=True) or {}
email = payload.get('email')
name = payload.get('name') or 'Player'
password = payload.get('password')
email = payload.get("email")
name = payload.get("name") or "Player"
password = payload.get("password")
if not email or not password:
return {'result': 'bad_request'}, 400
return {"result": "bad_request"}, 400
account = RegisterService.register(
email=email,
name=name,
@@ -25,6 +25,4 @@ class WebRegisterApi(Resource):
create_workspace_required=False,
)
db.session.commit()
return {'result': 'success', 'data': {'account_id': account.id}}, 201
return {"result": "success", "data": {"account_id": account.id}}, 201

View file

@@ -50,8 +50,8 @@ class AnswerNode(Node):
existing_outputs = self.graph_runtime_state.outputs
merged_outputs: dict[str, Any] = {
**existing_outputs,
'answer': segments.markdown,
'files': ArrayFileSegment(value=files),
"answer": segments.markdown,
"files": ArrayFileSegment(value=files),
}
self.graph_runtime_state.outputs = merged_outputs
return NodeRunResult(

View file

@@ -1,3 +1,3 @@
from .node import ChallengeEvaluatorNode
__all__ = ['ChallengeEvaluatorNode']
__all__ = ["ChallengeEvaluatorNode"]

View file

@@ -32,19 +32,19 @@ class ChallengeEvaluatorNode(Node):
self._config: dict[str, Any] = data
def _get_error_strategy(self) -> ErrorStrategy | None:
return getattr(self._node_data, 'error_strategy', None)
return getattr(self._node_data, "error_strategy", None)
def _get_retry_config(self) -> RetryConfig:
return getattr(self._node_data, 'retry_config', RetryConfig())
return getattr(self._node_data, "retry_config", RetryConfig())
def _get_title(self) -> str:
return getattr(self._node_data, 'title', 'Challenge Evaluator')
return getattr(self._node_data, "title", "Challenge Evaluator")
def _get_description(self) -> str | None:
return getattr(self._node_data, 'desc', None)
return getattr(self._node_data, "desc", None)
def _get_default_value_dict(self) -> dict[str, Any]:
return getattr(self._node_data, 'default_value_dict', {})
return getattr(self._node_data, "default_value_dict", {})
def get_base_node_data(self) -> BaseNodeData:
return self._node_data
@@ -55,16 +55,16 @@ class ChallengeEvaluatorNode(Node):
def _run(self) -> NodeRunResult:
# Resolve response text from selector in config.inputs.response (frontend schema)
output_text = ''
output_text = ""
source_selector = None
inputs_cfg = self._config.get('inputs') or {}
inputs_cfg = self._config.get("inputs") or {}
if isinstance(inputs_cfg, dict):
source_selector = inputs_cfg.get('response')
source_selector = inputs_cfg.get("response")
# fallback to older key if any
source_selector = source_selector or self._config.get('value_selector')
source_selector = source_selector or self._config.get("value_selector")
# Check evaluation mode from config
evaluation_mode = self._config.get('evaluation_mode', 'rules')
evaluation_mode = self._config.get("evaluation_mode", "rules")
logger.info("ChallengeEvaluator - evaluation_mode: %s, source_selector: %s", evaluation_mode, source_selector)
@@ -72,8 +72,8 @@
is_judge_input = False
judge_passed = False
judge_rating = 0
judge_feedback_from_input = ''
output_text = ''
judge_feedback_from_input = ""
output_text = ""
def _segment_to_value(segment: Segment | None) -> Any:
if segment is None:
@@ -86,13 +86,13 @@
return getattr(segment, "value", segment)
# If evaluation_mode is 'llm-judge', try to read from upstream Judging LLM node
if evaluation_mode == 'llm-judge' and source_selector and len(source_selector) >= 1:
if evaluation_mode == "llm-judge" and source_selector and len(source_selector) >= 1:
try:
node_id = source_selector[0]
# Retrieve judge outputs as Segments and convert to primitive values
passed_segment = self.graph_runtime_state.variable_pool.get([node_id, 'judge_passed'])
rating_segment = self.graph_runtime_state.variable_pool.get([node_id, 'judge_rating'])
feedback_segment = self.graph_runtime_state.variable_pool.get([node_id, 'judge_feedback'])
passed_segment = self.graph_runtime_state.variable_pool.get([node_id, "judge_passed"])
rating_segment = self.graph_runtime_state.variable_pool.get([node_id, "judge_rating"])
feedback_segment = self.graph_runtime_state.variable_pool.get([node_id, "judge_feedback"])
potential_judge_passed = _segment_to_value(passed_segment)
potential_judge_rating = _segment_to_value(rating_segment)
@@ -110,7 +110,7 @@
is_judge_input = True
judge_passed = bool(potential_judge_passed)
judge_rating = int(potential_judge_rating or 0)
judge_feedback_from_input = str(potential_judge_feedback or '')
judge_feedback_from_input = str(potential_judge_feedback or "")
logger.info(
"ChallengeEvaluator - Judge input successfully read! passed=%s, rating=%s, feedback=%s",
judge_passed,
@@ -126,28 +126,28 @@
try:
segment = self.graph_runtime_state.variable_pool.get(source_selector)
if segment is None:
output_text = ''
elif hasattr(segment, 'text'):
output_text = ""
elif hasattr(segment, "text"):
output_text = segment.text
else:
output_text = str(_segment_to_value(segment) or '')
output_text = str(_segment_to_value(segment) or "")
except Exception:
output_text = ''
output_text = ""
# Evaluate based on mode
if is_judge_input:
ok = judge_passed
details = {
'mode': 'llm-judge',
'rating': judge_rating,
'feedback': judge_feedback_from_input,
"mode": "llm-judge",
"rating": judge_rating,
"feedback": judge_feedback_from_input,
}
else:
# Rules-based evaluation (only if not using judge input)
ok, details = ChallengeService.evaluate_outcome(output_text, self._config)
# optional persistence if config carries challenge_id
challenge_id = self._config.get('challenge_id')
challenge_id = self._config.get("challenge_id")
judge_feedback = judge_feedback_from_input if is_judge_input else None
judge_rating_value = judge_rating if is_judge_input else None
if challenge_id:
@@ -160,8 +160,8 @@
# Extract judge_rating from details if available (for highest_rating strategy)
if isinstance(details, dict):
judge_rating_raw = details.get('rating')
judge_feedback_raw = details.get('feedback')
judge_rating_raw = details.get("rating")
judge_feedback_raw = details.get("feedback")
if judge_rating_raw is not None:
judge_rating_value = int(judge_rating_raw)
if (
@@ -180,23 +180,23 @@
score = None
# If custom scoring is configured, compute score using plugin
if challenge and challenge.scoring_strategy == 'custom':
if challenge and challenge.scoring_strategy == "custom":
try:
metrics = {
'succeeded': ok,
'tokens_total': tokens_total,
'elapsed_ms': elapsed_ms,
'rating': judge_rating,
'created_at': int(time.time() * 1000),
"succeeded": ok,
"tokens_total": tokens_total,
"elapsed_ms": elapsed_ms,
"rating": judge_rating,
"created_at": int(time.time() * 1000),
}
ctx = {
'tenant_id': self.tenant_id,
'app_id': self.app_id,
'workflow_id': self.workflow_id,
'challenge_id': str(challenge_id),
'end_user_id': None,
'timeout_ms': 5000,
"tenant_id": self.tenant_id,
"app_id": self.app_id,
"workflow_id": self.workflow_id,
"challenge_id": str(challenge_id),
"end_user_id": None,
"timeout_ms": 5000,
}
result = ChallengeScorerService.score_with_plugin(
@@ -207,11 +207,11 @@
ctx=ctx,
)
score = result.get('score')
score = result.get("score")
logger.info(
"Custom scorer computed score: %s (details: %s)",
score,
result.get('details'),
result.get("details"),
)
except Exception as e:
logger.error("Custom scorer failed: %s", e, exc_info=True)
@@ -237,34 +237,34 @@
# Always provide all output variables to match frontend getOutputVars
outputs: dict[str, Any] = {
'challenge_succeeded': ok,
'judge_rating': judge_rating_value,
'judge_feedback': judge_feedback_from_input or judge_feedback or '',
'message': '',
"challenge_succeeded": ok,
"judge_rating": judge_rating_value,
"judge_feedback": judge_feedback_from_input or judge_feedback or "",
"message": "",
}
# Override with actual values if evaluator provides them
if isinstance(details, dict):
logger.debug("ChallengeEvaluator - details: %s", details)
if 'rating' in details:
outputs['judge_rating'] = details.get('rating')
if 'feedback' in details:
outputs['judge_feedback'] = details.get('feedback')
if 'message' in details:
outputs['message'] = details.get('message')
if "rating" in details:
outputs["judge_rating"] = details.get("rating")
if "feedback" in details:
outputs["judge_feedback"] = details.get("feedback")
if "message" in details:
outputs["message"] = details.get("message")
# If no explicit message, create one from evaluation details
if not outputs['message']:
if not outputs["message"]:
if ok:
outputs['message'] = f"Success: {details.get('mode', 'evaluation')} matched"
outputs["message"] = f"Success: {details.get('mode', 'evaluation')} matched"
else:
outputs['message'] = f"Failed: {details.get('mode', 'evaluation')} did not match"
outputs["message"] = f"Failed: {details.get('mode', 'evaluation')} did not match"
# also persist judge outputs onto runtime state so downstream consumers can access them
if outputs['judge_feedback']:
self.graph_runtime_state.set_output('judge_feedback', outputs['judge_feedback'])
if outputs['judge_rating'] is not None:
self.graph_runtime_state.set_output('judge_rating', outputs['judge_rating'])
self.graph_runtime_state.set_output('challenge_succeeded', outputs['challenge_succeeded'])
if outputs["judge_feedback"]:
self.graph_runtime_state.set_output("judge_feedback", outputs["judge_feedback"])
if outputs["judge_rating"] is not None:
self.graph_runtime_state.set_output("judge_rating", outputs["judge_rating"])
self.graph_runtime_state.set_output("challenge_succeeded", outputs["challenge_succeeded"])
return NodeRunResult(
status=WorkflowNodeExecutionStatus.SUCCEEDED,

View file

@@ -32,19 +32,19 @@ class JudgingLLMNode(Node):
self._config: dict[str, Any] = data
def _get_error_strategy(self) -> ErrorStrategy | None:
return getattr(self._node_data, 'error_strategy', None)
return getattr(self._node_data, "error_strategy", None)
def _get_retry_config(self) -> RetryConfig:
return getattr(self._node_data, 'retry_config', RetryConfig())
return getattr(self._node_data, "retry_config", RetryConfig())
def _get_title(self) -> str:
return getattr(self._node_data, 'title', 'Judging LLM')
return getattr(self._node_data, "title", "Judging LLM")
def _get_description(self) -> str | None:
return getattr(self._node_data, 'desc', None)
return getattr(self._node_data, "desc", None)
def _get_default_value_dict(self) -> dict[str, Any]:
return getattr(self._node_data, 'default_value_dict', {})
return getattr(self._node_data, "default_value_dict", {})
def get_base_node_data(self) -> BaseNodeData:
return self._node_data
@@ -55,12 +55,12 @@ class JudgingLLMNode(Node):
def _run(self) -> NodeRunResult:
# Placeholder with FE-compatible keys. Extract inputs for future wiring.
inputs_cfg = self._config.get('inputs') or {}
inputs_cfg = self._config.get("inputs") or {}
goal_selector = None
response_selector = None
if isinstance(inputs_cfg, dict):
goal_selector = inputs_cfg.get('goal')
response_selector = inputs_cfg.get('response')
goal_selector = inputs_cfg.get("goal")
response_selector = inputs_cfg.get("response")
# Attempt to read variables (not used in placeholder decision)
_ = None
@@ -73,31 +73,32 @@
pass
outputs = {
'judge_passed': False,
'judge_rating': 0,
'judge_feedback': '',
"judge_passed": False,
"judge_rating": 0,
"judge_feedback": "",
}
# If model config and rubric provided, invoke LLM synchronously to judge
judge_model = self._config.get('judge_model') or {}
rubric = self._config.get('rubric_prompt_template') or ''
provider = (judge_model or {}).get('provider')
model_name = (judge_model or {}).get('name')
completion_params = (judge_model or {}).get('completion_params') or {}
judge_model = self._config.get("judge_model") or {}
rubric = self._config.get("rubric_prompt_template") or ""
provider = (judge_model or {}).get("provider")
model_name = (judge_model or {}).get("name")
completion_params = (judge_model or {}).get("completion_params") or {}
def _segment_to_text(seg: Any) -> str:
try:
# Many variable types expose .text
if hasattr(seg, 'text'):
if hasattr(seg, "text"):
return str(seg.text)
if isinstance(seg, (dict, list)):
return json.dumps(seg, ensure_ascii=False)
return str(seg)
except Exception:
return ''
return ""
# Debug: log what we're checking
import logging
logger = logging.getLogger(__name__)
logger.info(
"JudgingLLM check - provider: %s, model: %s, rubric_len: %s, response_selector: %s",
@@ -117,9 +118,7 @@ class JudgingLLMNode(Node):
json_template = '{"passed": boolean, "rating": number (0-10), "feedback": string}'
prompt_body = (
f"Goal:\n{goal_text}\n\n"
f"Response:\n{response_text}\n\n"
f"Return JSON with rating 0-10: {json_template}"
f"Goal:\n{goal_text}\n\nResponse:\n{response_text}\n\nReturn JSON with rating 0-10: {json_template}"
)
prompt_messages = [
@@ -141,14 +140,14 @@
user=self.user_id,
) # type: ignore
# Extract text from result
text_out = ''
content = getattr(result.message, 'content', '')
text_out = ""
content = getattr(result.message, "content", "")
if isinstance(content, str):
text_out = content
elif isinstance(content, list):
for item in content:
if getattr(item, 'type', None) == PromptMessageContentType.TEXT:
text_out += str(getattr(item, 'data', ''))
if getattr(item, "type", None) == PromptMessageContentType.TEXT:
text_out += str(getattr(item, "data", ""))
else:
text_out = str(content)
@@ -162,22 +161,25 @@
verdict = None
if isinstance(verdict, dict):
outputs['judge_passed'] = bool(verdict.get('passed'))
outputs['judge_rating'] = int(verdict.get('rating') or 0)
outputs['judge_feedback'] = str(verdict.get('feedback') or '')
outputs['judge_raw'] = json.dumps(verdict)
outputs["judge_passed"] = bool(verdict.get("passed"))
outputs["judge_rating"] = int(verdict.get("rating") or 0)
outputs["judge_feedback"] = str(verdict.get("feedback") or "")
outputs["judge_raw"] = json.dumps(verdict)
else:
# Fallback to simple rules if configured
success_type = self._config.get('success_type')
success_pattern = self._config.get('success_pattern')
success_type = self._config.get("success_type")
success_pattern = self._config.get("success_pattern")
if success_type and success_pattern:
ok, _ = ChallengeService.evaluate_outcome(response_text, {
'success_type': success_type,
'success_pattern': success_pattern,
})
outputs['judge_passed'] = ok
outputs['judge_rating'] = 10 if ok else 0
outputs['judge_feedback'] = 'passed by rules' if ok else 'failed by rules'
ok, _ = ChallengeService.evaluate_outcome(
response_text,
{
"success_type": success_type,
"success_pattern": success_pattern,
},
)
outputs["judge_passed"] = ok
outputs["judge_rating"] = 10 if ok else 0
outputs["judge_feedback"] = "passed by rules" if ok else "failed by rules"
except Exception as e:
# keep default outputs on error
logger.error("JudgingLLM error: %s", e, exc_info=True)
@@ -185,4 +187,3 @@ class JudgingLLMNode(Node):
else:
logger.warning("JudgingLLM skipped - missing required fields")
return NodeRunResult(status=WorkflowNodeExecutionStatus.SUCCEEDED, outputs=outputs)

View file

@@ -20,19 +20,19 @@ class TeamChallengeNode(Node):
self._config: dict[str, Any] = data
def _get_error_strategy(self) -> ErrorStrategy | None:
return getattr(self._node_data, 'error_strategy', None)
return getattr(self._node_data, "error_strategy", None)
def _get_retry_config(self) -> RetryConfig:
return getattr(self._node_data, 'retry_config', RetryConfig())
return getattr(self._node_data, "retry_config", RetryConfig())
def _get_title(self) -> str:
return getattr(self._node_data, 'title', 'Team Challenge')
return getattr(self._node_data, "title", "Team Challenge")
def _get_description(self) -> str | None:
return getattr(self._node_data, 'desc', None)
return getattr(self._node_data, "desc", None)
def _get_default_value_dict(self) -> dict[str, Any]:
return getattr(self._node_data, 'default_value_dict', {})
return getattr(self._node_data, "default_value_dict", {})
def get_base_node_data(self) -> BaseNodeData:
return self._node_data
@@ -43,26 +43,24 @@
def _run(self) -> NodeRunResult:
# Read inputs.team_choice for consistency with FE
inputs_cfg = self._config.get('inputs') or {}
team_choice = ''
inputs_cfg = self._config.get("inputs") or {}
team_choice = ""
if isinstance(inputs_cfg, dict):
team_choice_selector = inputs_cfg.get('team_choice')
team_choice_selector = inputs_cfg.get("team_choice")
if team_choice_selector:
try:
v = self.graph_runtime_state.variable_pool.get_value_by_selector(team_choice_selector)
team_choice = str(v or '')
team_choice = str(v or "")
except Exception:
team_choice = ''
team_choice = ""
outputs = {
'team': team_choice,
'judge_passed': False,
'judge_rating': 0,
'judge_feedback': '',
'categories': {},
'team_points': 0.0,
'total_points': 0.0,
"team": team_choice,
"judge_passed": False,
"judge_rating": 0,
"judge_feedback": "",
"categories": {},
"team_points": 0.0,
"total_points": 0.0,
}
return NodeRunResult(status=WorkflowNodeExecutionStatus.SUCCEEDED, outputs=outputs)

View file

@@ -88,4 +88,3 @@ class ChallengeAttempt(Base):
nullable=False,
server_default=sa.func.current_timestamp(),
)

View file

@@ -111,4 +111,3 @@ class TeamPairing(Base):
nullable=False,
server_default=sa.func.current_timestamp(),
)

View file

@@ -60,5 +60,3 @@ class ChallengeService:
sess.add(attempt)
sess.commit()
return attempt

View file

@@ -87,5 +87,3 @@ class RedBlueService:
sess.add(pairing)
sess.commit()
return pairing

View file

@@ -10,5 +10,3 @@ class TestWebChallenges:
assert resp.status_code == 200
data = resp.get_json()
assert data["result"] == "success"

View file

@@ -9,5 +9,3 @@ class TestWebRedBlueChallenges:
assert resp.status_code == 200
data = resp.get_json()
assert data["result"] == "success"

View file

@@ -37,4 +37,3 @@ def test_record_attempt_creates_row(mocker):
assert isinstance(attempt, ChallengeAttempt)
session.add.assert_called_once()
session.commit.assert_called_once()

View file

@@ -24,11 +24,7 @@ def test_submit_prompt_creates_submission(mocker):
def test_select_counterparty_submission_latest_active(mocker):
c = SimpleNamespace(id="cid")
session = mocker.MagicMock()
qs = (
session.query.return_value.filter.return_value.order_by.return_value
)
qs = session.query.return_value.filter.return_value.order_by.return_value
qs.first.return_value = SimpleNamespace(id="subid", team="blue")
sub = RedBlueService.select_counterparty_submission(challenge=c, team="red", session=session)
assert sub.team == "blue"

docker/caddy/.gitignore vendored Normal file
View file

@@ -0,0 +1 @@
state/

docker/caddy/README.md Normal file
View file

@@ -0,0 +1,99 @@
# Caddy Multi-Site Configuration
The Caddy helpers under `docker/` generate a single Caddyfile that serves multiple services. Instead of hard-coding upstreams in the shell script, each exposed site is described declaratively in `docker/caddy/sites.json`.
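A typical first run, assuming `caddy` and `python3` are available on `PATH`:
```
# Render sites.json into state/Caddyfile, start Caddy, then health-check each site
./docker/startup-caddy.sh

# Render and validate the configuration without starting anything
./docker/startup-caddy.sh --validate-only
```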
## Layout
```
docker/
startup-caddy.sh # Generates Caddyfile + runs caddy start --config ...
shutdown-caddy.sh # Stops the running Caddy instance
caddy/
sites.json # Default site definitions (Dify + HackAPrompt example)
state/ # Generated Caddyfile, logs, pid files
```
Provide an alternative configuration file with `--config` or `CADDY_SITE_CONFIG` if you want a different set of sites.
```
# Use a staging configuration
./docker/startup-caddy.sh --config /path/to/staging-sites.json --regenerate
```
## Config File Format
The configuration file is JSON with a top-level `sites` array. Each site object supports the keys below (all optional unless marked **required**):
| Key | Type | Description |
| --- | ---- | ----------- |
| `name` | string | Display name used in logs; defaults to `site`. |
| `address` | string **required** | Caddy address such as `example.com` or `:8080`. |
| `auto_https` | bool/string | Whether automatic HTTPS is enabled; set to `false` when serving plain HTTP (default: `true`). |
| `https_redirect` | bool/string | Emit an HTTP→HTTPS redirect block when a hostname is present. |
| `acme_challenge` | bool/string | Adds a handler for `/.well-known/acme-challenge/*`. |
| `headers` | object | Key/value pairs emitted inside a `header { ... }` block. |
| `health_path` | string | Path exposed as a simple `respond` handler (default `/health`). |
| `health_check` | object | `{ "url": "https://...", "skip": false }` controls post-start checks. |
| `app_url` | string | Logged after startup for operator convenience. |
| `log_file` | string | Custom log path; defaults to `$LOG_DIR/<name>-access.log`. |
| `api_routes` | array | Each entry `{ "path": "/api/*", "upstream": "host:port" }` creates a `handle` block with `reverse_proxy`. |
| `static_routes` | array | Each entry `{ "path": "/images/*", "root": "/var/www" }`; add `"browse": true` to enable directory listing. |
| `frontend` | object | `{"type":"reverse_proxy","upstream":"host:port"}` or `{"type":"static","root":"/dir","try_files":[...]}`. |
| `cache_static` | object | `{ "paths": ["*.js", ...], "header": "public, max-age=..." }`. |
| `explore_route` | object | `{ "upstream": "host:port" }`; proxies `/explore*` to the upstream. |
| `hooks_route` | object | `{ "path": "/e/*", "upstream": "host:port" }` plus optional `headers_up`/`headers_down` entries; rendered as a `handle` block with `reverse_proxy`. |
All string values support `${VAR:-default}` fallbacks (the default is used when the variable is unset or empty) and are then expanded with `os.path.expandvars`, so you can reference environment variables: `"${LOG_DIR}/dify-access.log"`, `"${DIFY_API_UPSTREAM:-127.0.0.1:5001}"`, and so on.
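For example, to point one upstream somewhere else for a single run (the address below is illustrative):
```
# The defaults exported by startup-caddy.sh only apply when a variable is unset
DIFY_API_UPSTREAM=10.0.0.5:5001 ./docker/startup-caddy.sh --regenerate
```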
## Example
```
{
"sites": [
{
"name": "dify",
"address": "${DIFY_ADDRESS:-:80}",
"https_redirect": "${DIFY_REDIRECT:-false}",
"api_routes": [
{ "path": "/api/*", "upstream": "${DIFY_API_UPSTREAM:-127.0.0.1:5001}" }
],
"frontend": {
"type": "reverse_proxy",
"upstream": "${DIFY_FRONTEND_UPSTREAM:-127.0.0.1:3000}"
},
"health_check": {
"url": "${DIFY_HEALTH_URL:-http://127.0.0.1/health}",
"skip": "${DIFY_SKIP_HEALTHCHECK:-false}"
},
"app_url": "${DIFY_APP_URL:-http://127.0.0.1}"
},
{
"name": "hackaprompt",
"address": "${HACKAPROMPT_ADDRESS:-:8080}",
"frontend": {
"type": "static",
"root": "${HACKAPROMPT_FRONTEND_ROOT:-/opt/hackaprompt-chat-viewer/frontend}",
"try_files": ["{path}", "{path}/", "/index.html"]
},
"api_routes": [
{ "path": "/api/*", "upstream": "${HACKAPROMPT_API_UPSTREAM:-127.0.0.1:5002}" }
],
"static_routes": [
{ "path": "/images/*", "root": "${HACKAPROMPT_APP_ROOT:-/opt/hackaprompt-chat-viewer}" }
]
}
]
}
```
## Health Checks & Logs
After Caddy starts, the script runs a curl-based health check for each site unless `--skip-healthcheck` is passed globally or the site entry sets `"skip": true`. Each check retries once per second for up to 30 seconds; if a site never reports healthy, the script exits with an error.
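The probe is a plain `curl`, so a failing check can be reproduced by hand (the URL below is the default `DIFY_HEALTH_URL`):
```
# Same request the script issues; -k tolerates self-signed certificates
curl -sk http://127.0.0.1/health
```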
Logs are written to `docker/caddy/state/logs/` by default. Adjust `log_file` per site if you want a different location.
## Stopping Caddy
```
./docker/shutdown-caddy.sh
```
The shutdown helper reads the PID file and stops the running Caddy instance via `caddy stop`, falling back to a manual `kill` if that fails.

docker/caddy/__init__.py Normal file
View file

@@ -0,0 +1,11 @@
"""Utilities for generating multi-site Caddy configurations."""
from .render_caddy import SiteMetadata, render_sites, render_metadata_lines
from .load_metadata import load_metadata
__all__ = [
"SiteMetadata",
"render_sites",
"render_metadata_lines",
"load_metadata",
]

docker/caddy/load_metadata.py Normal file
View file

@@ -0,0 +1,29 @@
"""Load site metadata without rewriting the Caddyfile."""
from __future__ import annotations
import json
import os
from pathlib import Path
from typing import List
from .render_caddy import SiteMetadata, _expand, _to_bool
def load_metadata(config_path: Path) -> List[SiteMetadata]:
with config_path.open("r", encoding="utf-8") as fh:
data = json.load(fh)
sites = data.get("sites", [])
metadata: List[SiteMetadata] = []
for raw_site in sites:
site = _expand(raw_site)
health_cfg = site.get("health_check") or {}
metadata.append(
SiteMetadata(
name=site.get("name", "site"),
health_url=health_cfg.get("url", ""),
skip_healthcheck=_to_bool(health_cfg.get("skip"), default=False),
app_url=site.get("app_url", ""),
)
)
return metadata
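The startup script calls this helper (through an inline `python3` heredoc) when it reuses an existing Caddyfile, so health checks still run without re-rendering. A minimal standalone sketch, assuming the repository root is on `PYTHONPATH` as `startup-caddy.sh` arranges:
```python
from pathlib import Path

from docker.caddy.load_metadata import load_metadata

# Print one "name|health_url|skip|app_url" line per site, the same
# format the startup script parses with IFS='|'
for entry in load_metadata(Path("docker/caddy/sites.json")):
    print(entry.serialize())
```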

docker/caddy/render_caddy.py Normal file
View file

@@ -0,0 +1,296 @@
"""Render a multi-site Caddyfile from JSON configuration.
This module is invoked by docker/startup-caddy.sh. It reads a JSON config,
expands environment variables, and writes out a Caddyfile plus site metadata
used for health checks and logging.
"""
from __future__ import annotations
import json
import os
import re
from dataclasses import dataclass
from pathlib import Path
from typing import Iterable, Iterator, List, Tuple
from urllib.parse import urlparse
@dataclass
class SiteMetadata:
"""Compact representation of site health/check details."""
name: str
health_url: str
skip_healthcheck: bool
app_url: str
def serialize(self) -> str:
return "|".join(
[
self.name,
self.health_url,
"true" if self.skip_healthcheck else "false",
self.app_url,
]
)
DEFAULT_PATTERN = re.compile(r"\${([^}:]+):-([^}]*)}")
def _expand_string(raw: str) -> str:
def replace(match: re.Match[str]) -> str:
var, default = match.group(1), match.group(2)
current = os.environ.get(var)
return current if current not in (None, "") else default
substituted = DEFAULT_PATTERN.sub(replace, raw)
return os.path.expandvars(substituted)
def _expand(value): # type: ignore[no-untyped-def]
if isinstance(value, str):
return _expand_string(value)
if isinstance(value, list):
return [_expand(v) for v in value]
if isinstance(value, dict):
return {k: _expand(v) for k, v in value.items()}
return value
def _to_bool(value, default: bool = False) -> bool:
if value is None:
return default
if isinstance(value, bool):
return value
if isinstance(value, (int, float)):
return bool(value)
value_str = str(value).strip().lower()
if value_str in {"true", "1", "yes", "y", "on"}:
return True
if value_str in {"false", "0", "no", "n", "off"}:
return False
return default
def _iter_headers(value) -> Iterator[Tuple[str, str]]: # type: ignore[no-untyped-def]
if isinstance(value, dict):
for key, val in value.items():
if key and val is not None:
yield str(key), str(val)
return
if isinstance(value, (list, tuple)):
for item in value:
if isinstance(item, dict):
name = item.get("name") or item.get("header")
val = item.get("value")
if name and val is not None:
yield str(name), str(val)
elif isinstance(item, (list, tuple)) and len(item) == 2:
name, val = item
if name and val is not None:
yield str(name), str(val)
elif value is not None:
raise ValueError("Unsupported header specification: %r" % (value,))
def render_sites(config_path: Path, log_dir: Path) -> tuple[str, List[SiteMetadata]]:
with config_path.open("r", encoding="utf-8") as fh:
data = json.load(fh)
raw_sites = data.get("sites", [])
if not raw_sites:
raise ValueError("No sites defined in configuration file")
log_dir.mkdir(parents=True, exist_ok=True)
blocks: List[str] = []
metadata: List[SiteMetadata] = []
for raw_site in raw_sites:
site = _expand(raw_site)
name = site.get("name") or "site"
address = site.get("address")
if not address:
raise ValueError(f"Site '{name}' is missing the required 'address' field")
https_redirect = _to_bool(site.get("https_redirect"), default=False)
acme_challenge = _to_bool(site.get("acme_challenge"), default=False)
address = address.strip()
log_file = site.get("log_file")
log_path = Path(log_file) if log_file else log_dir / f"{name}-access.log"
if not log_path.is_absolute():
log_path = log_dir / log_path
log_path.parent.mkdir(parents=True, exist_ok=True)
lines: List[str] = [f"{address} {{"]
lines.append(" encode gzip")
headers = site.get("headers") or {}
if headers:
lines.append(" header {")
for key, value in headers.items():
lines.append(f' {key} "{value}"')
lines.append(" }")
health_path = site.get("health_path", "/health")
if health_path:
lines.append(f' respond {health_path} "OK" 200')
if acme_challenge:
lines.extend(
[
" handle /.well-known/acme-challenge/* {",
" file_server",
" }",
]
)
for route in site.get("api_routes") or []:
path = route.get("path")
upstream = route.get("upstream")
if not path or not upstream:
continue
headers_up = list(_iter_headers(route.get("headers_up")))
headers_down = list(_iter_headers(route.get("headers_down")))
lines.append(f" handle {path} {{")
if headers_up or headers_down:
lines.append(f" reverse_proxy {upstream} {{")
for key, val in headers_up:
lines.append(f" header_up {key} {val}")
for key, val in headers_down:
lines.append(f" header_down {key} {val}")
lines.append(" }")
else:
lines.append(f" reverse_proxy {upstream}")
lines.append(" }")
for route in site.get("static_routes") or []:
path = route.get("path")
root = route.get("root")
if not path or not root:
continue
browse = _to_bool(route.get("browse"))
lines.append(f" handle {path} {{")
lines.append(f" root * {root}")
lines.append(" file_server browse" if browse else " file_server")
lines.append(" }")
if site.get("explore_route"):
explore = site["explore_route"]
upstream = explore.get("upstream")
if upstream:
lines.append(" handle /explore* {")
lines.append(f" reverse_proxy {upstream}")
lines.append(" }")
if site.get("hooks_route"):
hook = site["hooks_route"]
path = hook.get("path", "/e/*")
upstream = hook.get("upstream")
if upstream:
headers_up = list(_iter_headers(hook.get("headers_up")))
headers_down = list(_iter_headers(hook.get("headers_down")))
lines.append(f" handle {path} {{")
if headers_up or headers_down:
lines.append(f" reverse_proxy {upstream} {{")
for key, val in headers_up:
lines.append(f" header_up {key} {val}")
for key, val in headers_down:
lines.append(f" header_down {key} {val}")
lines.append(" }")
else:
lines.append(f" reverse_proxy {upstream}")
lines.append(" }")
frontend = site.get("frontend") or {}
frontend_type = frontend.get("type", "reverse_proxy")
if frontend_type == "reverse_proxy":
upstream = frontend.get("upstream")
if upstream:
rp_lines = [" handle {"]
headers_up = list(_iter_headers(frontend.get("headers_up")))
headers_down = list(_iter_headers(frontend.get("headers_down")))
if headers_up or headers_down:
rp_lines.append(f" reverse_proxy {upstream} {{")
for key, val in headers_up:
rp_lines.append(f" header_up {key} {val}")
for key, val in headers_down:
rp_lines.append(f" header_down {key} {val}")
rp_lines.append(" }")
else:
rp_lines.append(f" reverse_proxy {upstream}")
rp_lines.append(" }")
lines.extend(rp_lines)
elif frontend_type == "static":
root = frontend.get("root")
if root:
lines.append(" handle {")
lines.append(f" root * {root}")
try_files = frontend.get("try_files") or []
if try_files:
lines.append(" try_files " + " ".join(try_files))
lines.append(" file_server")
lines.append(" }")
cache_static = site.get("cache_static") or {}
cache_paths = cache_static.get("paths") or []
cache_header = cache_static.get("header")
if cache_paths and cache_header:
lines.append(" @static {")
lines.append(" path " + " ".join(cache_paths))
lines.append(" }")
lines.append(f' header @static Cache-Control "{cache_header}"')
lines.extend(
[
" log {",
f" output file {log_path} {{",
" roll_size 100mb",
" roll_keep 10",
" roll_keep_for 720h",
" }",
" format json",
" }",
"}",
]
)
blocks.append("\n".join(lines))
host_for_redirect = ""
redirect_port = None
if not address.startswith(":"):
parsed_address = urlparse(address if "://" in address else f"https://{address}")
host_for_redirect = parsed_address.hostname or ""
redirect_port = parsed_address.port
if https_redirect and host_for_redirect:
port_segment = f":{redirect_port}" if redirect_port and redirect_port != 443 else ""
blocks.append(
"\n".join(
[
f"http://{host_for_redirect}{port_segment} {{",
f" redir https://{host_for_redirect}{{uri}}",
"}",
]
)
)
health_cfg = site.get("health_check") or {}
metadata.append(
SiteMetadata(
name=name,
health_url=health_cfg.get("url", ""),
skip_healthcheck=_to_bool(health_cfg.get("skip"), default=False),
app_url=site.get("app_url", ""),
)
)
return "\n\n".join(blocks) + "\n", metadata
def render_metadata_lines(entries: Iterable[SiteMetadata]) -> List[str]:
return [entry.serialize() for entry in entries]
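For reference, a minimal sketch of driving the renderer directly, mirroring the calls `startup-caddy.sh` makes from its heredoc; the paths assume the default repository layout:
```python
from pathlib import Path

from docker.caddy.render_caddy import render_metadata_lines, render_sites

# Render every site block and collect per-site health-check metadata;
# render_sites() creates the log directory if it does not exist yet
caddyfile_text, sites = render_sites(
    Path("docker/caddy/sites.json"),
    Path("docker/caddy/state/logs"),
)
Path("docker/caddy/state/Caddyfile").write_text(caddyfile_text, encoding="utf-8")
print("\n".join(render_metadata_lines(sites)))
```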

docker/caddy/sites.json Normal file
View file

@@ -0,0 +1,137 @@
{
"sites": [
{
"name": "dify",
"address": "${DIFY_ADDRESS:-dify.jojomaw.com}",
"https_redirect": "${DIFY_REDIRECT:-true}",
"auto_https": "${DIFY_AUTO_HTTPS:-true}",
"health_path": "/health",
"health_check": {
"url": "${DIFY_HEALTH_URL:-http://127.0.0.1/health}",
"skip": "${DIFY_SKIP_HEALTHCHECK:-false}"
},
"app_url": "${DIFY_APP_URL:-http://127.0.0.1}",
"log_file": "${LOG_DIR}/dify-access.log",
"acme_challenge": "${DIFY_ACME_CHALLENGE:-false}",
"headers": {
"X-Frame-Options": "SAMEORIGIN",
"X-Content-Type-Options": "nosniff",
"Referrer-Policy": "strict-origin-when-cross-origin",
"X-XSS-Protection": "1; mode=block",
"Strict-Transport-Security": "max-age=31536000; includeSubDomains"
},
"api_routes": [
{
"path": "/api/*",
"upstream": "${DIFY_API_UPSTREAM:-127.0.0.1:5001}"
},
{
"path": "/v1/*",
"upstream": "${DIFY_API_UPSTREAM:-127.0.0.1:5001}"
},
{
"path": "/console/api/*",
"upstream": "${DIFY_API_UPSTREAM:-127.0.0.1:5001}"
},
{
"path": "/files/*",
"upstream": "${DIFY_FILES_UPSTREAM:-127.0.0.1:5001}"
},
{
"path": "/mcp/*",
"upstream": "${DIFY_MCP_UPSTREAM:-127.0.0.1:5001}"
}
],
"static_routes": [
{
"path": "/explore/*",
"root": "${DIFY_EXPLORE_ROOT:-/app/web}",
"browse": false
}
],
"frontend": {
"type": "reverse_proxy",
"upstream": "${DIFY_FRONTEND_UPSTREAM:-127.0.0.1:3000}"
},
"hooks_route": {
"path": "/e/*",
"upstream": "${DIFY_PLUGIN_DAEMON_UPSTREAM:-127.0.0.1:5002}",
"headers_up": [
{"name": "Dify-Hook-Url", "value": "${DIFY_HOOK_URL_HEADER:-{scheme}://{host}{uri}}"}
]
},
"cache_static": {
"paths": [
"*.js",
"*.css",
"*.png",
"*.jpg",
"*.jpeg",
"*.gif",
"*.ico",
"*.svg",
"*.woff",
"*.woff2",
"*.ttf",
"*.eot"
],
"header": "public, max-age=31536000, immutable"
}
},
{
"name": "hackaprompt",
"address": "${HACKAPROMPT_ADDRESS:-chat.jojomaw.com}",
"https_redirect": "${HACKAPROMPT_REDIRECT:-true}",
"auto_https": "${HACKAPROMPT_AUTO_HTTPS:-true}",
"health_path": "/health",
"health_check": {
"url": "${HACKAPROMPT_HEALTH_URL:-http://127.0.0.1:8080/health}",
"skip": "${HACKAPROMPT_SKIP_HEALTHCHECK:-false}"
},
"app_url": "${HACKAPROMPT_APP_URL:-http://127.0.0.1:8080}",
"log_file": "${LOG_DIR}/hackaprompt-access.log",
"acme_challenge": "${HACKAPROMPT_ACME_CHALLENGE:-false}",
"headers": {
"X-Frame-Options": "SAMEORIGIN",
"X-Content-Type-Options": "nosniff",
"Referrer-Policy": "strict-origin-when-cross-origin",
"X-XSS-Protection": "1; mode=block",
"Strict-Transport-Security": "max-age=31536000; includeSubDomains"
},
"api_routes": [
{
"path": "/api/*",
"upstream": "${HACKAPROMPT_API_UPSTREAM:-127.0.0.1:5501}"
}
],
"static_routes": [
{
"path": "/images/*",
"root": "${HACKAPROMPT_APP_ROOT:-/opt/hackaprompt-chat-viewer}"
}
],
"frontend": {
"type": "static",
"root": "${HACKAPROMPT_FRONTEND_ROOT:-/opt/hackaprompt-chat-viewer/frontend}",
"try_files": ["{path}", "{path}/", "/index.html"]
},
"cache_static": {
"paths": [
"*.js",
"*.css",
"*.png",
"*.jpg",
"*.jpeg",
"*.gif",
"*.ico",
"*.svg",
"*.woff",
"*.woff2",
"*.ttf",
"*.eot"
],
"header": "public, max-age=31536000, immutable"
}
}
]
}

docker/shutdown-caddy.sh Normal file
View file

@@ -0,0 +1,49 @@
#!/usr/bin/env bash
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
STATE_DIR="$SCRIPT_DIR/caddy/state"
PID_FILE="$STATE_DIR/pids/caddy.pid"
STARTUP_LOG="$STATE_DIR/logs/startup.log"
SHUTDOWN_LOG="$STATE_DIR/logs/shutdown.log"
mkdir -p "$(dirname "$PID_FILE")" "$(dirname "$SHUTDOWN_LOG")"
log() {
echo "$(date '+%Y-%m-%d %H:%M:%S') [INFO] $1" | tee -a "$SHUTDOWN_LOG"
}
error() {
echo "$(date '+%Y-%m-%d %H:%M:%S') [ERROR] $1" | tee -a "$SHUTDOWN_LOG" >&2
}
if [ ! -f "$PID_FILE" ]; then
log "No Caddy PID file found; nothing to stop."
exit 0
fi
PID="$(cat "$PID_FILE")"
if ! kill -0 "$PID" >/dev/null 2>&1; then
log "Caddy process (PID $PID) not running; removing stale PID file."
rm -f "$PID_FILE"
exit 0
fi
log "Stopping Caddy (PID $PID)..."
if caddy stop --pidfile "$PID_FILE" >/dev/null 2>&1; then
log "Caddy stopped successfully."
rm -f "$PID_FILE"
else
error "Failed to stop Caddy via caddy stop; attempting manual kill."
kill "$PID" >/dev/null 2>&1 || true
sleep 2
if kill -0 "$PID" >/dev/null 2>&1; then
error "Unable to stop Caddy (PID $PID)."
exit 1
fi
log "Caddy process terminated."
rm -f "$PID_FILE"
fi
log "Shutdown complete. Logs available at $STARTUP_LOG and $SHUTDOWN_LOG."

docker/startup-caddy.sh Normal file
View file

@@ -0,0 +1,251 @@
#!/usr/bin/env bash
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
CADDY_DIR="$SCRIPT_DIR/caddy"
STATE_DIR="$CADDY_DIR/state"
LOG_DIR="$STATE_DIR/logs"
PID_DIR="$STATE_DIR/pids"
CADDYFILE_PATH="$STATE_DIR/Caddyfile"
PID_FILE="$PID_DIR/caddy.pid"
STARTUP_LOG="$LOG_DIR/startup.log"
CONFIG_PATH="${CADDY_SITE_CONFIG:-$CADDY_DIR/sites.json}"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
mkdir -p "$LOG_DIR" "$PID_DIR"
export LOG_DIR
export PYTHONPATH="${PROJECT_ROOT}:${PYTHONPATH:-}"
# Provide defaults for configuration variables consumed by sites.json
export DIFY_ADDRESS="${DIFY_ADDRESS:-dify.jojomaw.com}"
export DIFY_REDIRECT="${DIFY_REDIRECT:-true}"
export DIFY_AUTO_HTTPS="${DIFY_AUTO_HTTPS:-true}"
export DIFY_HEALTH_URL="${DIFY_HEALTH_URL:-http://127.0.0.1/health}"
export DIFY_SKIP_HEALTHCHECK="${DIFY_SKIP_HEALTHCHECK:-false}"
export DIFY_APP_URL="${DIFY_APP_URL:-http://127.0.0.1}"
export DIFY_ACME_CHALLENGE="${DIFY_ACME_CHALLENGE:-true}"
export DIFY_API_UPSTREAM="${DIFY_API_UPSTREAM:-127.0.0.1:5001}"
export DIFY_FILES_UPSTREAM="${DIFY_FILES_UPSTREAM:-127.0.0.1:5001}"
export DIFY_MCP_UPSTREAM="${DIFY_MCP_UPSTREAM:-127.0.0.1:5001}"
export DIFY_FRONTEND_UPSTREAM="${DIFY_FRONTEND_UPSTREAM:-127.0.0.1:3000}"
export DIFY_EXPLORE_UPSTREAM="${DIFY_EXPLORE_UPSTREAM:-127.0.0.1:3000}"
export DIFY_PLUGIN_DAEMON_UPSTREAM="${DIFY_PLUGIN_DAEMON_UPSTREAM:-127.0.0.1:5002}"
export DIFY_HOOK_URL_HEADER="${DIFY_HOOK_URL_HEADER:-{scheme}://{host}{uri}}"
export HACKAPROMPT_ADDRESS="${HACKAPROMPT_ADDRESS:-chat.jojomaw.com}"
export HACKAPROMPT_REDIRECT="${HACKAPROMPT_REDIRECT:-true}"
export HACKAPROMPT_AUTO_HTTPS="${HACKAPROMPT_AUTO_HTTPS:-false}"
export HACKAPROMPT_HEALTH_URL="${HACKAPROMPT_HEALTH_URL:-http://127.0.0.1:8080/health}"
export HACKAPROMPT_SKIP_HEALTHCHECK="${HACKAPROMPT_SKIP_HEALTHCHECK:-false}"
export HACKAPROMPT_APP_URL="${HACKAPROMPT_APP_URL:-http://127.0.0.1:8080}"
export HACKAPROMPT_ACME_CHALLENGE="${HACKAPROMPT_ACME_CHALLENGE:-true}"
export HACKAPROMPT_API_UPSTREAM="${HACKAPROMPT_API_UPSTREAM:-127.0.0.1:5501}"
export HACKAPROMPT_APP_ROOT="${HACKAPROMPT_APP_ROOT:-/opt/hackaprompt-chat-viewer}"
export HACKAPROMPT_FRONTEND_ROOT="${HACKAPROMPT_FRONTEND_ROOT:-/opt/hackaprompt-chat-viewer/frontend}"
log() {
echo "$(date '+%Y-%m-%d %H:%M:%S') [INFO] $1" | tee -a "$STARTUP_LOG"
}
error() {
echo "$(date '+%Y-%m-%d %H:%M:%S') [ERROR] $1" | tee -a "$STARTUP_LOG" >&2
}
usage() {
cat <<'EOF'
Usage: startup-caddy.sh [options]
Options:
--config PATH Path to the multi-site configuration file (default: docker/caddy/sites.json).
--regenerate Force regeneration of the Caddy configuration file.
--skip-healthcheck Skip all post-start health checks.
--validate-only Regenerate the configuration, run 'caddy validate', and exit.
-h, --help Show this help message and exit.
Environment variables:
CADDY_SITE_CONFIG Alternative default path to the sites configuration file.
LOG_DIR Directory to hold generated log files.
EOF
}
FORCE_REGENERATE=false
GLOBAL_SKIP_HEALTHCHECK=false
VALIDATE_ONLY=false
while [[ $# -gt 0 ]]; do
case "$1" in
--config)
CONFIG_PATH="$2"
shift 2
;;
--regenerate)
FORCE_REGENERATE=true
shift
;;
--skip-healthcheck)
GLOBAL_SKIP_HEALTHCHECK=true
shift
;;
--validate-only)
VALIDATE_ONLY=true
FORCE_REGENERATE=true
shift
;;
-h|--help)
usage
exit 0
;;
*)
usage >&2
exit 1
;;
esac
done
if [ ! -f "$CONFIG_PATH" ]; then
error "Configuration file not found: $CONFIG_PATH"
exit 1
fi
if ! command -v caddy >/dev/null 2>&1; then
error "Caddy binary not found in PATH. Install Caddy before running this script."
exit 1
fi
if ! command -v python3 >/dev/null 2>&1; then
error "python3 is required to render the multi-site configuration"
exit 1
fi
should_regenerate() {
if [ "$FORCE_REGENERATE" = true ]; then
return 0
fi
if [ ! -f "$CADDYFILE_PATH" ]; then
return 0
fi
if ! caddy validate --config "$CADDYFILE_PATH" >/dev/null 2>&1; then
return 0
fi
return 1
}
render_caddyfile() {
local metadata_file="$1"
python3 - <<PY
from pathlib import Path
from docker.caddy.render_caddy import render_sites, render_metadata_lines
config_path = Path("${CONFIG_PATH}").expanduser().resolve()
log_dir = Path("${LOG_DIR}").expanduser().resolve()
output_path = Path("${CADDYFILE_PATH}").expanduser().resolve()
metadata_path = Path("${1}").expanduser().resolve()
caddy_text, metadata_entries = render_sites(config_path, log_dir)
output_path.write_text(caddy_text, encoding="utf-8")
metadata_path.write_text("\n".join(render_metadata_lines(metadata_entries)) + "\n", encoding="utf-8")
PY
}
load_metadata() {
python3 - <<PY
from pathlib import Path
from docker.caddy.load_metadata import load_metadata
config_path = Path("${CONFIG_PATH}").expanduser().resolve()
for entry in load_metadata(config_path):
print(entry.serialize())
PY
}
SITE_METADATA=()
if should_regenerate; then
log "Rendering Caddy configuration from $CONFIG_PATH"
metadata_temp="$(mktemp)"
if render_caddyfile "$metadata_temp"; then
if command -v caddy >/dev/null 2>&1; then
log "Formatting generated configuration..."
caddy fmt --overwrite "$CADDYFILE_PATH" >/dev/null 2>&1 || true
fi
mapfile -t SITE_METADATA < "$metadata_temp"
rm -f "$metadata_temp"
log "Caddyfile written to $CADDYFILE_PATH"
else
rm -f "$metadata_temp"
error "Failed to generate Caddyfile"
exit 1
fi
else
log "Using existing Caddyfile; pass --regenerate to overwrite"
mapfile -t SITE_METADATA < <(load_metadata)
fi
if [ -f "$PID_FILE" ] && kill -0 "$(cat "$PID_FILE")" >/dev/null 2>&1; then
error "Caddy appears to be running already (PID $(cat "$PID_FILE"))"
exit 1
fi
if [ "$VALIDATE_ONLY" = true ]; then
log "Validating generated configuration..."
if caddy validate --config "$CADDYFILE_PATH"; then
log "Configuration validated successfully"
exit 0
else
error "Configuration validation failed"
exit 1
fi
fi
log "Starting Caddy..."
if caddy start --config "$CADDYFILE_PATH" --pidfile "$PID_FILE" >/dev/null 2>&1; then
if [ ! -f "$PID_FILE" ]; then
error "Caddy reported success but no PID file was created"
exit 1
fi
log "Caddy started successfully (PID $(cat "$PID_FILE"))"
else
error "Failed to start Caddy"
exit 1
fi
if [ "$GLOBAL_SKIP_HEALTHCHECK" = true ]; then
log "Skipping all health checks (global override)"
else
for entry in "${SITE_METADATA[@]}"; do
IFS='|' read -r site_name health_url skip_flag app_url <<< "$entry"
skip_flag=$(printf '%s' "$skip_flag" | tr '[:upper:]' '[:lower:]')
if [ "$skip_flag" = "true" ]; then
log "Skipping health check for $site_name (configuration)"
continue
fi
if [ -z "$health_url" ]; then
log "No health URL provided for $site_name; skipping check"
continue
fi
log "Waiting for $site_name to pass health check at $health_url"
success=false
for attempt in {1..30}; do
if curl -s -k "$health_url" >/dev/null 2>&1; then
log "$site_name reported healthy"
success=true
break
fi
sleep 1
done
if [ "$success" = false ]; then
error "$site_name failed health check"
exit 1
fi
done
fi
for entry in "${SITE_METADATA[@]}"; do
IFS='|' read -r site_name _ _ app_url <<< "$entry"
if [ -n "$app_url" ]; then
log "$site_name available at: $app_url"
else
log "$site_name available (no app URL configured)"
fi
done