Mirror of https://github.com/caddyserver/caddy.git, synced 2025-04-19 18:58:50 +08:00
caddytls: Prefer managed wildcard certs over individual subdomain certs (#6959)

* caddytls: Prefer managed wildcard certs over individual subdomain certs
* Repurpose force_automate as no_wildcard
* Fix a couple bugs
* Restore force_automate and use automate loader as wildcard override

parent 35c8c2d92d
commit 1bfa111552

@@ -633,12 +633,6 @@ func (st *ServerType) serversFromPairings(
srv.AutoHTTPS = new(caddyhttp.AutoHTTPSConfig)
}
srv.AutoHTTPS.IgnoreLoadedCerts = true

case "prefer_wildcard":
if srv.AutoHTTPS == nil {
srv.AutoHTTPS = new(caddyhttp.AutoHTTPSConfig)
}
srv.AutoHTTPS.PreferWildcard = true
}
}

@@ -706,16 +700,6 @@ func (st *ServerType) serversFromPairings(
return specificity(iLongestHost) > specificity(jLongestHost)
})

// collect all hosts that have a wildcard in them
wildcardHosts := []string{}
for _, sblock := range p.serverBlocks {
for _, addr := range sblock.parsedKeys {
if strings.HasPrefix(addr.Host, "*.") {
wildcardHosts = append(wildcardHosts, addr.Host[2:])
}
}
}

var hasCatchAllTLSConnPolicy, addressQualifiesForTLS bool
autoHTTPSWillAddConnPolicy := srv.AutoHTTPS == nil || !srv.AutoHTTPS.Disabled

@@ -801,7 +785,13 @@ func (st *ServerType) serversFromPairings(
cp.FallbackSNI = fallbackSNI
}

// only append this policy if it actually changes something
// only append this policy if it actually changes something,
// or if the configuration explicitly automates certs for
// these names (this is necessary to hoist a connection policy
// above one that may manually load a wildcard cert that would
// otherwise clobber the automated one; the code that appends
// policies that manually load certs comes later, so they're
// lower in the list)
if !cp.SettingsEmpty() || mapContains(forceAutomatedNames, hosts) {
srv.TLSConnPolicies = append(srv.TLSConnPolicies, cp)
hasCatchAllTLSConnPolicy = len(hosts) == 0

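The new condition calls a helper named mapContains, whose definition is outside this hunk. A minimal sketch of a "does the set contain any of these hosts" check, assuming that is roughly what it does (the name mapContainsAny and the concrete signature here are assumptions, not the real helper):

package main

import "fmt"

// mapContainsAny reports whether any of the given hosts is present in the set.
// (Hypothetical helper; the real mapContains in the Caddyfile adapter may be
// generic or differ in shape.)
func mapContainsAny(set map[string]struct{}, hosts []string) bool {
    for _, h := range hosts {
        if _, ok := set[h]; ok {
            return true
        }
    }
    return false
}

func main() {
    forced := map[string]struct{}{"foo.example.com": {}}
    fmt.Println(mapContainsAny(forced, []string{"foo.example.com", "bar.example.com"})) // true
    fmt.Println(mapContainsAny(forced, []string{"baz.example.com"}))                    // false
}
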
@@ -841,18 +831,6 @@ func (st *ServerType) serversFromPairings(
addressQualifiesForTLS = true
}

// If prefer wildcard is enabled, then we add hosts that are
// already covered by the wildcard to the skip list
if addressQualifiesForTLS && srv.AutoHTTPS != nil && srv.AutoHTTPS.PreferWildcard {
baseDomain := addr.Host
if idx := strings.Index(baseDomain, "."); idx != -1 {
baseDomain = baseDomain[idx+1:]
}
if !strings.HasPrefix(addr.Host, "*.") && slices.Contains(wildcardHosts, baseDomain) {
srv.AutoHTTPS.SkipCerts = append(srv.AutoHTTPS.SkipCerts, addr.Host)
}
}

// predict whether auto-HTTPS will add the conn policy for us; if so, we
// may not need to add one for this server
autoHTTPSWillAddConnPolicy = autoHTTPSWillAddConnPolicy &&

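The removed block decided wildcard coverage by chopping the first label off the host and checking the remainder against the collected wildcard bases. A self-contained sketch of that check (names here are illustrative; the preference logic now lives in the TLS app, as seen later in this diff):

package main

import (
    "fmt"
    "slices"
    "strings"
)

// coveredByWildcard reports whether host (a non-wildcard name) is covered by
// one of the wildcard base domains, e.g. "foo.example.com" by "example.com"
// (i.e. *.example.com).
func coveredByWildcard(host string, wildcardBases []string) bool {
    if strings.HasPrefix(host, "*.") {
        return false
    }
    base := host
    if idx := strings.Index(base, "."); idx != -1 {
        base = base[idx+1:]
    }
    return slices.Contains(wildcardBases, base)
}

func main() {
    bases := []string{"example.com"}
    fmt.Println(coveredByWildcard("foo.example.com", bases)) // true
    fmt.Println(coveredByWildcard("example.org", bases))     // false
}
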
@@ -1083,11 +1061,40 @@ func consolidateConnPolicies(cps caddytls.ConnectionPolicies) (caddytls.Connecti

// if they're exactly equal in every way, just keep one of them
if reflect.DeepEqual(cps[i], cps[j]) {
cps = append(cps[:j], cps[j+1:]...)
cps = slices.Delete(cps, j, j+1)
i--
break
}

// as a special case, if there are adjacent TLS conn policies that are identical except
// by their matchers, and the matchers are specifically just ServerName ("sni") matchers
// (by far the most common), we can combine them into a single policy
if i == j-1 && len(cps[i].MatchersRaw) == 1 && len(cps[j].MatchersRaw) == 1 {
if iSNIMatcherJSON, ok := cps[i].MatchersRaw["sni"]; ok {
if jSNIMatcherJSON, ok := cps[j].MatchersRaw["sni"]; ok {
// position of policies and the matcher criteria check out; if settings are
// the same, then we can combine the policies; we have to unmarshal and
// remarshal the matchers though
if cps[i].SettingsEqual(*cps[j]) {
var iSNIMatcher caddytls.MatchServerName
if err := json.Unmarshal(iSNIMatcherJSON, &iSNIMatcher); err == nil {
var jSNIMatcher caddytls.MatchServerName
if err := json.Unmarshal(jSNIMatcherJSON, &jSNIMatcher); err == nil {
iSNIMatcher = append(iSNIMatcher, jSNIMatcher...)
cps[i].MatchersRaw["sni"], err = json.Marshal(iSNIMatcher)
if err != nil {
return nil, fmt.Errorf("recombining SNI matchers: %v", err)
}
cps = slices.Delete(cps, j, j+1)
i--
break
}
}
}
}
}
}

// if they have the same matcher, try to reconcile each field: either they must
// be identical, or we have to be able to combine them safely
if reflect.DeepEqual(cps[i].MatchersRaw, cps[j].MatchersRaw) {

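The merge above unmarshals both "sni" matcher payloads into caddytls.MatchServerName (a list of server names), appends one to the other, and re-marshals the result. A standalone sketch of that round-trip, using plain string slices in place of the caddytls type so it runs on its own:

package main

import (
    "encoding/json"
    "fmt"
)

// mergeSNIMatchers combines two JSON-encoded SNI matcher lists (each a JSON
// array of server names) into one, mirroring the unmarshal/append/marshal
// round-trip in consolidateConnPolicies.
func mergeSNIMatchers(a, b json.RawMessage) (json.RawMessage, error) {
    var listA, listB []string
    if err := json.Unmarshal(a, &listA); err != nil {
        return nil, err
    }
    if err := json.Unmarshal(b, &listB); err != nil {
        return nil, err
    }
    return json.Marshal(append(listA, listB...))
}

func main() {
    merged, err := mergeSNIMatchers(
        json.RawMessage(`["automated1.example.com"]`),
        json.RawMessage(`["automated2.example.com"]`),
    )
    if err != nil {
        panic(err)
    }
    fmt.Println(string(merged)) // ["automated1.example.com","automated2.example.com"]
}
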
@@ -1189,12 +1196,13 @@ func consolidateConnPolicies(cps caddytls.ConnectionPolicies) (caddytls.Connecti
}
}

cps = append(cps[:j], cps[j+1:]...)
cps = slices.Delete(cps, j, j+1)
i--
break
}
}
}

return cps, nil
}

@@ -92,11 +92,9 @@ func (st ServerType) buildTLSApp(
tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, catchAllAP)
}

// collect all hosts that have a wildcard in them, and arent HTTP
wildcardHosts := []string{}
// hosts that have been explicitly marked to be automated,
// even if covered by another wildcard
forcedAutomatedNames := make(map[string]struct{})
var wildcardHosts []string // collect all hosts that have a wildcard in them, and aren't HTTP
forcedAutomatedNames := make(map[string]struct{}) // explicitly configured to be automated, even if covered by a wildcard

for _, p := range pairings {
var addresses []string
for _, addressWithProtocols := range p.addressesWithProtocols {

@@ -153,7 +151,7 @@ func (st ServerType) buildTLSApp(
ap.OnDemand = true
}

// collect hosts that are forced to be automated
// collect hosts that are forced to have certs automated for their specific name
if _, ok := sblock.pile["tls.force_automate"]; ok {
for _, host := range sblockHosts {
forcedAutomatedNames[host] = struct{}{}

@@ -375,7 +373,9 @@ func (st ServerType) buildTLSApp(
return nil, warnings, err
}
for _, cfg := range ech.Configs {
ap.SubjectsRaw = append(ap.SubjectsRaw, cfg.PublicName)
if cfg.PublicName != "" {
ap.SubjectsRaw = append(ap.SubjectsRaw, cfg.PublicName)
}
}
if tlsApp.Automation == nil {
tlsApp.Automation = new(caddytls.AutomationConfig)

@@ -1,109 +0,0 @@
{
auto_https prefer_wildcard
}

*.example.com {
tls {
dns mock
}
respond "fallback"
}

foo.example.com {
respond "foo"
}
----------
{
"apps": {
"http": {
"servers": {
"srv0": {
"listen": [
":443"
],
"routes": [
{
"match": [
{
"host": [
"foo.example.com"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"body": "foo",
"handler": "static_response"
}
]
}
]
}
],
"terminal": true
},
{
"match": [
{
"host": [
"*.example.com"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"body": "fallback",
"handler": "static_response"
}
]
}
]
}
],
"terminal": true
}
],
"automatic_https": {
"skip_certificates": [
"foo.example.com"
],
"prefer_wildcard": true
}
}
}
},
"tls": {
"automation": {
"policies": [
{
"subjects": [
"*.example.com"
],
"issuers": [
{
"challenges": {
"dns": {
"provider": {
"name": "mock"
}
}
},
"module": "acme"
}
]
}
]
}
}
}
}

@@ -1,268 +0,0 @@
{
auto_https prefer_wildcard
}

# Covers two domains
*.one.example.com {
tls {
dns mock
}
respond "one fallback"
}

# Is covered, should not get its own AP
foo.one.example.com {
respond "foo one"
}

# This one has its own tls config so it doesn't get covered (escape hatch)
bar.one.example.com {
respond "bar one"
tls bar@bar.com
}

# Covers nothing but AP gets consolidated with the first
*.two.example.com {
tls {
dns mock
}
respond "two fallback"
}

# Is HTTP so it should not cover
http://*.three.example.com {
respond "three fallback"
}

# Has no wildcard coverage so it gets an AP
foo.three.example.com {
respond "foo three"
}
----------
{
"apps": {
"http": {
"servers": {
"srv0": {
"listen": [
":443"
],
"routes": [
{
"match": [
{
"host": [
"foo.three.example.com"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"body": "foo three",
"handler": "static_response"
}
]
}
]
}
],
"terminal": true
},
{
"match": [
{
"host": [
"foo.one.example.com"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"body": "foo one",
"handler": "static_response"
}
]
}
]
}
],
"terminal": true
},
{
"match": [
{
"host": [
"bar.one.example.com"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"body": "bar one",
"handler": "static_response"
}
]
}
]
}
],
"terminal": true
},
{
"match": [
{
"host": [
"*.one.example.com"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"body": "one fallback",
"handler": "static_response"
}
]
}
]
}
],
"terminal": true
},
{
"match": [
{
"host": [
"*.two.example.com"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"body": "two fallback",
"handler": "static_response"
}
]
}
]
}
],
"terminal": true
}
],
"automatic_https": {
"skip_certificates": [
"foo.one.example.com",
"bar.one.example.com"
],
"prefer_wildcard": true
}
},
"srv1": {
"listen": [
":80"
],
"routes": [
{
"match": [
{
"host": [
"*.three.example.com"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"body": "three fallback",
"handler": "static_response"
}
]
}
]
}
],
"terminal": true
}
],
"automatic_https": {
"prefer_wildcard": true
}
}
}
},
"tls": {
"automation": {
"policies": [
{
"subjects": [
"foo.three.example.com"
]
},
{
"subjects": [
"bar.one.example.com"
],
"issuers": [
{
"email": "bar@bar.com",
"module": "acme"
},
{
"ca": "https://acme.zerossl.com/v2/DV90",
"email": "bar@bar.com",
"module": "acme"
}
]
},
{
"subjects": [
"*.one.example.com",
"*.two.example.com"
],
"issuers": [
{
"challenges": {
"dns": {
"provider": {
"name": "mock"
}
}
},
"module": "acme"
}
]
}
]
}
}
}
}

@@ -131,13 +131,7 @@ shadowed.example.com {
{
"match": {
"sni": [
"automated1.example.com"
]
}
},
{
"match": {
"sni": [
"automated1.example.com",
"automated2.example.com"
]
}

internal/logs.go (new file, 22 lines)
@@ -0,0 +1,22 @@
package internal

import "fmt"

// MaxSizeSubjectsListForLog returns the keys in the map as a slice of maximum length
// maxToDisplay. It is useful for logging domains being managed, for example, since a
// map is typically needed for quick lookup, but a slice is needed for logging, and this
// can be quite a doozy since there may be a huge amount (hundreds of thousands).
func MaxSizeSubjectsListForLog(subjects map[string]struct{}, maxToDisplay int) []string {
numberOfNamesToDisplay := min(len(subjects), maxToDisplay)
domainsToDisplay := make([]string, 0, numberOfNamesToDisplay)
for domain := range subjects {
domainsToDisplay = append(domainsToDisplay, domain)
if len(domainsToDisplay) >= numberOfNamesToDisplay {
break
}
}
if len(subjects) > maxToDisplay {
domainsToDisplay = append(domainsToDisplay, fmt.Sprintf("(and %d more...)", len(subjects)-maxToDisplay))
}
return domainsToDisplay
}

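A usage sketch for the new helper; the function is inlined here (lowercased) because internal packages of the caddy module cannot be imported from outside the module:

package main

import "fmt"

// Same logic as internal.MaxSizeSubjectsListForLog, inlined so this sketch runs standalone.
func maxSizeSubjectsListForLog(subjects map[string]struct{}, maxToDisplay int) []string {
    n := min(len(subjects), maxToDisplay)
    out := make([]string, 0, n)
    for domain := range subjects {
        out = append(out, domain)
        if len(out) >= n {
            break
        }
    }
    if len(subjects) > maxToDisplay {
        out = append(out, fmt.Sprintf("(and %d more...)", len(subjects)-maxToDisplay))
    }
    return out
}

func main() {
    subjects := map[string]struct{}{
        "a.example.com": {}, "b.example.com": {}, "c.example.com": {},
    }
    // With a cap of 2, we get two names (map order is random) plus an "(and 1 more...)" marker.
    fmt.Println(maxSizeSubjectsListForLog(subjects, 2))
}
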
@@ -20,6 +20,7 @@ import (
"io"
"log"
"os"
"slices"
"strings"
"sync"
"time"

@@ -490,10 +491,8 @@ func (cl *CustomLog) provision(ctx Context, logging *Logging) error {
if len(cl.Include) > 0 && len(cl.Exclude) > 0 {
// prevent intersections
for _, allow := range cl.Include {
for _, deny := range cl.Exclude {
if allow == deny {
return fmt.Errorf("include and exclude must not intersect, but found %s in both lists", allow)
}
if slices.Contains(cl.Exclude, allow) {
return fmt.Errorf("include and exclude must not intersect, but found %s in both lists", allow)
}
}

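The provisioning check above now uses slices.Contains instead of the inner loop. A minimal standalone version of the same intersection check:

package main

import (
    "fmt"
    "slices"
)

// firstIntersection returns the first entry of include that also appears in
// exclude, or "" if the two lists do not intersect.
func firstIntersection(include, exclude []string) string {
    for _, allow := range include {
        if slices.Contains(exclude, allow) {
            return allow
        }
    }
    return ""
}

func main() {
    fmt.Println(firstIntersection([]string{"http.log.access"}, []string{"http.log.access"})) // http.log.access
    fmt.Println(firstIntersection([]string{"admin"}, []string{"tls"}) == "")                 // true
}
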
@@ -152,7 +152,7 @@ type App struct {
tlsApp *caddytls.TLS

// used temporarily between phases 1 and 2 of auto HTTPS
allCertDomains []string
allCertDomains map[string]struct{}
}

// CaddyModule returns the Caddy module information.

@@ -25,6 +25,7 @@ import (
"go.uber.org/zap"

"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/internal"
"github.com/caddyserver/caddy/v2/modules/caddytls"
)

@@ -65,12 +66,6 @@ type AutoHTTPSConfig struct {
// enabled. To force automated certificate management
// regardless of loaded certificates, set this to true.
IgnoreLoadedCerts bool `json:"ignore_loaded_certificates,omitempty"`

// If true, automatic HTTPS will prefer wildcard names
// and ignore non-wildcard names if both are available.
// This allows for writing a config with top-level host
// matchers without having those names produce certificates.
PreferWildcard bool `json:"prefer_wildcard,omitempty"`
}

// automaticHTTPSPhase1 provisions all route matchers, determines

@@ -163,33 +158,8 @@ func (app *App) automaticHTTPSPhase1(ctx caddy.Context, repl *caddy.Replacer) er
}
}

// trim the list of domains covered by wildcards, if configured
if srv.AutoHTTPS.PreferWildcard {
wildcards := make(map[string]struct{})
for d := range serverDomainSet {
if strings.HasPrefix(d, "*.") {
wildcards[d[2:]] = struct{}{}
}
}
for d := range serverDomainSet {
if strings.HasPrefix(d, "*.") {
continue
}
base := d
if idx := strings.Index(d, "."); idx != -1 {
base = d[idx+1:]
}
if _, ok := wildcards[base]; ok {
delete(serverDomainSet, d)
}
}
}

// build the list of domains that could be used with ECH (if enabled)
// so the TLS app can know to publish ECH configs for them; we do this
// after trimming domains covered by wildcards because, presumably,
// if the user wants to use wildcard certs, they also want to use the
// wildcard for ECH, rather than individual subdomains
// so the TLS app can know to publish ECH configs for them
echDomains := make([]string, 0, len(serverDomainSet))
for d := range serverDomainSet {
echDomains = append(echDomains, d)

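This hunk removes the phase-1 trimming of names covered by wildcards (the preference is now applied in the TLS app's Manage, shown later in this diff). A standalone sketch of the trimming that was removed:

package main

import (
    "fmt"
    "strings"
)

// trimWildcardCovered removes every non-wildcard domain from the set that is
// covered by a "*.<base>" entry also present in the set.
func trimWildcardCovered(domains map[string]struct{}) {
    wildcards := make(map[string]struct{})
    for d := range domains {
        if strings.HasPrefix(d, "*.") {
            wildcards[d[2:]] = struct{}{}
        }
    }
    for d := range domains {
        if strings.HasPrefix(d, "*.") {
            continue
        }
        base := d
        if idx := strings.Index(d, "."); idx != -1 {
            base = d[idx+1:]
        }
        if _, ok := wildcards[base]; ok {
            delete(domains, d)
        }
    }
}

func main() {
    set := map[string]struct{}{
        "*.example.com":   {},
        "foo.example.com": {}, // covered, gets trimmed
        "example.org":     {}, // not covered, stays
    }
    trimWildcardCovered(set)
    fmt.Println(set) // map[*.example.com:{} example.org:{}]
}
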
@@ -295,19 +265,10 @@ func (app *App) automaticHTTPSPhase1(ctx caddy.Context, repl *caddy.Replacer) er
}
}

// we now have a list of all the unique names for which we need certs;
// turn the set into a slice so that phase 2 can use it
app.allCertDomains = make([]string, 0, len(uniqueDomainsForCerts))
// we now have a list of all the unique names for which we need certs
var internal, tailscale []string
uniqueDomainsLoop:
for d := range uniqueDomainsForCerts {
if !isTailscaleDomain(d) {
// whether or not there is already an automation policy for this
// name, we should add it to the list to manage a cert for it,
// unless it's a Tailscale domain, because we don't manage those
app.allCertDomains = append(app.allCertDomains, d)
}

// some names we've found might already have automation policies
// explicitly specified for them; we should exclude those from
// our hidden/implicit policy, since applying a name to more than

@@ -346,6 +307,7 @@ uniqueDomainsLoop:
}
if isTailscaleDomain(d) {
tailscale = append(tailscale, d)
delete(uniqueDomainsForCerts, d) // not managed by us; handled separately
} else if shouldUseInternal(d) {
internal = append(internal, d)
}

@@ -475,6 +437,9 @@ redirServersLoop:
}
}

// persist the domains/IPs we're managing certs for through provisioning/startup
app.allCertDomains = uniqueDomainsForCerts

logger.Debug("adjusted config",
zap.Reflect("tls", app.tlsApp),
zap.Reflect("http", app))

@@ -777,7 +742,7 @@ func (app *App) automaticHTTPSPhase2() error {
return nil
}
app.logger.Info("enabling automatic TLS certificate management",
zap.Strings("domains", app.allCertDomains),
zap.Strings("domains", internal.MaxSizeSubjectsListForLog(app.allCertDomains, 1000)),
)
err := app.tlsApp.Manage(app.allCertDomains)
if err != nil {

@@ -652,7 +652,7 @@ func (t *TLSConfig) MakeTLSClientConfig(ctx caddy.Context) (*tls.Config, error)
return nil, fmt.Errorf("getting tls app: %v", err)
}
tlsApp := tlsAppIface.(*caddytls.TLS)
err = tlsApp.Manage([]string{t.ClientCertificateAutomate})
err = tlsApp.Manage(map[string]struct{}{t.ClientCertificateAutomate: {}})
if err != nil {
return nil, fmt.Errorf("managing client certificate: %v", err)
}

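Call sites that previously passed a []string now need a set; a tiny conversion helper sketch (the name namesToSet is illustrative, not part of the Caddy API):

package main

import "fmt"

// namesToSet converts a slice of subject names into the map[string]struct{}
// set type that (*caddytls.TLS).Manage now takes.
func namesToSet(names []string) map[string]struct{} {
    set := make(map[string]struct{}, len(names))
    for _, n := range names {
        set[n] = struct{}{}
    }
    return set
}

func main() {
    fmt.Println(namesToSet([]string{"client.example.com"})) // map[client.example.com:{}]
}
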
@@ -24,6 +24,7 @@ import (
"fmt"
"io"
"os"
"reflect"
"strings"

"github.com/mholt/acmez/v3"

@@ -461,6 +462,14 @@ func (p ConnectionPolicy) SettingsEmpty() bool {
p.InsecureSecretsLog == ""
}

// SettingsEqual returns true if p's settings (fields
// except the matchers) are the same as q.
func (p ConnectionPolicy) SettingsEqual(q ConnectionPolicy) bool {
p.MatchersRaw = nil
q.MatchersRaw = nil
return reflect.DeepEqual(p, q)
}

// UnmarshalCaddyfile sets up the ConnectionPolicy from Caddyfile tokens. Syntax:
//
// connection_policy {

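SettingsEqual works because the method has value receivers: nilling MatchersRaw only affects the local copies before reflect.DeepEqual compares the remaining fields. A tiny sketch of the same pattern on a stand-in struct, so it does not depend on the full ConnectionPolicy definition:

package main

import (
    "fmt"
    "reflect"
)

// policy is a stand-in for a connection policy: a matcher field plus
// "settings" fields.
type policy struct {
    Matchers []string
    ALPN     []string
    SNI      string
}

// settingsEqual compares everything except the matchers; the value receivers
// mean the caller's structs are untouched.
func (p policy) settingsEqual(q policy) bool {
    p.Matchers = nil
    q.Matchers = nil
    return reflect.DeepEqual(p, q)
}

func main() {
    a := policy{Matchers: []string{"a.example.com"}, ALPN: []string{"h2"}}
    b := policy{Matchers: []string{"b.example.com"}, ALPN: []string{"h2"}}
    fmt.Println(a.settingsEqual(b)) // true: only the matchers differ
}
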
@@ -138,7 +138,6 @@ func (ech *ECH) Provision(ctx caddy.Context) ([]string, error) {
// all existing configs are now loaded; see if we need to make any new ones
// based on the input configuration, and also mark the most recent one(s) as
// current/active, so they can be used for ECH retries

for _, cfg := range ech.Configs {
publicName := strings.ToLower(strings.TrimSpace(cfg.PublicName))

@@ -33,6 +33,7 @@ import (
"go.uber.org/zap/zapcore"

"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/internal"
"github.com/caddyserver/caddy/v2/modules/caddyevents"
)

@@ -55,8 +56,10 @@ type TLS struct {
//
// The "automate" certificate loader module can be used to
// specify a list of subjects that need certificates to be
// managed automatically. The first matching automation
// policy will be applied to manage the certificate(s).
// managed automatically, including subdomains that may
// already be covered by a managed wildcard certificate.
// The first matching automation policy will be used
// to manage automated certificate(s).
//
// All loaded certificates get pooled
// into the same cache and may be used to complete TLS

@@ -123,7 +126,7 @@ type TLS struct {
dns any // technically, it should be any/all of the libdns interfaces (RecordSetter, RecordAppender, etc.)

certificateLoaders []CertificateLoader
automateNames []string
automateNames map[string]struct{}
ctx caddy.Context
storageCleanTicker *time.Ticker
storageCleanStop chan struct{}

@@ -218,12 +221,13 @@ func (t *TLS) Provision(ctx caddy.Context) error {
// special case; these will be loaded in later using our automation facilities,
// which we want to avoid doing during provisioning
if automateNames, ok := modIface.(*AutomateLoader); ok && automateNames != nil {
repl := caddy.NewReplacer()
subjects := make([]string, len(*automateNames))
for i, sub := range *automateNames {
subjects[i] = repl.ReplaceAll(sub, "")
if t.automateNames == nil {
t.automateNames = make(map[string]struct{})
}
repl := caddy.NewReplacer()
for _, sub := range *automateNames {
t.automateNames[repl.ReplaceAll(sub, "")] = struct{}{}
}
t.automateNames = append(t.automateNames, subjects...)
} else {
return fmt.Errorf("loading certificates with 'automate' requires array of strings, got: %T", modIface)
}

@@ -283,7 +287,7 @@ func (t *TLS) Provision(ctx caddy.Context) error {
if err != nil {
return fmt.Errorf("provisioning default public automation policy: %v", err)
}
for _, n := range t.automateNames {
for n := range t.automateNames {
// if any names specified by the "automate" loader do not qualify for a public
// certificate, we should initialize a default internal automation policy
// (but we don't want to do this unnecessarily, since it may prompt for password!)

@@ -339,8 +343,14 @@ func (t *TLS) Provision(ctx caddy.Context) error {

// outer names should have certificates to reduce client brittleness
for _, outerName := range outerNames {
if outerName == "" {
continue
}
if !t.HasCertificateForSubject(outerName) {
t.automateNames = append(t.automateNames, outerNames...)
if t.automateNames == nil {
t.automateNames = make(map[string]struct{})
}
t.automateNames[outerName] = struct{}{}
}
}
}

@@ -449,7 +459,8 @@ func (t *TLS) Cleanup() error {
// app instance (which is being stopped) that are not managed or loaded by the
// new app instance (which just started), and remove them from the cache
var noLongerManaged []certmagic.SubjectIssuer
var reManage, noLongerLoaded []string
var noLongerLoaded []string
reManage := make(map[string]struct{})
for subj, currentIssuerKey := range t.managing {
// It's a bit nuanced: managed certs can sometimes be different enough that we have to
// swap them out for a different one, even if they are for the same subject/domain.

@@ -467,7 +478,7 @@ func (t *TLS) Cleanup() error {

// then, if the next app is managing a cert for this name, but with a different issuer, re-manage it
if ok && nextIssuerKey != currentIssuerKey {
reManage = append(reManage, subj)
reManage[subj] = struct{}{}
}
}
}

@@ -488,7 +499,7 @@ func (t *TLS) Cleanup() error {
if err := nextTLSApp.Manage(reManage); err != nil {
if c := t.logger.Check(zapcore.ErrorLevel, "re-managing unloaded certificates with new config"); c != nil {
c.Write(
zap.Strings("subjects", reManage),
zap.Strings("subjects", internal.MaxSizeSubjectsListForLog(reManage, 1000)),
zap.Error(err),
)
}

@@ -509,17 +520,31 @@ func (t *TLS) Cleanup() error {
return nil
}

// Manage immediately begins managing names according to the
// matching automation policy.
func (t *TLS) Manage(names []string) error {
// Manage immediately begins managing subjects according to the
// matching automation policy. The subjects are given in a map
// to prevent duplication and also because quick lookups are
// needed to assess wildcard coverage, if any, depending on
// certain config parameters (with lots of subjects, computing
// wildcard coverage over a slice can be highly inefficient).
func (t *TLS) Manage(subjects map[string]struct{}) error {
// for a large number of names, we can be more memory-efficient
// by making only one certmagic.Config for all the names that
// use that config, rather than calling ManageAsync once for
// every name; so first, bin names by AutomationPolicy
policyToNames := make(map[*AutomationPolicy][]string)
for _, name := range names {
ap := t.getAutomationPolicyForName(name)
policyToNames[ap] = append(policyToNames[ap], name)
for subj := range subjects {
ap := t.getAutomationPolicyForName(subj)
// by default, if a wildcard that covers the subj is also being
// managed, either by a previous call to Manage or by this one,
// prefer using that over individual certs for its subdomains;
// but users can disable this and force getting a certificate for
// subdomains by adding the name to the 'automate' cert loader
if t.managingWildcardFor(subj, subjects) {
if _, ok := t.automateNames[subj]; !ok {
continue
}
}
policyToNames[ap] = append(policyToNames[ap], subj)
}

// now that names are grouped by policy, we can simply make one

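The new loop in Manage keeps a subject for individual management only if no wildcard in the batch covers it, or if the subject was explicitly listed by the 'automate' loader. A simplified, dependency-free sketch of that decision (coverage here is checked one label deep against the batch only, which is narrower than managingWildcardFor):

package main

import (
    "fmt"
    "strings"
)

// keepForIndividualCert mirrors the skip rule: a subject is dropped from
// individual management when a "*.<base>" entry in the same batch covers it,
// unless the subject was explicitly forced via the 'automate' loader.
func keepForIndividualCert(subj string, batch, forced map[string]struct{}) bool {
    if strings.HasPrefix(subj, "*.") {
        return true // wildcards are always managed themselves
    }
    idx := strings.Index(subj, ".")
    if idx == -1 {
        return true
    }
    if _, covered := batch["*."+subj[idx+1:]]; !covered {
        return true
    }
    _, isForced := forced[subj]
    return isForced
}

func main() {
    batch := map[string]struct{}{"*.example.com": {}, "foo.example.com": {}, "bar.example.com": {}}
    forced := map[string]struct{}{"bar.example.com": {}}
    for subj := range batch {
        fmt.Println(subj, keepForIndividualCert(subj, batch, forced))
        // *.example.com true, foo.example.com false, bar.example.com true
    }
}
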
@@ -530,7 +555,7 @@ func (t *TLS) Manage(names []string) error {
if err != nil {
const maxNamesToDisplay = 100
if len(names) > maxNamesToDisplay {
names = append(names[:maxNamesToDisplay], fmt.Sprintf("(%d more...)", len(names)-maxNamesToDisplay))
names = append(names[:maxNamesToDisplay], fmt.Sprintf("(and %d more...)", len(names)-maxNamesToDisplay))
}
return fmt.Errorf("automate: manage %v: %v", names, err)
}

@@ -555,6 +580,43 @@ func (t *TLS) Manage(names []string) error {
return nil
}

// managingWildcardFor returns true if the app is managing a certificate that covers that
// subject name (including consideration of wildcards), either from its internal list of
// names that it IS managing certs for, or from the otherSubjsToManage which includes names
// that WILL be managed.
func (t *TLS) managingWildcardFor(subj string, otherSubjsToManage map[string]struct{}) bool {
// TODO: we could also consider manually-loaded certs using t.HasCertificateForSubject(),
// but that does not account for how manually-loaded certs may be restricted as to which
// hostnames or ClientHellos they can be used with by tags, etc; I don't *think* anyone
// necessarily wants this anyway, but I thought I'd note this here for now (if we did
// consider manually-loaded certs, we'd probably want to rename the method since it
// wouldn't be just about managed certs anymore)

// IP addresses must match exactly
if ip := net.ParseIP(subj); ip != nil {
_, managing := t.managing[subj]
return managing
}

// replace labels of the domain with wildcards until we get a match
labels := strings.Split(subj, ".")
for i := range labels {
if labels[i] == "*" {
continue
}
labels[i] = "*"
candidate := strings.Join(labels, ".")
if _, ok := t.managing[candidate]; ok {
return true
}
if _, ok := otherSubjsToManage[candidate]; ok {
return true
}
}

return false
}

// RegisterServerNames registers the provided DNS names with the TLS app.
// This is currently used to auto-publish Encrypted ClientHello (ECH)
// configurations, if enabled. Use of this function by apps using the TLS

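managingWildcardFor walks the subject's labels, replacing them with "*" one at a time (cumulatively, without restoring the previous label) and checking each candidate against the sets of managed names. A sketch that just generates those candidates so the matching order is easy to see; the real method then looks each candidate up in t.managing and otherSubjsToManage:

package main

import (
    "fmt"
    "net"
    "strings"
)

// wildcardCandidates returns the wildcard names that would be checked for
// coverage of subj, in the same order as managingWildcardFor generates them.
// IP addresses have no wildcard coverage and must match exactly.
func wildcardCandidates(subj string) []string {
    if ip := net.ParseIP(subj); ip != nil {
        return nil
    }
    var candidates []string
    labels := strings.Split(subj, ".")
    for i := range labels {
        if labels[i] == "*" {
            continue
        }
        labels[i] = "*" // cumulative replacement, matching the method above
        candidates = append(candidates, strings.Join(labels, "."))
    }
    return candidates
}

func main() {
    fmt.Println(wildcardCandidates("foo.example.com")) // [*.example.com *.*.com *.*.*]
    fmt.Println(wildcardCandidates("10.0.0.1"))        // []
}
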