# We start from my nginx fork, which includes the proxy-connect module from Tengine
# Source is available at https://github.com/rpardini/nginx-proxy-connect-stable-alpine
# This is already multi-arch!
ARG BASE_IMAGE="ghcr.io/gmarcy/nginx-proxy-connect-stable-alpine:latest"
ARG DEBUG_IMAGE
# Could be "-debug"
ARG BASE_IMAGE_SUFFIX=""
FROM ${BASE_IMAGE}${BASE_IMAGE_SUFFIX}
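# A build sketch (resulting tag name is illustrative; availability of a "-debug" base tag depends on the base image):
#   podman build --build-arg BASE_IMAGE_SUFFIX="-debug" -t container-registry-proxy:debug .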
# Link image to original repository on GitHub
LABEL org.opencontainers.image.source=https://github.com/gmarcy/container-registry-proxy
# apk packages that will be present in the final image both debug and release
RUN apk add --no-cache --update bash ca-certificates-bundle coreutils openssl
# If set to 1, enables building mitmproxy, which helps a lot in debugging, but is super heavy to build.
ARG DEBUG_IMAGE
ARG DO_DEBUG_BUILD="${DEBUG_IMAGE:-"0"}"
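# A build sketch that enables the mitmproxy debug build (tag name illustrative):
#   podman build --build-arg DEBUG_IMAGE="1" -t container-registry-proxy:mitm .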
# Build mitmproxy via pip. This is heavy, takes minutes to build, and creates a 90 MB+ layer. Oh well.
RUN [[ "a$DO_DEBUG_BUILD" == "a1" ]] && { echo "Debug build ENABLED." \
&& apk add --no-cache --update su-exec cargo bsd-compat-headers git g++ libffi libffi-dev libstdc++ openssl-dev python3 python3-dev py3-pip py3-wheel py3-six py3-idna py3-certifi py3-setuptools \
&& rm /usr/lib/python3.*/EXTERNALLY-MANAGED \
&& LDFLAGS=-L/lib pip install MarkupSafe mitmproxy \
&& apk del --purge git g++ libffi-dev openssl-dev python3-dev py3-pip py3-wheel \
&& rm -rf ~/.cache/pip \
; } || { echo "Debug build disabled." ; }
# Required for mitmproxy
ENV LANG=en_US.UTF-8
# Check the installed mitmproxy version, if built.
RUN [[ "a$DO_DEBUG_BUILD" == "a1" ]] && { mitmproxy --version && mitmweb --version ; } || { echo "Debug build disabled."; }
# Create the cache directory and CA directory
RUN mkdir -p /registry_proxy_mirror_cache /ca
# Expose it as a volume, so the cache can be kept external to the container image
VOLUME /registry_proxy_mirror_cache
# Expose /ca as a volume. Users are supposed to volume-mount this, so as to preserve it across restarts.
# In fact, it's required; otherwise clients will reject the CA certificate when the proxy is run a second time.
VOLUME /ca
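# A minimal run sketch (host paths and image name illustrative), persisting both the CA and the cache:
#   podman run -d -p 0.0.0.0:3128:3128 \
#     -v /srv/registry-proxy/ca:/ca \
#     -v /srv/registry-proxy/cache:/registry_proxy_mirror_cache \
#     ghcr.io/gmarcy/container-registry-proxy:latest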
# Add our configuration
COPY nginx.conf /etc/nginx/nginx.conf
COPY nginx.manifest.common.conf /etc/nginx/nginx.manifest.common.conf
COPY nginx.manifest.stale.conf /etc/nginx/nginx.manifest.stale.conf
# Add our very hackish entrypoint and CA-building scripts, make them executable
COPY entrypoint.sh /entrypoint.sh
COPY create_ca_cert.sh /create_ca_cert.sh
RUN chmod +x /create_ca_cert.sh /entrypoint.sh
# Add liveness probe script for CoreWeave
RUN apk --no-cache add curl
COPY liveliness.sh /liveliness.sh
RUN chmod +x /liveliness.sh
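# In Kubernetes this is typically wired up as an exec liveness probe, along these lines (a sketch; probe timing is illustrative):
#   livenessProbe:
#     exec:
#       command: ["/liveliness.sh"]
#     periodSeconds: 10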
# Clients should only use port 3128, nothing else.
EXPOSE 3128
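# Clients point their proxy settings here; e.g. a systemd drop-in for a container runtime daemon
# (host name illustrative; see the project README for full client setup, including trusting the generated CA):
#   Environment="HTTP_PROXY=http://registry-proxy.internal:3128/"
#   Environment="HTTPS_PROXY=http://registry-proxy.internal:3128/"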
## Default envs.
# A space-delimited list of registries we should proxy and cache.
ENV REGISTRIES="k8s.gcr.io gcr.io quay.io"
# A space-delimited list of registry:user:password entries to inject authentication for
ENV AUTH_REGISTRIES="some.authenticated.registry:oneuser:onepassword another.registry:user:password"
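# A run-time override sketch (registry and credentials illustrative):
#   podman run ... -e REGISTRIES="quay.io ghcr.io" -e AUTH_REGISTRIES="registry.example.com:myuser:mypass"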
# Should we verify upstream certificates? Defaults to true.
ENV VERIFY_SSL="true"
# Enable nginx debugging mode; this uses the nginx-debug binary and enables debug logging, which is VERY verbose, hence the separate setting
ENV DEBUG_NGINX="false"
# Enable the slow caching tier; this allows caching under a secondary cache path on, e.g., a larger but slower disk, for the known URIs defined in SLOW_TIER_URIS
ENV SLOW_TIER_ENABLED="false"
# Statically define worker_processes; defaults to auto
ENV WORKER_PROCESSES="auto"
# Manifest caching tiers. Disabled by default, to mimic 0.4/0.5 behaviour.
# Setting it to true enables the processing of the ENVs below.
# Once enabled, it is valid for all registries.
# The envs *_REGEX represent a regex fragment, check entrypoint.sh to understand how they're used (nginx ~ location, PCRE syntax).
ENV ENABLE_MANIFEST_CACHE="false"
# 'Primary' tier defaults to 10m cache for frequently used/abused tags.
# - People publishing to production via :latest (argh) will want to include that in the regex
# - Heavy pullers who are being rate-limited but don't mind getting outdated manifests should (also) increase the cache time here
ENV MANIFEST_CACHE_PRIMARY_REGEX="(stable|nightly|production|test)"
ENV MANIFEST_CACHE_PRIMARY_TIME="10m"
# 'Secondary' tier matches any tag that has 3 digits or dots, in the hope of matching most explicitly-versioned tags.
# It caches for 60d, which is also the cache time for the large binary blobs to which the manifests refer.
# That makes them effectively immutable. Make sure you're not affected; tighten this regex or widen the primary tier.
ENV MANIFEST_CACHE_SECONDARY_REGEX="(.*)(\d|\.)+(.*)(\d|\.)+(.*)(\d|\.)+"
ENV MANIFEST_CACHE_SECONDARY_TIME="60d"
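# A worked example of the tiering above, assuming ENABLE_MANIFEST_CACHE="true" and the default regexes
# (the exact matching logic lives in entrypoint.sh):
#   tag "stable" -> primary tier,   cached 10m
#   tag "v1.2.3" -> secondary tier, cached 60d (three digit/dot runs)
#   tag "latest" -> neither tier,   cached for MANIFEST_CACHE_DEFAULT_TIME (1h, below)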
# The default cache duration for manifests that don't match either the primary or secondary tiers above.
# In the default config, :latest and other frequently-used tags will get this value.
ENV MANIFEST_CACHE_DEFAULT_TIME="1h"
# Should we allow actions other than pull? Defaults to false.
ENV ALLOW_PUSH="false"
# If push is allowed, buffering requests can cause issues on slow upstreams.
# If you have trouble pushing, set this to false first, then fix remaining timeouts.
# Default is true so as not to change existing behavior.
ENV PROXY_REQUEST_BUFFERING="true"
# Force HTTP/1.1 upstream connections, for HTTP/2 upstreams that return 426 Upgrade Required
ENV FORCE_UPSTREAM_HTTP_1_1="false"
# Set to false to stream data to clients as it arrives, reducing TTFB; note this effectively disables caching.
# Default is true so as not to change existing behavior.
ENV PROXY_BUFFERING="true"
# Should we allow overriding with the client's own authentication? Defaults to false.
ENV ALLOW_OWN_AUTH="false"
# Should we allow push only with the client's own authentication? Defaults to false.
ENV ALLOW_PUSH_WITH_OWN_AUTH="false"
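# A push-enabled run sketch combining the knobs above (values illustrative):
#   podman run ... -e ALLOW_PUSH="true" -e PROXY_REQUEST_BUFFERING="false"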
# Timeouts
# ngx_http_core_module
ENV SEND_TIMEOUT="60s"
ENV CLIENT_BODY_TIMEOUT="60s"
ENV CLIENT_HEADER_TIMEOUT="60s"
ENV KEEPALIVE_TIMEOUT="300s"
# ngx_http_proxy_module
ENV PROXY_READ_TIMEOUT="60s"
ENV PROXY_CONNECT_TIMEOUT="60s"
ENV PROXY_SEND_TIMEOUT="60s"
# ngx_http_proxy_connect_module - external module
ENV PROXY_CONNECT_READ_TIMEOUT="60s"
ENV PROXY_CONNECT_CONNECT_TIMEOUT="60s"
ENV PROXY_CONNECT_SEND_TIMEOUT="60s"
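# An override sketch for slow upstreams (values illustrative); note the CONNECT-phase timeouts are tuned separately:
#   podman run ... -e PROXY_READ_TIMEOUT="300s" -e PROXY_CONNECT_READ_TIMEOUT="300s"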
# Did you want a shell? Sorry, the entrypoint never returns, because it runs nginx itself. Use 'podman exec' if you need to mess around internally.
ENTRYPOINT ["/entrypoint.sh"]