server {
server_name {{ impress_hostname }};
listen 80;
listen [::]:80;
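
# Redirect all requests to the HTTPS version of the site.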
if ($host = {{ impress_hostname }}) {
return 301 https://$host$request_uri;
}
}
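
# The main application server, serving the site over HTTPS.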
server {
set $maintenance 0; # To enable maintenance mode, set this to 1.
server_name {{ impress_hostname }};
listen 443 ssl;
listen [::]:443 ssl;
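
# TLS certificate and settings from Let's Encrypt.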
ssl_certificate /etc/letsencrypt/live/{{ impress_hostname }}/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/{{ impress_hostname }}/privkey.pem;
include /etc/letsencrypt/options-ssl-nginx.conf;
ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem;
ssl_session_cache shared:SSL:10m; # https://superuser.com/q/1484466/14127
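
# Serve the app's static files straight from its public/ directory.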
root /srv/impress/current/public;
# Serve assets using their precompressed *.gz versions.
# The filenames contain content hashes, so they should be safe to
# cache forever.
# https://stackoverflow.com/a/6952804/107415
location ~ ^/assets/ {
gzip_static on;
expires max;
add_header Cache-Control public;
add_header Last-Modified "";
add_header ETag "";
}

# Serve the public data dumps created by the `rails public_data:commit` task.
# When run in production, it makes the latest dump available at
# /public-data/latest.sql.gz and keeps a running log of previous dumps,
# which autoindex lists below.
location /public-data/ {
autoindex on;
}
# On status 503, return the maintenance page. (We'll trigger this ourselves
# in the @app location, if $maintenance is on.)
error_page 503 /maintenance.html;
# On status 502, return the outage page. (nginx will trigger this if the
# `proxy_pass` to the application fails.)
error_page 502 /outage.html;
# Try serving static files first. If not found, fall back to the app.
try_files $uri/index.html $uri @app;
location @app {
# If we're hardcoded as being in maintenance mode, return status 503, which
# will show the maintenance page as specified above.
if ($maintenance = 1) {
return 503;
}
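
# Forward everything else to the app server listening on port 3000,
# passing along the original client address, protocol, and host.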
proxy_pass http://127.0.0.1:3000;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto https;
proxy_set_header X-Forwarded-Server $host;
proxy_set_header Host $http_host;
proxy_redirect off;
}
}