forked from OpenNeo/impress
Emi Matchu
8dc11f9940
I'm starting to port over the functionality that was previously just, me running `yarn db:export:public-data` in `impress-2020` and committing it to Git LFS every time. My immediate motivation is that the `impress-2020` git repository is getting weirdly large?? Idk how these 40MB files have blown up to a solid 16GB of Git LFS data (we don't have THAT many!!!), but I guess there's something about Git LFS's architecture and disk usage that I'm not understanding. So, let's move to a simpler system in which we don't bind the public data to the codebase, but instead just regularly dump it in production and make it available for download. This change adds the `rails public_data:commit` task, which when run in production will make the latest available at `https://impress.openneo.net/public-data/latest.sql.gz`, and will also store a running log of previous dumps, viewable at `https://impress.openneo.net/public-data/`. Things left to do: 1. Create a `rails public_data:pull` task, to download `latest.sql.gz` and import it into the local development database. 2. Set up a cron job to dump this out regularly, idk maybe weekly? That will grow, but not very fast (about 2GB per year), and we can add logic to rotate out old ones if it starts to grow too far. (If we wanted to get really intricate, we could do like, daily for the past week, then weekly for the past 3 months, then monthly for the past year, idk. There must be tools that do this!)
66 lines · No EOL · 2 KiB · Text
# Plain-HTTP listener: its only job is to bounce traffic over to HTTPS.
server {
    listen 80;
    listen [::]:80;

    server_name {{ impress_hostname }};

    # Redirect only requests that actually target our hostname; anything
    # else (scanners hitting the bare IP, stray Host headers) falls through
    # to nginx's default handling instead of being redirected.
    if ($host = {{ impress_hostname }}) {
        return 301 https://$host$request_uri;
    }
}
# Main HTTPS server: static files from the app's public/ directory, with
# everything else proxied through to the Rails app on port 3000.
server {
    # Maintenance switch: flip to 1 to serve the maintenance page for all
    # app requests (static files keep being served).
    set $maintenance 0;

    server_name {{ impress_hostname }};

    ## --- TLS ---
    listen 443 ssl;
    listen [::]:443 ssl;
    ssl_certificate /etc/letsencrypt/live/{{ impress_hostname }}/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/{{ impress_hostname }}/privkey.pem;
    include /etc/letsencrypt/options-ssl-nginx.conf;
    ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem;
    # Session cache avoids a full handshake on every connection:
    # https://superuser.com/q/1484466/14127
    ssl_session_cache shared:SSL:10m;

    ## --- Static files ---
    root /srv/impress/current/public;

    # Precompiled assets: serve the precompressed *.gz files directly.
    # Filenames embed a content hash, so they're safe to cache forever;
    # we also strip Last-Modified/ETag so Expires/Cache-Control win.
    # https://stackoverflow.com/a/6952804/107415
    location ~ ^/assets/ {
        gzip_static on;
        expires max;
        add_header Cache-Control public;
        add_header Last-Modified "";
        add_header ETag "";
    }

    # Public database dumps: expose a browsable directory listing.
    location /public-data/ {
        autoindex on;
    }

    ## --- Error pages ---
    # 503 → maintenance page (we raise this ourselves in @app when the
    # $maintenance switch above is on).
    error_page 503 /maintenance.html;
    # 502 → outage page (nginx raises this when proxy_pass can't reach
    # the application).
    error_page 502 /outage.html;

    # Prefer static content; anything not found on disk goes to the app.
    try_files $uri/index.html $uri @app;

    ## --- Application proxy ---
    location @app {
        # Hard maintenance mode: short-circuit with 503 so the
        # error_page above serves the maintenance page.
        if ($maintenance = 1) {
            return 503;
        }

        proxy_pass http://127.0.0.1:3000;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto https;
        proxy_set_header X-Forwarded-Server $host;
        proxy_set_header Host $http_host;
        proxy_redirect off;
    }
}