import React from "react";
import {
  Box,
  Flex,
  Grid,
  Heading,
  Link,
  useColorModeValue,
} from "@chakra-ui/react";
import loadableLibrary from "@loadable/component";
import * as Sentry from "@sentry/react";
import { WarningIcon } from "@chakra-ui/icons";
import NextImage from "next/image";

import ErrorGrundoImg from "./images/error-grundo.png";

/**
 * Delay hides its content at first, then shows it after the given delay.
 *
 * This is useful for loading states: it can be disruptive to see a spinner or
 * skeleton element for only a brief flash; we'd rather just show them if
 * loading is genuinely taking a while!
 *
 * 300ms is a pretty good default: that's about when perception shifts from
 * "it wasn't instant" to "the process took time".
 * https://developers.google.com/web/fundamentals/performance/rail
 */
export function Delay({ children, ms = 300 }) {
  const [isVisible, setIsVisible] = React.useState(false);

  React.useEffect(() => {
    const id = setTimeout(() => setIsVisible(true), ms);
    return () => clearTimeout(id);
  }, [ms, setIsVisible]);

  return (
    <Box opacity={isVisible ? 1 : 0} transition="opacity 0.5s">
      {children}
    </Box>
  );
}
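
// A minimal usage sketch (hypothetical component, not part of the original
// module): the loading indicator only fades in if loading outlasts the 300ms
// default, so brief loads never flash it at all.
function DelayedLoadingIndicatorExample() {
  return (
    <Delay>
      <Box role="status">Loading…</Box>
    </Delay>
  );
}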
|
2020-04-24 21:17:03 -07:00
|
|
|
|
2020-04-26 00:46:05 -07:00
|
|
|
/**
|
|
|
|
* Heading1 is a large, page-title-ish heading, with our DTI-brand-y Delicious
|
|
|
|
* font and some special typographical styles!
|
|
|
|
*/
|
2020-04-24 21:17:03 -07:00
|
|
|
export function Heading1({ children, ...props }) {
|
|
|
|
return (
|
2020-05-18 00:56:46 -07:00
|
|
|
<Heading
|
2020-10-27 23:09:42 -07:00
|
|
|
as="h1"
|
2020-08-12 00:37:31 -07:00
|
|
|
size="2xl"
|
2020-05-18 00:56:46 -07:00
|
|
|
fontFamily="Delicious, sans-serif"
|
|
|
|
fontWeight="800"
|
|
|
|
{...props}
|
|
|
|
>
|
2020-04-24 21:17:03 -07:00
|
|
|
{children}
|
|
|
|
</Heading>
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
2020-04-26 00:46:05 -07:00
|
|
|
/**
|
|
|
|
* Heading2 is a major subheading, with our DTI-brand-y Delicious font and some
|
|
|
|
* special typographical styles!!
|
|
|
|
*/
|
2020-04-24 21:17:03 -07:00
|
|
|
export function Heading2({ children, ...props }) {
|
|
|
|
return (
|
2020-05-18 00:56:46 -07:00
|
|
|
<Heading
|
2020-10-27 23:09:42 -07:00
|
|
|
as="h2"
|
2020-05-18 00:56:46 -07:00
|
|
|
size="xl"
|
|
|
|
fontFamily="Delicious, sans-serif"
|
|
|
|
fontWeight="700"
|
|
|
|
{...props}
|
|
|
|
>
|
2020-04-24 21:17:03 -07:00
|
|
|
{children}
|
|
|
|
</Heading>
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
2020-10-27 23:09:42 -07:00
|
|
|
/**
|
|
|
|
* Heading2 is a minor subheading, with our DTI-brand-y Delicious font and some
|
|
|
|
* special typographical styles!!
|
|
|
|
*/
|
|
|
|
export function Heading3({ children, ...props }) {
|
|
|
|
return (
|
|
|
|
<Heading
|
|
|
|
as="h3"
|
|
|
|
size="lg"
|
|
|
|
fontFamily="Delicious, sans-serif"
|
|
|
|
fontWeight="700"
|
|
|
|
{...props}
|
|
|
|
>
|
|
|
|
{children}
|
|
|
|
</Heading>
|
|
|
|
);
|
|
|
|
}

/**
 * ErrorMessage is a simple error message for simple errors!
 */
export function ErrorMessage({ children, ...props }) {
  return (
    <Box color="red.400" {...props}>
      {children}
    </Box>
  );
}

/**
 * useCommonStyles returns shared background colors that adapt to the current
 * color mode (light vs dark).
 */
export function useCommonStyles() {
  return {
    brightBackground: useColorModeValue("white", "gray.700"),
    bodyBackground: useColorModeValue("gray.50", "gray.800"),
  };
}

/**
 * safeImageUrl returns an HTTPS-safe image URL for Neopets assets!
 */
export function safeImageUrl(urlString, { crossOrigin = null } = {}) {
  if (urlString == null) {
    return urlString;
  }

  let url;
  try {
    url = new URL(
      urlString,
      // A few item thumbnail images incorrectly start with "/". When that
      // happens, the correct URL is at images.neopets.com.
      //
      // So, we provide "http://images.neopets.com" as the base URL when
      // parsing. Most URLs are absolute and will ignore it, but relative URLs
      // will resolve relative to that base.
      "http://images.neopets.com"
    );
  } catch (e) {
    logAndCapture(
      new Error(
        `safeImageUrl could not parse URL: ${urlString}. Returning a placeholder.`
      )
    );
    return "https://impress-2020.openneo.net/__error__URL-was-not-parseable__";
  }

  // Rewrite Neopets URLs to their HTTPS equivalents, and additionally to our
  // proxy if we need CORS headers.
  if (
    url.origin === "http://images.neopets.com" ||
    url.origin === "https://images.neopets.com"
  ) {
    url.protocol = "https:";
    if (crossOrigin) {
      url.host = "images.neopets-asset-proxy.openneo.net";
    }
  } else if (
    url.origin === "http://pets.neopets.com" ||
    url.origin === "https://pets.neopets.com"
  ) {
    url.protocol = "https:";
    if (crossOrigin) {
      url.host = "pets.neopets-asset-proxy.openneo.net";
    }
  }

  if (url.protocol !== "https:") {
    logAndCapture(
      new Error(
        `safeImageUrl was provided an unsafe URL, but we don't know how to ` +
          `upgrade it to HTTPS: ${urlString}. Returning a placeholder.`
      )
    );
    return "https://impress-2020.openneo.net/__error__URL-was-not-HTTPS__";
  }

  return url.toString();
}
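
// For illustration, per the rewrite rules above (hypothetical asset paths,
// not part of the original module):
//   safeImageUrl("http://images.neopets.com/items/foo.gif")
//     => "https://images.neopets.com/items/foo.gif"
//   safeImageUrl("http://images.neopets.com/items/foo.gif", { crossOrigin: "anonymous" })
//     => "https://images.neopets-asset-proxy.openneo.net/items/foo.gif"
//   safeImageUrl("/items/foo.gif")
//     => "https://images.neopets.com/items/foo.gif"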

/**
 * useDebounce helps make a rapidly-changing value change less! It waits for a
 * pause in the incoming data before outputting the latest value.
 *
 * We use it in search: when the user types rapidly, we don't want to update
 * our query and send a new request every keystroke. We want to wait for it to
 * seem like they might be done, while still feeling responsive!
 *
 * Adapted from https://usehooks.com/useDebounce/
 */
export function useDebounce(
  value,
  delay,
  { waitForFirstPause = false, initialValue = null, forceReset = null } = {}
) {
  // State and setters for debounced value
  const [debouncedValue, setDebouncedValue] = React.useState(
    waitForFirstPause ? initialValue : value
  );

  React.useEffect(
    () => {
      // Update debounced value after delay
      const handler = setTimeout(() => {
        setDebouncedValue(value);
      }, delay);

      // Cancel the timeout if value changes (also on delay change or unmount).
      // This is how we prevent the debounced value from updating if value
      // changes within the delay period: the timeout gets cleared and
      // restarted.
      return () => {
        clearTimeout(handler);
      };
    },
    [value, delay] // Only re-call effect if value or delay changes
  );

  // The `forceReset` option helps us decide whether to set the value
  // immediately! We'll update it in an effect for consistency and clarity, but
  // also return it immediately rather than wait a tick.
  const shouldForceReset = forceReset && forceReset(debouncedValue, value);
  React.useEffect(() => {
    if (shouldForceReset) {
      setDebouncedValue(value);
    }
  }, [shouldForceReset, value]);

  return shouldForceReset ? value : debouncedValue;
}
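
// A minimal usage sketch (hypothetical hook, not part of the original
// module): only emit a new search query once the user pauses typing for
// 300ms.
function useDebouncedSearchQueryExample(searchQuery) {
  return useDebounce(searchQuery, 300);
}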

/**
 * useFetch uses `fetch` to fetch the given URL, and returns the request state.
 *
 * Our limited API is designed to match the `use-http` library!
 */
export function useFetch(url, { responseType, skip, ...fetchOptions }) {
  // Just trying to be clear about what you'll get back ^_^` If we want to
  // fetch non-binary data later, extend this and get something else from res!
  if (responseType !== "arrayBuffer") {
    throw new Error(`unsupported responseType ${responseType}`);
  }

  const [response, setResponse] = React.useState({
    loading: skip ? false : true,
    error: null,
    data: null,
  });

  // We expect this to be a simple object, so this helps us only re-send the
  // fetch when the options have actually changed, rather than e.g. a new copy
  // of an identical object!
  const fetchOptionsAsJson = JSON.stringify(fetchOptions);

  React.useEffect(() => {
    if (skip) {
      return;
    }

    let canceled = false;

    fetch(url, JSON.parse(fetchOptionsAsJson))
      .then(async (res) => {
        if (canceled) {
          return;
        }

        const arrayBuffer = await res.arrayBuffer();
        setResponse({ loading: false, error: null, data: arrayBuffer });
      })
      .catch((error) => {
        if (canceled) {
          return;
        }

        setResponse({ loading: false, error, data: null });
      });

    return () => {
      canceled = true;
    };
  }, [skip, url, fetchOptionsAsJson]);

  return response;
}
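
// A minimal usage sketch (hypothetical URL, not part of the original module):
//   const { loading, error, data } = useFetch("https://example.com/asset.bin", {
//     responseType: "arrayBuffer",
//   });
// Once `loading` is false and `error` is null, `data` is an ArrayBuffer.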

/**
 * useLocalStorage is like React.useState, but it persists the value in the
 * device's `localStorage`, so it comes back even after reloading the page.
 *
 * Adapted from https://usehooks.com/useLocalStorage/.
 */
let storageListeners = [];
export function useLocalStorage(key, initialValue) {
  const loadValue = React.useCallback(() => {
    if (typeof localStorage === "undefined") {
      return initialValue;
    }
    try {
      const item = localStorage.getItem(key);
      return item ? JSON.parse(item) : initialValue;
    } catch (error) {
      console.error(error);
      return initialValue;
    }
  }, [key, initialValue]);

  const [storedValue, setStoredValue] = React.useState(loadValue);

  const setValue = React.useCallback(
    (value) => {
      try {
        setStoredValue(value);
        window.localStorage.setItem(key, JSON.stringify(value));
        storageListeners.forEach((l) => l());
      } catch (error) {
        console.error(error);
      }
    },
    [key]
  );

  const reloadValue = React.useCallback(() => {
    setStoredValue(loadValue());
  }, [loadValue, setStoredValue]);

  // Listen for changes elsewhere on the page, and update here too!
  React.useEffect(() => {
    storageListeners.push(reloadValue);
    return () => {
      storageListeners = storageListeners.filter((l) => l !== reloadValue);
    };
  }, [reloadValue]);

  // Listen for changes in other tabs, and update here too! (The "storage"
  // event doesn't catch same-page updates, which is why we also keep our own
  // listener list above.)
  React.useEffect(() => {
    window.addEventListener("storage", reloadValue);
    return () => window.removeEventListener("storage", reloadValue);
  }, [reloadValue]);

  return [storedValue, setValue];
}
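
// A minimal usage sketch (hypothetical key, not part of the original module):
// reads and writes are persisted under the "preferredName" localStorage key,
// and other components or tabs using the same key stay in sync.
function usePreferredNameExample() {
  const [name, setName] = useLocalStorage("preferredName", "guest");
  return { name, setName };
}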

/**
 * loadImage loads the given image URL via `safeImageUrl`, and returns a
 * Promise for the decoded Image. The Promise also gets a `cancel` method,
 * to abort an in-flight request.
 */
export function loadImage(rawSrc, { crossOrigin = null } = {}) {
  const src = safeImageUrl(rawSrc, { crossOrigin });
  const image = new Image();
  let canceled = false;
  let resolved = false;

  const promise = new Promise((resolve, reject) => {
    image.onload = () => {
      if (canceled) return;
      resolved = true;
      resolve(image);
    };
    image.onerror = () => {
      if (canceled) return;
      reject(new Error(`Failed to load image: ${JSON.stringify(src)}`));
    };
    if (crossOrigin) {
      image.crossOrigin = crossOrigin;
    }
    image.src = src;
  });

  promise.cancel = () => {
    // NOTE: To keep `cancel` a safe and unsurprising call, we don't cancel
    // resolved images. That's because our approach to cancelation mutates
    // the Image object we already returned, which could be surprising if
    // the caller is using the Image and expected the `cancel` call to only
    // cancel any in-flight network requests. (e.g. we cancel a DTI movie
    // when it unloads from the page, but it might stick around in the movie
    // cache, and we want those images to still work!)
    if (resolved) return;
    image.src = "";
    canceled = true;
  };

  return promise;
}
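
// A minimal usage sketch (hypothetical asset path, not part of the original
// module):
//   const imagePromise = loadImage("http://images.neopets.com/items/foo.gif", {
//     crossOrigin: "anonymous",
//   });
//   imagePromise.then((image) => { /* e.g. draw it to a canvas */ });
//   imagePromise.cancel(); // e.g. if the component unmounts before it loads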

/**
 * loadable is a wrapper for `@loadable/component`, with extra error handling.
 * Loading the page will often fail if you keep a session open during a deploy,
 * because Vercel doesn't keep old JS chunks on the CDN. Recover by reloading!
 */
export function loadable(load, options) {
  return loadableLibrary(
    () =>
      load().catch((e) => {
        console.error("Error loading page, reloading:", e);
        window.location.reload();
        // Return a component that renders nothing, while we reload!
        return () => null;
      }),
    options
  );
}
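
// A minimal usage sketch (hypothetical module path, not part of the original
// module):
//   const HomePage = loadable(() => import("./HomePage"));
// If the chunk fails to load (e.g. right after a deploy removed it from the
// CDN), the page reloads itself instead of crashing.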
|
2021-01-22 14:12:07 -08:00
|
|
|
|
|
|
|
/**
|
|
|
|
* logAndCapture will print an error to the console, and send it to Sentry.
|
|
|
|
*
|
|
|
|
* This is useful when there's a graceful recovery path, but it's still a
|
|
|
|
* genuinely unexpected error worth logging.
|
|
|
|
*/
|
|
|
|
export function logAndCapture(e) {
|
|
|
|
console.error(e);
|
|
|
|
Sentry.captureException(e);
|
|
|
|
}

export function getGraphQLErrorMessage(error) {
  // If this is a GraphQL Bad Request error, show the message of the first
  // error the server returned. Otherwise, just use the normal error message!
  return (
    error?.networkError?.result?.errors?.[0]?.message || error?.message || null
  );
}

export function MajorErrorMessage({ error = null, variant = "unexpected" }) {
  // Log the detailed error to the console, so we can have a good debug
  // experience without the parent worrying about it!
  React.useEffect(() => {
    if (error) {
      console.error(error);
    }
  }, [error]);

  return (
    <Flex justify="center" marginTop="8">
      <Grid
        templateAreas='"icon title" "icon description" "icon details"'
        templateColumns="auto minmax(0, 1fr)"
        maxWidth="500px"
        marginX="8"
        columnGap="4"
      >
        <Box gridArea="icon" marginTop="2">
          <Box
            borderRadius="full"
            boxShadow="md"
            overflow="hidden"
            width="100px"
            height="100px"
          >
            <NextImage
              src={ErrorGrundoImg}
              alt="Distressed Grundo programmer"
              width={100}
              height={100}
              layout="fixed"
            />
          </Box>
        </Box>
        <Box gridArea="title" fontSize="lg" marginBottom="1">
          {variant === "unexpected" && <>Ah dang, I broke it 😖</>}
          {variant === "network" && <>Oops, it didn't work, sorry 😖</>}
          {variant === "not-found" && <>Oops, page not found 😖</>}
        </Box>
        <Box gridArea="description" marginBottom="2">
          {variant === "unexpected" && (
            <>
              There was an error displaying this page. I'll get info about it
              automatically, but you can tell me more at{" "}
              <Link href="mailto:matchu@openneo.net" color="green.400">
                matchu@openneo.net
              </Link>
              !
            </>
          )}
          {variant === "network" && (
            <>
              There was an error displaying this page. Check your internet
              connection and try again—and if you keep having trouble, please
              tell me more at{" "}
              <Link href="mailto:matchu@openneo.net" color="green.400">
                matchu@openneo.net
              </Link>
              !
            </>
          )}
          {variant === "not-found" && (
            <>
              We couldn't find this page. Maybe it's been deleted? Check the URL
              and try again—and if you keep having trouble, please tell me more
              at{" "}
              <Link href="mailto:matchu@openneo.net" color="green.400">
                matchu@openneo.net
              </Link>
              !
            </>
          )}
        </Box>
        {error && (
          <Box gridArea="details" fontSize="xs" opacity="0.8">
            <WarningIcon
              marginRight="1.5"
              marginTop="-2px"
              aria-label="Error message"
            />
            "{getGraphQLErrorMessage(error)}"
          </Box>
        )}
      </Grid>
    </Flex>
  );
}

export function TestErrorSender() {
  React.useEffect(() => {
    if (window.location.href.includes("send-test-error-for-sentry")) {
      throw new Error("Test error for Sentry");
    }
  });

  return null;
}