// Set up Honeycomb tracing. This must run before other requires, so the
// beeline can instrument them. The dataset is split by environment, so dev
// traffic doesn't pollute production traces.
const isProduction = process.env.NODE_ENV === "production";
const beeline = require("honeycomb-beeline")({
  writeKey: process.env.HONEYCOMB_WRITE_KEY,
  dataset: isProduction
    ? "Dress to Impress (2020)"
    : "Dress to Impress (2020, dev)",
  serviceName: "impress-2020-gql-server",
});
|
2020-04-25 23:04:49 -07:00
|
|
|
const { ApolloServer } = require("../src/server/lib/apollo-server-vercel");
|
2020-04-22 13:03:32 -07:00
|
|
|
const { config } = require("../src/server");
|
|
|
|
|
|
|
|
// Build the Apollo GraphQL server from the shared server config, and turn it
// into a Vercel-style `(req, res)` handler. Both are created once at module
// load time, rather than per-request.
const server = new ApolloServer(config);

const serverHandler = server.createHandler();
/**
 * Vercel serverless entry point: delegate the request to the Apollo GraphQL
 * handler, then flush the Honeycomb trace and end the response.
 *
 * @param {import("http").IncomingMessage} req - incoming HTTP request
 * @param {import("http").ServerResponse} res - HTTP response to write to
 */
async function handle(req, res) {
  try {
    await serverHandler(req, res);
  } finally {
    // As a sneaky trick, we require the Honeycomb trace to finish before the
    // request formally finishes. This... is technically a slowdown, I'm not sure
    // how much of one. Hopefully not too much?
    // https://vercel.com/docs/platform/limits#streaming-responses
    //
    // NOTE(review): this now runs in a `finally`, so the trace is flushed and
    // the response is ended even when the GraphQL handler throws — previously
    // both steps were skipped on error, silently dropping the trace. The
    // exception itself still propagates to the platform unchanged.
    await beeline.flush();
    res.end();
  }
}

export default handle;