finnow/finnow-api/source/api_mapping.d

279 lines
9.4 KiB
D

module api_mapping;
import handy_http_primitives;
import handy_http_handlers.path_handler;
import handy_http_handlers.filtered_handler;
import slf4d;
/**
* Defines the Finnow API mapping with a main PathHandler.
* Params:
* webOrigin = The origin to use when configuring CORS headers.
* Returns: The handler to plug into an HttpServer.
*/
/**
 * Builds the full Finnow API handler tree: a public PathHandler wrapping an
 * authenticated PathHandler, all enclosed in a chain of cross-cutting filters.
 * Params:
 *   webOrigin = The origin to use when configuring CORS headers.
 * Returns: The handler to plug into an HttpServer.
 */
HttpRequestHandler mapApiHandlers(string webOrigin) {
    PathHandler openEndpoints = new PathHandler();
    PathHandler securedEndpoints = new PathHandler();

    // Endpoints that don't require authentication:
    openEndpoints.addMapping(HttpMethod.GET, "/api/status", HttpRequestHandler.of(&getStatus));
    openEndpoints.addMapping(HttpMethod.OPTIONS, "/**", HttpRequestHandler.of(&getOptions));
    // The download endpoint is deliberately public! It authenticates via a
    // token in the query params instead of the Authorization header.
    import attachment.api;
    openEndpoints.registerHandlers!(attachment.api);
    import auth.api_public;
    openEndpoints.registerHandlers!(auth.api_public);

    // Dev endpoint for sample data: REMOVE BEFORE DEPLOYING!!!
    // h.map(HttpMethod.POST, "/sample-data", &sampleDataEndpoint);

    // Endpoints that require an authenticated user:
    import auth.api;
    securedEndpoints.registerHandlers!(auth.api);
    import profile.api;
    securedEndpoints.registerHandlers!(profile.api);
    import account.api;
    securedEndpoints.registerHandlers!(account.api);
    import transaction.api;
    securedEndpoints.registerHandlers!(transaction.api);
    import analytics.api;
    securedEndpoints.registerHandlers!(analytics.api);
    import data_api;
    securedEndpoints.registerHandlers!(data_api);

    // Everything under /api/** that isn't matched by one of the public
    // mappings above must pass the authentication filter first.
    import auth.service : AuthenticationFilter;
    HttpRequestFilter authFilter = new AuthenticationFilter();
    openEndpoints.addMapping("/api/**", new FilteredHandler(
        [authFilter],
        securedEndpoints
    ));

    // Wrap the whole tree in the cross-cutting filters. Note the ordering:
    // the exception-handling filter is innermost, so it only catches errors
    // thrown downstream of it, not those thrown by the outer filters.
    return new FilteredHandler(
        [
            cast(HttpRequestFilter) new CorsFilter(webOrigin),
            cast(HttpRequestFilter) new ContentLengthFilter(),
            cast(HttpRequestFilter) new TokenBucketRateLimitingFilter(10, 50),
            cast(HttpRequestFilter) new ExceptionHandlingFilter()
        ],
        openEndpoints
    );
}
/**
 * Public health-check endpoint: replies with the plain-text body "online"
 * so that monitoring tools can verify the server is reachable.
 */
private void getStatus(ref ServerHttpRequest request, ref ServerHttpResponse response) {
    response.writeBodyString("online", ContentTypes.TEXT_PLAIN);
}
/**
 * Catch-all handler for CORS preflight OPTIONS requests. The body is empty
 * on purpose: the CORS headers are added by CorsFilter, so an empty 200 OK
 * response is all that's needed here.
 */
private void getOptions(ref ServerHttpRequest request, ref ServerHttpResponse response) {
    // Do nothing, just return 200 OK.
}
/**
 * Dev-only endpoint that kicks off sample-data generation on a background
 * thread and returns immediately, without waiting for the work to finish.
 * Only reachable when its (normally commented-out) mapping is enabled.
 */
private void sampleDataEndpoint(ref ServerHttpRequest request, ref ServerHttpResponse response) {
    import core.thread;
    import slf4d;
    import util.sample_data;

    auto worker = new Thread(() {
        try {
            generateSampleData();
        } catch (Exception e) {
            error("Error while generating sample data.", e);
        }
    });
    worker.start();
    info("Started new thread to generate sample data.");
}
/**
* A filter that adds CORS response headers.
*/
/**
 * A filter that adds CORS response headers.
 */
private class CorsFilter : HttpRequestFilter {
    /// Origin value to echo back in Access-Control-Allow-Origin.
    private string webOrigin;

    this(string webOrigin) {
        this.webOrigin = webOrigin;
    }

    /// Attaches the CORS header set to every response, then continues the chain.
    void doFilter(ref ServerHttpRequest request, ref ServerHttpResponse response, FilterChain filterChain) {
        const string[2][] corsHeaders = [
            ["Access-Control-Allow-Origin", webOrigin],
            ["Access-Control-Allow-Methods", "*"],
            ["Access-Control-Allow-Headers", "Authorization, Content-Type"],
            ["Access-Control-Expose-Headers", "Content-Disposition"]
        ];
        foreach (header; corsHeaders) {
            response.headers.add(header[0], header[1]);
        }
        filterChain.doFilter(request, response);
    }
}
/**
* A filter that rejects requests with a body that's too large, to avoid issues
* later with handling such large objects in memory.
*/
/**
 * A filter that rejects requests with a body that's too large, to avoid issues
 * later with handling such large objects in memory.
 */
private class ContentLengthFilter : HttpRequestFilter {
    // Maximum allowed request body size in bytes.
    // NOTE(review): the original comment said "2MB limit" but the value is
    // 1024 * 1024 * 20 = 20 MiB; the code is taken as authoritative here.
    const MAX_LENGTH = 1024 * 1024 * 20; // 20 MiB limit

    /// Rejects the request with 413 Payload Too Large when the declared
    /// Content-Length exceeds MAX_LENGTH; otherwise continues the chain.
    /// Requests without a Content-Length header are passed through unchecked.
    void doFilter(ref ServerHttpRequest request, ref ServerHttpResponse response, FilterChain filterChain) {
        if ("Content-Length" in request.headers) {
            ulong contentLength = request.getHeaderAs!ulong("Content-Length");
            if (contentLength > MAX_LENGTH) {
                warnF!"Received request with content length of %d, larger than max allowed %d bytes."(
                    contentLength,
                    MAX_LENGTH
                );
                import std.conv;
                response.status = HttpStatus.PAYLOAD_TOO_LARGE;
                response.writeBodyString(
                    "Request body is too large. Must be at most " ~ MAX_LENGTH.to!string ~ " bytes."
                );
                return; // Don't propagate the filter.
            }
        }
        filterChain.doFilter(request, response);
    }
}
/**
* A filter that catches any exception thrown by the filter chain, and nicely
* formats the response status and message.
*/
/**
 * A filter that catches any exception thrown by the filter chain, and nicely
 * formats the response status and message.
 */
private class ExceptionHandlingFilter : HttpRequestFilter {
    /// Runs the rest of the chain inside a try block, mapping failures to
    /// HTTP responses. Catch clauses are ordered most-specific first:
    /// HttpStatusException carries its own status; any other Exception
    /// becomes a 500 with the message exposed to the client; a non-Exception
    /// Throwable (e.g. Error) still gets a 500 response written, but is then
    /// re-thrown so the server/runtime can see the fatal condition.
    void doFilter(ref ServerHttpRequest request, ref ServerHttpResponse response, FilterChain filterChain) {
        try {
            filterChain.doFilter(request, response);
        } catch (HttpStatusException e) {
            // Expected, handler-declared failure: use its status and message.
            response.status = e.status;
            response.writeBodyString(e.message.idup);
        } catch (Exception e) {
            error(e);
            response.status = HttpStatus.INTERNAL_SERVER_ERROR;
            // NOTE(review): e.msg is sent to the client; presumably acceptable
            // for this API, but confirm no sensitive details can leak here.
            response.writeBodyString("An error occurred: " ~ e.msg);
        } catch (Throwable e) {
            errorF!"A throwable was caught! %s %s"(e.msg, e.info);
            response.status = HttpStatus.INTERNAL_SERVER_ERROR;
            response.writeBodyString("An error occurred.");
            // Deliberately re-thrown: Errors are not safe to swallow.
            throw e;
        }
    }
}
/**
* A filter that uses a shared token bucket to limit clients' requests. Each
* client's IP address is used as the identifier, and each client is given a
* maximum of N requests to make, and a rate at which that limit replenishes
* over time.
*/
/**
 * A filter that uses a shared token bucket to limit clients' requests. Each
 * client's IP address is used as the identifier, and each client is given a
 * maximum of N requests to make, and a rate at which that limit replenishes
 * over time.
 */
private class TokenBucketRateLimitingFilter : HttpRequestFilter {
    import std.datetime : Duration, Clock, SysTime, seconds, msecs;
    import std.math : floor;
    import std.algorithm : min;

    /// Per-client rate-limiting state.
    private static struct TokenBucket {
        /// The number of tokens in this bucket.
        uint tokens;
        /// The timestamp at which a token was last removed from this bucket.
        SysTime lastRequest;
    }

    /// The internal set of token buckets, mapped to client addresses.
    private shared TokenBucket[string] tokenBuckets;
    /// The number of tokens that are added to each bucket, per second.
    private const uint tokensPerSecond;
    /// The maximum number of tokens that each bucket can hold.
    private const uint maxTokens;

    /**
     * Params:
     *   tokensPerSecond = Rate at which each client's bucket refills. Must be
     *                     positive (it is used as a divisor in clearOldBuckets).
     *   maxTokens = Burst capacity of each bucket. Must be positive.
     */
    this(uint tokensPerSecond, uint maxTokens) {
        assert(tokensPerSecond > 0, "tokensPerSecond must be positive.");
        assert(maxTokens > 0, "maxTokens must be positive.");
        this.tokensPerSecond = tokensPerSecond;
        this.maxTokens = maxTokens;
    }

    /**
     * Takes one token from the client's bucket and continues the chain, or
     * responds with 429 Too Many Requests if the bucket is empty. All bucket
     * bookkeeping happens under a single lock; the response is written
     * outside of it.
     */
    void doFilter(ref ServerHttpRequest request, ref ServerHttpResponse response, FilterChain filterChain) {
        string clientAddr = getClientId(request);
        bool shouldBlockRequest = false;
        synchronized {
            const now = Clock.currTime();
            TokenBucket* bucket = getOrCreateBucket(clientAddr, now);
            incrementTokensForElapsedTime(bucket, now);
            if (bucket.tokens < 1) {
                shouldBlockRequest = true;
            } else {
                bucket.tokens--;
            }
            bucket.lastRequest = now;
            clearOldBuckets();
        }
        if (shouldBlockRequest) {
            infoF!"Rate-limiting client %s because they have made too many requests."(clientAddr);
            response.status = HttpStatus.TOO_MANY_REQUESTS;
            response.writeBodyString("You have made too many requests to the API.");
        } else {
            filterChain.doFilter(request, response);
        }
    }

    /**
     * Gets a string identifying the client who made the request.
     * Params:
     *   req = The request.
     * Returns: A string uniquely identifying the client.
     */
    private string getClientId(in ServerHttpRequest req) {
        import handy_http_transport.helpers : indexOf;
        string clientAddr = req.clientAddress.toString();
        // Strip the port suffix so all connections from one host share a
        // bucket. NOTE(review): truncating at the FIRST colon mangles IPv6
        // addresses (e.g. "::1" becomes ""); confirm what clientAddress
        // .toString() produces for IPv6 before relying on this.
        auto portIdx = indexOf(clientAddr, ':');
        if (portIdx != -1) {
            clientAddr = clientAddr[0..portIdx];
        }
        return clientAddr;
    }

    /**
     * Gets a pointer to a token bucket for a given client, creating it first
     * if it doesn't exist yet. New buckets start full (maxTokens).
     * Params:
     *   clientAddr = The client's address.
     *   now = The current timestamp, used to initialize new buckets.
     * Returns: A pointer to the token bucket in this filter's internal mapping.
     */
    private TokenBucket* getOrCreateBucket(string clientAddr, SysTime now) {
        // Cast away `shared`: safe here only because every caller holds the
        // lock taken in doFilter's synchronized block.
        TokenBucket[string] unsharedBuckets = cast(TokenBucket[string]) tokenBuckets;
        TokenBucket* bucket = clientAddr in unsharedBuckets;
        if (bucket is null) {
            unsharedBuckets[clientAddr] = TokenBucket(maxTokens, now);
            bucket = clientAddr in unsharedBuckets;
        }
        return bucket;
    }

    /**
     * Increments the number of tokens in a client's bucket based on how much
     * time has elapsed since the last time they made a request. This filter
     * has a defined `tokensPerSecond`, as well as a `maxTokens`, so we know
     * how long it takes for a bucket to fill up.
     * Params:
     *   bucket = The bucket to fill with tokens.
     *   now = The current timestamp.
     */
    private void incrementTokensForElapsedTime(TokenBucket* bucket, SysTime now) {
        Duration timeSinceLastRequest = now - bucket.lastRequest;
        // Whole tokens only; fractional accrual is discarded by floor().
        const tokensAddedSinceLastRequest = floor((timeSinceLastRequest.total!"msecs") * (tokensPerSecond / 1000.0));
        bucket.tokens = cast(uint) min(bucket.tokens + tokensAddedSinceLastRequest, maxTokens);
    }

    /**
     * Removes any token buckets that haven't had a request in a while, and
     * thus are full. This keeps our memory footprint smaller.
     */
    private void clearOldBuckets() {
        // Time for an empty bucket to refill completely is
        // maxTokens / tokensPerSecond seconds. The original code multiplied
        // (seconds(maxTokens * tokensPerSecond)), which kept idle buckets
        // around tokensPerSecond^2 times longer than intended. Computed in
        // milliseconds to avoid truncating to zero when tokensPerSecond
        // exceeds maxTokens.
        const Duration fillTime = msecs((cast(ulong) maxTokens * 1000) / tokensPerSecond);
        // Cast away `shared`: safe only under the lock held by doFilter.
        TokenBucket[string] unsharedBuckets = cast(TokenBucket[string]) tokenBuckets;
        foreach (id; unsharedBuckets.byKey()) {
            TokenBucket bucket = unsharedBuckets[id];
            const Duration timeSinceLastRequest = Clock.currTime() - bucket.lastRequest;
            if (timeSinceLastRequest > fillTime) {
                unsharedBuckets.remove(id);
            }
        }
    }
}