first commit

sam 2025-03-13 18:14:21 +13:00
commit 5692aa6d1d
70 changed files with 13939 additions and 0 deletions

2
.gitignore vendored Normal file
View file

@@ -0,0 +1,2 @@
lph-11
*.bin

30
convert/convert.odin Normal file
View file

@@ -0,0 +1,30 @@
package convert
import "core:encoding/cbor"
import "core:encoding/json"
import "core:os"
import "core:strings"
Data :: struct {
data: [dynamic][]string,
}
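// Reads the text file given as the first CLI argument, lowercases it, splits each
// non-empty line into space-separated words, and writes the result as JSON to the
// file given as the second argument.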
main :: proc() {
bytes, _ := os.read_entire_file(os.args[1])
str := string(bytes)
str, _ = strings.to_lower(str)
data: Data
lines := strings.split_lines(str)
for &line in lines {
line = strings.trim_space(line)
if line == "" do continue
sp := strings.split(line, " ")
append(&data.data, sp)
}
out, _ := json.marshal(data)
os.write_entire_file(os.args[2], out)
}

View file

@@ -0,0 +1,167 @@
package discord_interactions
import "core:crypto/ed25519"
import "core:encoding/hex"
import "core:encoding/json"
import "core:fmt"
import "core:io"
import "core:log"
import "core:net"
import "core:os"
import REST "../discord/rest"
import http "../odin-http"
import "../odin-http/client"
Interaction :: REST.Interaction
Payload :: REST.Payload
Webhook_Payload :: REST.Webhook_Payload
ReqResPair :: struct {
req: ^http.Request,
res: ^http.Response,
}
Config :: struct {
token: string,
port: int,
interaction_endpoint: string,
}
State :: struct {
config: Config,
application: REST.Application,
command_handlers: map[string]Command_Handler,
}
Command_Handler :: #type proc(interaction: Interaction) -> Payload
Error :: union {
os.Error,
json.Error,
json.Unmarshal_Error,
client.Error,
client.Body_Error,
}
DISCORD_API :: "https://discord.com/api/v10"
state: State
load_config :: proc(filename: string) -> Error {
config_bytes, config_read_err := os.read_entire_file_or_err(filename)
if config_read_err != nil {
return config_read_err
}
json.unmarshal(config_bytes, &state.config) or_return
headers: http.Headers
http.headers_set(&headers, "Authorization", fmt.tprint("Bot", state.config.token))
request: client.Request
client.request_init(&request, .Get)
defer client.request_destroy(&request)
request.headers = headers
res, res_err := client.request(&request, DISCORD_API + "/applications/@me")
if res_err != nil {
return res_err
}
body, was_alloc, body_err := client.response_body(&res)
if body_err != nil {
return body_err
}
#partial switch v in body {
case client.Body_Plain:
json.unmarshal(transmute([]u8)v, &state.application) or_return
}
return nil
}
serve :: proc() -> net.Network_Error {
s: http.Server
http.server_shutdown_on_interrupt(&s)
router: http.Router
http.router_init(&router)
defer http.router_destroy(&router)
http.route_post(&router, state.config.interaction_endpoint, http.handler(interactions))
routed := http.router_handler(&router)
port := state.config.port == 0 ? 8080 : state.config.port
log.infof("Listening at http://{}:{}", net.address_to_string(net.IP4_Loopback), port)
http.listen_and_serve(&s, routed, net.Endpoint{address = net.IP4_Loopback, port = port}) or_return
return nil
}
register_command :: proc(command_name: string, handler: Command_Handler) {
state.command_handlers[command_name] = handler
}
@(private)
interactions :: proc(req: ^http.Request, res: ^http.Response) {
pair := new(ReqResPair)
pair.req = req
pair.res = res
http.headers_set_close(&res.headers)
http.body(req, -1, pair, proc(userdata: rawptr, body: http.Body, err: http.Body_Error) {
reqres := cast(^ReqResPair)userdata
req := reqres.req
res := reqres.res
interaction: Interaction
json.unmarshal(transmute([]u8)body, &interaction)
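// Discord signs every interaction request: the Ed25519 signature in the
// X-Signature-Ed25519 header must verify over the X-Signature-Timestamp header
// concatenated with the raw request body, using the application's public key.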
signature, signature_ok := hex.decode(
transmute([]u8)http.headers_get(req.headers, "X-Signature-Ed25519"),
)
if !signature_ok {
log.error("Failed to decode signature")
return
}
timestamp := http.headers_get(req.headers, "X-Signature-Timestamp")
ed25519_public_key: ed25519.Public_Key
public_key_bytes, public_key_ok := hex.decode(transmute([]u8)state.application.verify_key)
if !public_key_ok {
log.error("Failed to decode public key")
return
}
ed25519.public_key_set_bytes(&ed25519_public_key, public_key_bytes)
if !ed25519.verify(
&ed25519_public_key,
transmute([]u8)fmt.tprintf("{}{}", timestamp, body),
signature,
) {
http.respond_with_status(res, .Unauthorized)
return
}
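// Interaction type 1 is a PING (sent by Discord to verify the endpoint) and must be
// answered with a PONG payload (type 1); type 2 is an application command invocation.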
switch interaction.type {
case 1:
if err := http.respond_json(res, Payload{type = 1}); err != nil {
log.error("Failed to marshal payload:", err)
}
return
case 2:
handler, found := state.command_handlers[interaction.data.name]
if found {
if err := http.respond_json(res, handler(interaction)); err != nil {
log.error("Failed to marshal payload:", err)
}
} else {
log.debug("Skipping unrecognized command:", interaction.data.name)
}
return
}
})
}

View file

@@ -0,0 +1,36 @@
package discord
Payload :: struct {
type: int,
data: Message,
}
Webhook_Payload :: struct {
content: string,
}
Message :: struct {
content: string,
}
Interaction :: struct {
type: int,
data: Interaction_Data,
token: string,
}
Option :: struct {
name: string,
type: int,
value: string,
}
Interaction_Data :: struct {
name: string,
options: []Option,
}
Application :: struct {
verify_key: string,
id: string,
}

160
main.odin Normal file
View file

@@ -0,0 +1,160 @@
package main
import "core:encoding/cbor"
import "core:encoding/json"
import "core:fmt"
import "core:log"
import "core:math/rand"
import "core:os"
import "core:strings"
import "core:thread"
import "core:time"
import di "discord-interactions"
import client "discord-interactions/odin-http/client"
PRODUCE_MARKOV :: #config(PRODUCE_MARKOV, false)
SEQUENCE_LENGTH :: #config(SEQUENCE_LENGTH, 5)
Word :: struct {
proceeding: map[string]int `json:"p"`,
}
Data :: struct {
data: [][]string,
}
Dist :: struct {
word: string,
sum: int,
}
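// Records that next_word was observed immediately following previous_word
// (previous_word is a joined sequence of up to SEQUENCE_LENGTH-1 words).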
process_words :: proc(previous_word, next_word: string, word_map: ^map[string]Word) {
word_entry, found := word_map[previous_word]
if !found {
word_entry = Word{}
}
word_entry.proceeding[next_word] += 1
word_map[previous_word] = word_entry
}
word_map: map[string]Word
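// Looks up suffixes of the input in the markov map, starting from a randomly chosen
// position within the last SEQUENCE_LENGTH words and trying progressively shorter
// suffixes until a known key is found.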
try_find :: proc(input: []string) -> (word: Maybe(Word)) {
for i in max(0, len(input) - rand.int_max(SEQUENCE_LENGTH) - 1) ..< len(input) {
concat := strings.join(input[i:], " ")
found: bool
word, found = word_map[concat]
if found {
break
}
}
return
}
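// Extends the input by repeatedly sampling a follower word weighted by observed counts:
// dists holds cumulative sums, so a random value in [0, sum) picks each word with
// probability proportional to its count.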
generate :: proc(input: string) -> string {
generated: [dynamic]string
append(&generated, input)
maybe_word := try_find(strings.split(input, " "))
if maybe_word == nil {
return "aint got nothing for that in the ol' markov chain"
}
word := maybe_word.?
loop: for {
dists: [dynamic]Dist
sum: int
for next_word, count in word.proceeding {
sum += count
append(&dists, Dist{word = next_word, sum = sum})
}
if sum <= 0 {
break
}
rand_val := rand.int_max(sum)
for dist in dists {
if rand_val < dist.sum {
append(&generated, dist.word)
maybe_word = try_find(generated[:])
if maybe_word == nil {
break loop
}
word = maybe_word.?
continue loop
}
}
}
return strings.join(generated[:], " ")
}
main :: proc() {
context.logger = log.create_console_logger(.Info)
di.load_config("config.json")
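// Built with -define:PRODUCE_MARKOV=true, this block rebuilds the markov chain from
// data.json and caches it as markov.cbor; normal builds just load the cached file below.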
when PRODUCE_MARKOV {
bytes, _ := os.read_entire_file("data.json")
data: Data
if err := json.unmarshal(bytes, &data); err != nil {
log.error(err)
}
for words in data.data {
for i in 0 ..< len(words) {
for j in i + 1 ..< min(i + SEQUENCE_LENGTH, len(words)) {
process_words(strings.join(words[i:j], " "), words[j], &word_map)
}
}
}
out, _ := cbor.marshal(word_map)
os.write_entire_file("markov.cbor", out)
fmt.println("Produced markov file")
}
markov_bytes, ok := os.read_entire_file("markov.cbor")
if !ok {
fmt.eprintln("Failed to read markov cbor")
}
cbor.unmarshal(string(markov_bytes), &word_map)
delete(markov_bytes)
di.register_command("lph", proc(interaction: di.Interaction) -> di.Payload {
thread.create_and_start_with_poly_data(interaction, proc(interaction: di.Interaction) {
request: client.Request
client.request_init(&request)
defer client.request_destroy(&request)
client.with_json(
&request,
di.Webhook_Payload{content = generate(interaction.data.options[0].value)},
)
request.method = .Patch
res, err := client.request(
&request,
fmt.tprintf(
"{}/webhooks/{}/{}/messages/@original",
di.DISCORD_API,
di.state.application.id,
interaction.token,
),
)
if err != nil {
log.error("Failed to send request:", err)
return
}
})
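// Type 5 is a deferred response: Discord shows a "thinking" state while the thread
// above generates the reply and PATCHes the original message via the webhook.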
return di.Payload{type = 5}
})
di.serve()
}

12
odin-http/.editorconfig Normal file
View file

@@ -0,0 +1,12 @@
root = true
[*]
end_of_line = lf
insert_final_newline = true
indent_style = tab
indent_size = 4
trim_trailing_whitespace = true
[*.yml]
indent_style = space
indent_size = 2

36
odin-http/.github/workflows/ci.yml vendored Normal file
View file

@@ -0,0 +1,36 @@
name: CI
on:
push:
workflow_dispatch:
schedule:
- cron: 0 20 * * *
env:
FORCE_COLOR: "1"
jobs:
check:
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest, macos-latest, windows-latest, macos-13] # macos-latest is ARM, 13 is Intel
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v4
- uses: laytan/setup-odin@v2
with:
token: ${{ secrets.GITHUB_TOKEN }}
release: false
llvm-version: 17
- name: Report
run: odin report
- name: Non-blocking IO test
run: odin test nbio
timeout-minutes: 1
- name: Run client example
run: odin run examples/client
timeout-minutes: 1
- name: Odin check
if: success() || failure()
run: odin check examples/complete -vet --strict-style && odin check examples/client -vet --strict-style
timeout-minutes: 1

50
odin-http/.github/workflows/docs.yml vendored Normal file
View file

@@ -0,0 +1,50 @@
name: Deploy docs to GitHub pages
on:
push:
branches: [main]
workflow_dispatch:
env:
FORCE_COLOR: "1"
permissions:
contents: read
pages: write
id-token: write
concurrency:
group: "pages"
cancel-in-progress: true
jobs:
docs:
environment:
name: github-pages
url: ${{ steps.deployment.outputs.page_url }}
runs-on: ubuntu-latest
steps:
- uses: laytan/setup-odin@v2
with:
token: ${{ secrets.GITHUB_TOKEN }}
- name: Report
run: odin report
- name: Get commonmark
run: sudo apt-get install libcmark-dev
- name: Get and build Odin docs generator
run: |
cd /home/runner
git clone https://github.com/odin-lang/pkg.odin-lang.org odin-doc
cd odin-doc
# The /home/runner/odin directory is in the PATH so output it there.
odin build . -out:/home/runner/odin/odin-doc
cd /home/runner
- uses: actions/checkout@v4
- name: Generate documentation
run: ./docs/generate.sh
- uses: actions/configure-pages@v3
- uses: actions/upload-pages-artifact@v3
with:
path: ./docs/build
- uses: actions/deploy-pages@v4
id: deployment

116
odin-http/.github/workflows/openssl.yml vendored Normal file
View file

@@ -0,0 +1,116 @@
name: OpenSSL
on:
push:
paths: [".github/workflows/openssl.yml"]
branches: ["main"]
workflow_dispatch:
schedule:
- cron: 0 20 * * *
env:
FORCE_COLOR: "1"
concurrency:
group: "openssl"
cancel-in-progress: true
jobs:
check-updates:
runs-on: windows-latest
steps:
- uses: actions/checkout@v4
- id: current-release
shell: bash
run: |
VERSION=$(cat openssl/.version)
echo "version=$VERSION" >> $GITHUB_OUTPUT
echo "current version is $VERSION"
- uses: actions/github-script@v7
id: latest-release
with:
script: |
const latestRelease = await github.rest.repos.getLatestRelease({
owner: 'openssl',
repo: 'openssl',
});
core.setOutput('version', latestRelease.data.tag_name);
const asset = latestRelease.data.assets.find(asset => asset.name.endsWith('.tar.gz'));
if (asset) {
core.setOutput('url', asset.browser_download_url);
core.setOutput('version', latestRelease.data.tag_name);
core.info('latest version is ' + latestRelease.data.tag_name);
} else {
core.setFailed('No .tar.gz asset found in the latest release.');
}
- name: update .version
if: ${{ steps.current-release.outputs.version != steps.latest-release.outputs.version }}
shell: bash
run: |
echo "${{ steps.latest-release.outputs.version }}" > openssl/.version
- uses: ilammy/msvc-dev-cmd@0b201ec74fa43914dc39ae48a89fd1d8cb592756
if: ${{ steps.current-release.outputs.version != steps.latest-release.outputs.version }}
- uses: ilammy/setup-nasm@13cbeb366c45c4379d3478cdcbadd8295feb5028
if: ${{ steps.current-release.outputs.version != steps.latest-release.outputs.version }}
- name: download release
if: ${{ steps.current-release.outputs.version != steps.latest-release.outputs.version }}
shell: bash
run: |
curl -L -o openssl.tar.gz ${{ steps.latest-release.outputs.url }}
file openssl.tar.gz
- name: unzip release
if: ${{ steps.current-release.outputs.version != steps.latest-release.outputs.version }}
shell: bash
run: |
tar -xzf openssl.tar.gz
- name: configure
if: ${{ steps.current-release.outputs.version != steps.latest-release.outputs.version }}
run: |
cd ${{ steps.latest-release.outputs.version }}
perl Configure no-legacy no-deprecated --release
- name: compile
if: ${{ steps.current-release.outputs.version != steps.latest-release.outputs.version }}
run: |
cd ${{ steps.latest-release.outputs.version }}
nmake
- name: test
if: ${{ steps.current-release.outputs.version != steps.latest-release.outputs.version }}
run: |
cd ${{ steps.latest-release.outputs.version }}
nmake test
- name: copy & clean
if: ${{ steps.current-release.outputs.version != steps.latest-release.outputs.version }}
shell: bash
run: |
rm -rf openssl/includes/windows/*
cd ${{ steps.latest-release.outputs.version }}
dir
cp libcrypto.lib ../openssl/includes/windows
cp libssl.lib ../openssl/includes/windows
cp libcrypto_static.lib ../openssl/includes/windows
cp libssl_static.lib ../openssl/includes/windows
cd ..
rm -rf openssl.tar.gz
rm -rf ${{ steps.latest-release.outputs.version }}
- name: pr
if: ${{ steps.current-release.outputs.version != steps.latest-release.outputs.version }}
uses: peter-evans/create-pull-request@c5a7806660adbe173f04e3e038b0ccdcd758773c
with:
title: |
Update bundled OpenSSL libraries to ${{ steps.latest-release.outputs.version }}
commit-message: |
openssl: update bundled libraries to ${{ steps.latest-release.outputs.version }}

12
odin-http/.gitignore vendored Normal file
View file

@@ -0,0 +1,12 @@
*.bin
ols.json
opm
Taskfile.yml
*.exe
docs/build
# Example binaries.
minimal
complete
readme
routing

19
odin-http/LICENSE Normal file
View file

@@ -0,0 +1,19 @@
Copyright (c) 2023 Laytan Laats
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

226
odin-http/README.md Normal file
View file

@@ -0,0 +1,226 @@
# Odin HTTP
An HTTP/1.1 implementation for Odin, written purely in Odin (besides SSL).
See generated package documentation at [odin-http.laytan.dev](https://odin-http.laytan.dev).
See the examples below or the examples directory.
## Compatibility
This is beta software: it is confirmed to work in my own use cases, but it can certainly contain edge cases and bugs that I did not catch.
Please file issues for any bug or suggestion you encounter/have.
I am usually on a recent master version of Odin, and commits will be made with new features if applicable; backwards compatibility, or even
stable version compatibility, is not currently a thing.
Because this is still heavily in development, I do not hesitate to push API changes at the moment, so beware.
The package has been tested to work on Ubuntu Linux (other "normal" distros should work), macOS (M1 and Intel), and 64-bit Windows.
Other distributions or versions have not been tested and might not work.
## Dependencies
The *client* package depends on OpenSSL for making HTTPS requests.
This repository contains a copy of these libraries for ease of use on Windows.
For Linux, most distros come with OpenSSL; if not, you can install it with a package manager, usually under `libssl3`.
## Performance
Some small benchmarks have been done in the comparisons directory.
My main priority in terms of performance is currently Linux (because most servers end up there in production).
Other targets are still made to be performant, but benchmarking etc. is mostly done on Linux.
## IO implementations
Although these implementation details are not exposed when using the package, these are the underlying kernel APIs that are used:
- Windows: [IOCP (IO Completion Ports)](https://en.wikipedia.org/wiki/Input/output_completion_port)
- Linux: [io_uring](https://en.wikipedia.org/wiki/Io_uring)
- Darwin: [KQueue](https://en.wikipedia.org/wiki/Kqueue)
The IO part of this package can be used on its own for other types of applications; see the nbio directory for its documentation.
It has APIs for reading, writing, opening, closing and seeking files, and for accepting, connecting, sending, receiving and closing sockets (both UDP and TCP), fully cross-platform.
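Below is a rough sketch of what standalone use could look like. The exact procedure names and signatures (`nbio.IO`, `nbio.init`, `nbio.tick`, `nbio.destroy`) are assumed here for illustration; check the nbio package documentation for the real API.
```odin
package main

import nbio "../../nbio" // Change to the path of the nbio package.

main :: proc() {
	// One completion-based event loop; IO operations (accept, recv, send, read, write, ...)
	// are queued on it with a user pointer and a callback that is invoked from within `tick`.
	io: nbio.IO
	nbio.init(&io)
	defer nbio.destroy(&io)

	// Queue operations here, then drive the event loop.
	for {
		nbio.tick(&io)
	}
}
```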
## Server example
```odin
package main
import "core:fmt"
import "core:log"
import "core:net"
import "core:time"
import http "../.." // Change to path of package.
main :: proc() {
context.logger = log.create_console_logger(.Info)
s: http.Server
// Register a graceful shutdown when the program receives a SIGINT signal.
http.server_shutdown_on_interrupt(&s)
// Set up routing
router: http.Router
http.router_init(&router)
defer http.router_destroy(&router)
// Routes are tried in order.
// Route matching is implemented using an implementation of Lua patterns, see the docs on them here:
// https://www.lua.org/pil/20.2.html
// They are very similar to regex patterns but a bit more limited, which makes them much easier to implement since Odin does not have a regex implementation.
// Matches /users followed by any word (alphanumeric) followed by /comments and then / with any number.
// The word is available as req.url_params[0], and the number as req.url_params[1].
http.route_get(&router, "/users/(%w+)/comments/(%d+)", http.handler(proc(req: ^http.Request, res: ^http.Response) {
http.respond_plain(res, fmt.tprintf("user %s, comment: %s", req.url_params[0], req.url_params[1]))
}))
http.route_get(&router, "/cookies", http.handler(cookies))
http.route_get(&router, "/api", http.handler(api))
http.route_get(&router, "/ping", http.handler(ping))
http.route_get(&router, "/index", http.handler(index))
// Matches every get request that did not match another route.
http.route_get(&router, "(.*)", http.handler(static))
http.route_post(&router, "/ping", http.handler(post_ping))
routed := http.router_handler(&router)
log.info("Listening on http://localhost:6969")
err := http.listen_and_serve(&s, routed, net.Endpoint{address = net.IP4_Loopback, port = 6969})
fmt.assertf(err == nil, "server stopped with error: %v", err)
}
cookies :: proc(req: ^http.Request, res: ^http.Response) {
append(
&res.cookies,
http.Cookie{
name = "Session",
value = "123",
expires_gmt = time.now(),
max_age_secs = 10,
http_only = true,
same_site = .Lax,
},
)
http.respond_plain(res, "Yo!")
}
api :: proc(req: ^http.Request, res: ^http.Response) {
if err := http.respond_json(res, req.line); err != nil {
log.errorf("could not respond with JSON: %s", err)
}
}
ping :: proc(req: ^http.Request, res: ^http.Response) {
http.respond_plain(res, "pong")
}
index :: proc(req: ^http.Request, res: ^http.Response) {
http.respond_file(res, "examples/complete/static/index.html")
}
static :: proc(req: ^http.Request, res: ^http.Response) {
http.respond_dir(res, "/", "examples/complete/static", req.url_params[0])
}
post_ping :: proc(req: ^http.Request, res: ^http.Response) {
http.body(req, len("ping"), res, proc(res: rawptr, body: http.Body, err: http.Body_Error) {
res := cast(^http.Response)res
if err != nil {
http.respond(res, http.body_error_status(err))
return
}
if body != "ping" {
http.respond(res, http.Status.Unprocessable_Content)
return
}
http.respond_plain(res, "pong")
})
}
```
## Client example
```odin
package main
import "core:fmt"
import "../../client"
main :: proc() {
get()
post()
}
// basic get request.
get :: proc() {
res, err := client.get("https://www.google.com/")
if err != nil {
fmt.printf("Request failed: %s", err)
return
}
defer client.response_destroy(&res)
fmt.printf("Status: %s\n", res.status)
fmt.printf("Headers: %v\n", res.headers)
fmt.printf("Cookies: %v\n", res.cookies)
body, allocation, berr := client.response_body(&res)
if berr != nil {
fmt.printf("Error retrieving response body: %s", berr)
return
}
defer client.body_destroy(body, allocation)
fmt.println(body)
}
Post_Body :: struct {
name: string,
message: string,
}
// POST request with JSON.
post :: proc() {
req: client.Request
client.request_init(&req, .Post)
defer client.request_destroy(&req)
pbody := Post_Body{"Laytan", "Hello, World!"}
if err := client.with_json(&req, pbody); err != nil {
fmt.printf("JSON error: %s", err)
return
}
res, err := client.request(&req, "https://webhook.site/YOUR-ID-HERE")
if err != nil {
fmt.printf("Request failed: %s", err)
return
}
defer client.response_destroy(&res)
fmt.printf("Status: %s\n", res.status)
fmt.printf("Headers: %v\n", res.headers)
fmt.printf("Cookies: %v\n", res.cookies)
body, allocation, berr := client.response_body(&res)
if berr != nil {
fmt.printf("Error retrieving response body: %s", berr)
return
}
defer client.body_destroy(body, allocation)
fmt.println(body)
}
```

262
odin-http/allocator.odin Normal file
View file

@@ -0,0 +1,262 @@
#+private
#+build ignore
package http
// NOTE: currently not in use, had a strange crash I can't figure out.
import "core:container/queue"
import "core:log"
import "core:mem"
// Defaults, reassigned when server is set up.
initial_block_cap := mem.Kilobyte * 256
max_free_blocks_queued := 64
// A lean, growing, block based allocator.
//
// The first block is kept around after a `free_all` and only free'd using `allocator_destroy`,
// so it doesn't have to allocate it each time.
//
// Blocks start at the `initial_block_cap` (configurable) size and double in size after each new block.
//
// The last allocation is saved and can be freed with `free_with_size` or resized without
// taking up a whole new region in the block.
Allocator :: struct {
parent: mem.Allocator,
curr: ^Block,
cap: int,
last_alloc: rawptr,
}
Block :: struct {
prev: Maybe(^Block),
size: int,
total_size: int,
offset: int,
data: [0]byte,
}
allocator :: proc(a: ^Allocator) -> mem.Allocator {
return {
procedure = allocator_proc,
data = a,
}
}
allocator_init :: proc(a: ^Allocator, parent := context.allocator, loc := #caller_location) -> mem.Allocator_Error {
a.parent = parent
a.cap = initial_block_cap
a.curr = allocator_new_block(a, 0, 0, loc) or_return
return nil
}
allocator_proc :: proc(allocator_data: rawptr, mode: mem.Allocator_Mode,
size, alignment: int,
old_memory: rawptr, old_size: int,
loc := #caller_location) -> (bytes: []byte, err: mem.Allocator_Error) {
a := (^Allocator)(allocator_data)
switch mode {
case .Alloc:
return allocator_alloc_zerod(a, size, alignment, loc)
case .Alloc_Non_Zeroed:
return allocator_alloc_non_zerod(a, size, alignment, loc)
case .Free:
// We can only free if this was the last allocation done.
if old_memory == a.last_alloc {
a.curr.offset -= old_size
a.last_alloc = nil
return nil, nil
}
return nil, .Mode_Not_Implemented
case .Free_All:
allocator_free_all(a, loc)
return
case .Resize, .Resize_Non_Zeroed:
// Shrink, if it was the last alloc also decrease from block offset.
if old_size >= size {
if a.last_alloc == old_memory {
a.curr.offset -= old_size - size
}
return mem.byte_slice(old_memory, size), nil
}
// If this was the last alloc, and we have space in its block, keep same spot and just
// increase the offset.
if a.last_alloc == old_memory {
needed := size - old_size
got := a.curr.size - a.curr.offset
if needed <= got {
a.curr.offset += needed
return mem.byte_slice(old_memory, size), nil
}
}
// Resize with older than last allocation or doesn't fit in block, need to allocate new mem.
bytes = allocator_alloc_non_zerod(a, size, alignment, loc) or_return
copy(bytes, mem.byte_slice(old_memory, old_size))
return
case .Query_Features:
set := (^mem.Allocator_Mode_Set)(old_memory)
if set != nil {
set^ = {.Alloc, .Alloc_Non_Zeroed, .Free_All, .Resize, .Query_Features}
}
return nil, nil
case .Query_Info:
return nil, .Mode_Not_Implemented
case: unreachable()
}
}
allocator_new_block :: proc(a: ^Allocator, min_size: int, alignment: int, loc := #caller_location) -> (b: ^Block, err: mem.Allocator_Error) {
base_offset := max(alignment, size_of(Block))
total := max(a.cap, min_size + base_offset)
a.cap *= 2
assert_has_td(loc)
if bucket, has_bucket := &td.free_temp_blocks[total]; has_bucket {
if block, has_block := queue.pop_back_safe(bucket); has_block {
b = block
td.free_temp_blocks_count -= 1
}
}
if b == nil {
data := mem.alloc(total, max(16, align_of(Block)), a.parent, loc) or_return
b = (^Block)(data)
}
b.total_size = total
b.size = total - base_offset
b.offset = base_offset
b.prev = a.curr
a.curr = b
return
}
allocator_alloc_zerod :: proc(a: ^Allocator, size: int, alignment: int, loc := #caller_location) -> (bytes: []byte, err: mem.Allocator_Error) {
bytes, err = allocator_alloc_non_zerod(a, size, alignment, loc)
mem.zero_slice(bytes)
return
}
allocator_alloc_non_zerod :: proc(a: ^Allocator, size: int, alignment: int, loc := #caller_location) -> (bytes: []byte, err: mem.Allocator_Error) {
if size == 0 do return
block := a.curr
data := ([^]byte)(&block.data)
assert(block != nil, "you must initialize the allocator first", loc)
assert(alignment & (alignment-1) == 0, "non-power of two alignment", loc)
// TODO: handle int overflows.
needed := int(mem.align_forward_uint(uint(size), uint(alignment)))
if block.offset + needed > block.size {
block = allocator_new_block(a, needed, alignment, loc) or_return
data = ([^]byte)(&block.data)
}
alignment_offset := 0; {
ptr := uintptr(data[block.offset:])
mask := uintptr(alignment-1)
if ptr & mask != 0 {
alignment_offset = int(uintptr(alignment) - (ptr & mask))
}
}
block.offset += alignment_offset
bytes = data[block.offset:][:size]
block.offset += size
a.last_alloc = raw_data(bytes)
return
}
allocator_free_all :: proc(a: ^Allocator, loc := #caller_location) -> (blocks: int, total_size: int, total_used: int) {
blocks += 1
total_size += a.curr.size + size_of(Block)
total_used += a.curr.offset
for a.curr.prev != nil {
block := a.curr
blocks += 1
total_size += block.total_size
total_used += block.offset
a.curr = block.prev.?
allocator_free_block(a, block, loc)
}
a.curr.offset = 0
a.cap = initial_block_cap
return
}
allocator_destroy :: proc(a: ^Allocator, loc := #caller_location) {
allocator_free_all(a, loc)
allocator_free_block(a, a.curr, loc)
}
allocator_free_block :: proc(a: ^Allocator, b: ^Block, loc := #caller_location) {
assert_has_td(loc)
if td.free_temp_blocks_count > max_free_blocks_queued {
free(b, a.parent)
log.debug("max temp blocks reached, freeing the block")
return
}
bucket, is_initialized := &td.free_temp_blocks[b.total_size]
if !is_initialized {
td.free_temp_blocks[b.total_size] = {}
bucket = &td.free_temp_blocks[b.total_size]
queue.init(bucket, max_free_blocks_queued, allocator=td.free_temp_blocks.allocator)
}
b.prev = nil
queue.push(bucket, b)
td.free_temp_blocks_count += 1
}
import "core:testing"
@(test)
test_allocator_alignment_boundary :: proc(t: ^testing.T) {
arena: Allocator
allocator_init(&arena)
context.allocator = allocator(&arena)
_, _ = mem.alloc(int(arena.cap)-120)
_, err := mem.alloc(112, 32)
testing.expect_value(t, err, nil)
}
@(test)
test_temp_allocator_big_alloc_and_alignment :: proc(t: ^testing.T) {
arena: Allocator
allocator_init(&arena)
context.allocator = allocator(&arena)
mappy: map[[8]int]int
err := reserve(&mappy, 50000)
testing.expect_value(t, err, nil)
}
@(test)
test_temp_allocator_returns_correct_size :: proc(t: ^testing.T) {
arena: Allocator
allocator_init(&arena)
context.allocator = allocator(&arena)
bytes, err := mem.alloc_bytes(10, 16)
testing.expect_value(t, err, nil)
testing.expect_value(t, len(bytes), 10)
}

309
odin-http/body.odin Normal file
View file

@@ -0,0 +1,309 @@
package http
import "core:bufio"
import "core:io"
import "core:log"
import "core:net"
import "core:strconv"
import "core:strings"
Body :: string
Body_Callback :: #type proc(user_data: rawptr, body: Body, err: Body_Error)
Body_Error :: bufio.Scanner_Error
/*
Retrieves the request's body.
If the request has the chunked Transfer-Encoding header set, the chunks are all read and returned.
Otherwise, the Content-Length header is used to determine what to read and return it.
`max_length` can be used to set a maximum number of bytes we try to read; once it goes over this,
an error is returned.
Do not call this more than once.
**Tip** If an error is returned, easily respond with an appropriate error code like this: `http.respond(res, http.body_error_status(err))`.
*/
body :: proc(req: ^Request, max_length: int = -1, user_data: rawptr, cb: Body_Callback) {
assert(req._body_ok == nil, "you can only call body once per request")
enc_header, ok := headers_get_unsafe(req.headers, "transfer-encoding")
if ok && strings.has_suffix(enc_header, "chunked") {
_body_chunked(req, max_length, user_data, cb)
} else {
_body_length(req, max_length, user_data, cb)
}
}
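// A minimal usage sketch (editorial; it mirrors the complete example in the README):
// read a body of at most 4 bytes and echo "pong", or respond with an appropriate error status.
@(private)
_example_body_usage :: proc(req: ^Request, res: ^Response) {
	body(req, len("ping"), res, proc(res: rawptr, body: Body, err: Body_Error) {
		res := cast(^Response)res
		if err != nil {
			respond(res, body_error_status(err))
			return
		}
		respond_plain(res, "pong")
	})
}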
/*
Parses a URL encoded body, aka bodies with the 'Content-Type: application/x-www-form-urlencoded'.
Key&value pairs are percent decoded and put in a map.
*/
body_url_encoded :: proc(plain: Body, allocator := context.temp_allocator) -> (res: map[string]string, ok: bool) {
insert :: proc(m: ^map[string]string, plain: string, keys: int, vals: int, end: int, allocator := context.temp_allocator) -> bool {
has_value := vals != -1
key_end := vals - 1 if has_value else end
key := plain[keys:key_end]
val := plain[vals:end] if has_value else ""
// PERF: this could be a hot spot and I don't like that we allocate the decoded key and value here.
keye := (net.percent_decode(key, allocator) or_return) if strings.index_byte(key, '%') > -1 else key
vale := (net.percent_decode(val, allocator) or_return) if has_value && strings.index_byte(val, '%') > -1 else val
m[keye] = vale
return true
}
count := 1
for b in plain {
if b == '&' do count += 1
}
queries := make(map[string]string, count, allocator)
keys := 0
vals := -1
for b, i in plain {
switch b {
case '=':
vals = i + 1
case '&':
insert(&queries, plain, keys, vals, i) or_return
keys = i + 1
vals = -1
}
}
insert(&queries, plain, keys, vals, len(plain)) or_return
return queries, true
}
// Returns an appropriate status code for the given body error.
body_error_status :: proc(e: Body_Error) -> Status {
switch t in e {
case bufio.Scanner_Extra_Error:
switch t {
case .Too_Long: return .Payload_Too_Large
case .Too_Short, .Bad_Read_Count: return .Bad_Request
case .Negative_Advance, .Advanced_Too_Far: return .Internal_Server_Error
case .None: return .OK
case:
return .Internal_Server_Error
}
case io.Error:
switch t {
case .EOF, .Unknown, .No_Progress, .Unexpected_EOF:
return .Bad_Request
case .Empty, .Short_Write, .Buffer_Full, .Short_Buffer,
.Invalid_Write, .Negative_Read, .Invalid_Whence, .Invalid_Offset,
.Invalid_Unread, .Negative_Write, .Negative_Count:
return .Internal_Server_Error
case .None:
return .OK
case:
return .Internal_Server_Error
}
case: unreachable()
}
}
// "Decodes" a request body based on the content length header.
// Meant for internal usage, you should use `http.request_body`.
_body_length :: proc(req: ^Request, max_length: int = -1, user_data: rawptr, cb: Body_Callback) {
req._body_ok = false
len, ok := headers_get_unsafe(req.headers, "content-length")
if !ok {
cb(user_data, "", nil)
return
}
ilen, lenok := strconv.parse_int(len, 10)
if !lenok {
cb(user_data, "", .Bad_Read_Count)
return
}
if max_length > -1 && ilen > max_length {
cb(user_data, "", .Too_Long)
return
}
if ilen == 0 {
req._body_ok = true
cb(user_data, "", nil)
return
}
req._scanner.max_token_size = ilen
req._scanner.split = scan_num_bytes
req._scanner.split_data = rawptr(uintptr(ilen))
req._body_ok = true
scanner_scan(req._scanner, user_data, cb)
}
/*
"Decodes" a chunked transfer encoded request body.
Meant for internal usage, you should use `http.request_body`.
PERF: this could be made non-allocating by writing over the part of the body that contains the
metadata with the rest of the body, and then returning a slice of that, but it is some effort and
I don't think this functionality of HTTP is used that much anyway.
RFC 7230 4.1.3 pseudo-code:
length := 0
read chunk-size, chunk-ext (if any), and CRLF
while (chunk-size > 0) {
read chunk-data and CRLF
append chunk-data to decoded-body
length := length + chunk-size
read chunk-size, chunk-ext (if any), and CRLF
}
read trailer field
while (trailer field is not empty) {
if (trailer field is allowed to be sent in a trailer) {
append trailer field to existing header fields
}
read trailer-field
}
Content-Length := length
Remove "chunked" from Transfer-Encoding
Remove Trailer from existing header fields
*/
_body_chunked :: proc(req: ^Request, max_length: int = -1, user_data: rawptr, cb: Body_Callback) {
req._body_ok = false
on_scan :: proc(s: rawptr, size_line: string, err: bufio.Scanner_Error) {
s := cast(^Chunked_State)s
size_line := size_line
if err != nil {
s.cb(s.user_data, "", err)
return
}
// If there is a semicolon, discard everything after it,
// that would be chunk extensions which we currently have no interest in.
if semi := strings.index_byte(size_line, ';'); semi > -1 {
size_line = size_line[:semi]
}
size, ok := strconv.parse_int(string(size_line), 16)
if !ok {
log.infof("Encountered an invalid chunk size when decoding a chunked body: %q", string(size_line))
s.cb(s.user_data, "", .Bad_Read_Count)
return
}
// start scanning trailer headers.
if size == 0 {
scanner_scan(s.req._scanner, s, on_scan_trailer)
return
}
if s.max_length > -1 && strings.builder_len(s.buf) + size > s.max_length {
s.cb(s.user_data, "", .Too_Long)
return
}
s.req._scanner.max_token_size = size
s.req._scanner.split = scan_num_bytes
s.req._scanner.split_data = rawptr(uintptr(size))
scanner_scan(s.req._scanner, s, on_scan_chunk)
}
on_scan_chunk :: proc(s: rawptr, token: string, err: bufio.Scanner_Error) {
s := cast(^Chunked_State)s
if err != nil {
s.cb(s.user_data, "", err)
return
}
s.req._scanner.max_token_size = bufio.DEFAULT_MAX_SCAN_TOKEN_SIZE
s.req._scanner.split = scan_lines
strings.write_string(&s.buf, token)
on_scan_empty_line :: proc(s: rawptr, token: string, err: bufio.Scanner_Error) {
s := cast(^Chunked_State)s
if err != nil {
s.cb(s.user_data, "", err)
return
}
assert(len(token) == 0)
scanner_scan(s.req._scanner, s, on_scan)
}
scanner_scan(s.req._scanner, s, on_scan_empty_line)
}
on_scan_trailer :: proc(s: rawptr, line: string, err: bufio.Scanner_Error) {
s := cast(^Chunked_State)s
// Headers are done, success.
if err != nil || len(line) == 0 {
headers_delete_unsafe(&s.req.headers, "trailer")
te_header := headers_get_unsafe(s.req.headers, "transfer-encoding")
new_te_header := strings.trim_suffix(te_header, "chunked")
s.req.headers.readonly = false
headers_set_unsafe(&s.req.headers, "transfer-encoding", new_te_header)
s.req.headers.readonly = true
s.req._body_ok = true
s.cb(s.user_data, strings.to_string(s.buf), nil)
return
}
key, ok := header_parse(&s.req.headers, string(line))
if !ok {
log.infof("Invalid header when decoding chunked body: %q", string(line))
s.cb(s.user_data, "", .Unknown)
return
}
// A recipient MUST ignore (or consider as an error) any fields that are forbidden to be sent in a trailer.
if !header_allowed_trailer(key) {
log.infof("Invalid trailer header received, discarding it: %q", key)
headers_delete(&s.req.headers, key)
}
scanner_scan(s.req._scanner, s, on_scan_trailer)
}
Chunked_State :: struct {
req: ^Request,
max_length: int,
user_data: rawptr,
cb: Body_Callback,
buf: strings.Builder,
}
s := new(Chunked_State, context.temp_allocator)
s.buf.buf.allocator = context.temp_allocator
s.req = req
s.max_length = max_length
s.user_data = user_data
s.cb = cb
s.req._scanner.split = scan_lines
scanner_scan(s.req._scanner, s, on_scan)
}

View file

@@ -0,0 +1,510 @@
// package provides a very simple (for now) HTTP/1.1 client.
package client
import "core:bufio"
import "core:bytes"
import "core:c"
import "core:encoding/json"
import "core:io"
import "core:log"
import "core:net"
import "core:strconv"
import "core:strings"
import http ".."
import openssl "../openssl"
Request :: struct {
method: http.Method,
headers: http.Headers,
cookies: [dynamic]http.Cookie,
body: bytes.Buffer,
}
// Initializes the request with sane defaults using the given allocator.
request_init :: proc(r: ^Request, method := http.Method.Get, allocator := context.allocator) {
r.method = method
http.headers_init(&r.headers, allocator)
r.cookies = make([dynamic]http.Cookie, allocator)
bytes.buffer_init_allocator(&r.body, 0, 0, allocator)
}
// Destroys the request.
// Header keys and values that the user added will have to be deleted by the user.
// Same with any strings inside the cookies.
request_destroy :: proc(r: ^Request) {
delete(r.headers._kv)
delete(r.cookies)
bytes.buffer_destroy(&r.body)
}
with_json :: proc(r: ^Request, v: any, opt: json.Marshal_Options = {}) -> json.Marshal_Error {
if r.method == .Get do r.method = .Post
http.headers_set_content_type(&r.headers, http.mime_to_content_type(.Json))
stream := bytes.buffer_to_stream(&r.body)
opt := opt
json.marshal_to_writer(io.to_writer(stream), v, &opt) or_return
return nil
}
get :: proc(target: string, allocator := context.allocator) -> (Response, Error) {
r: Request
request_init(&r, .Get, allocator)
defer request_destroy(&r)
return request(&r, target, allocator)
}
Request_Error :: enum {
Ok,
Invalid_Response_HTTP_Version,
Invalid_Response_Method,
Invalid_Response_Header,
Invalid_Response_Cookie,
}
SSL_Error :: enum {
Ok,
Controlled_Shutdown,
Fatal_Shutdown,
SSL_Write_Failed,
}
Error :: union #shared_nil {
net.Dial_Error,
net.Parse_Endpoint_Error,
net.Network_Error,
bufio.Scanner_Error,
Request_Error,
SSL_Error,
}
request :: proc(request: ^Request, target: string, allocator := context.allocator) -> (res: Response, err: Error) {
url, endpoint := parse_endpoint(target) or_return
// NOTE: we don't support persistent connections yet.
http.headers_set_close(&request.headers)
req_buf := format_request(url, request, allocator)
defer bytes.buffer_destroy(&req_buf)
socket := net.dial_tcp(endpoint) or_return
// HTTPS using openssl.
if url.scheme == "https" {
ctx := openssl.SSL_CTX_new(openssl.TLS_client_method())
ssl := openssl.SSL_new(ctx)
openssl.SSL_set_fd(ssl, c.int(socket))
// For servers using SNI for SSL certs (like cloudflare), this needs to be set.
chostname := strings.clone_to_cstring(url.host, allocator)
defer delete(chostname, allocator)
openssl.SSL_set_tlsext_host_name(ssl, chostname)
switch openssl.SSL_connect(ssl) {
case 2:
err = SSL_Error.Controlled_Shutdown
return
case 1: // success
case:
err = SSL_Error.Fatal_Shutdown
return
}
buf := bytes.buffer_to_bytes(&req_buf)
to_write := len(buf)
for to_write > 0 {
ret := openssl.SSL_write(ssl, raw_data(buf), c.int(to_write))
if ret <= 0 {
err = SSL_Error.SSL_Write_Failed
return
}
to_write -= int(ret)
}
return parse_response(SSL_Communication{ssl = ssl, ctx = ctx, socket = socket}, allocator)
}
// HTTP, just send the request.
net.send_tcp(socket, bytes.buffer_to_bytes(&req_buf)) or_return
return parse_response(socket, allocator)
}
Response :: struct {
status: http.Status,
// headers and cookies should be considered read-only, after a response is returned.
headers: http.Headers,
cookies: [dynamic]http.Cookie,
_socket: Communication,
_body: bufio.Scanner,
_body_err: Body_Error,
}
// Frees the response, closes the connection.
// Optionally pass the response_body returned 'body' and 'was_allocation' to destroy it too.
response_destroy :: proc(res: ^Response, body: Maybe(Body_Type) = nil, was_allocation := false) {
// Header keys are allocated, values are slices into the body.
// NOTE: this is fine because we don't add any headers with `headers_set_unsafe()`.
// If we did, we wouldn't know if the key was allocated or a literal.
// We also set the headers to readonly before giving them to the user so they can't add any either.
for k, v in res.headers._kv {
delete(v, res.headers._kv.allocator)
delete(k, res.headers._kv.allocator)
}
delete(res.headers._kv)
bufio.scanner_destroy(&res._body)
// Cookies only contain slices to memory inside the scanner body.
// So just deleting the array will be enough.
delete(res.cookies)
if body != nil {
body_destroy(body.(Body_Type), was_allocation)
}
// We close now and not at the time we got the response because reading the body,
// could make more reads need to happen (like with chunked encoding).
switch comm in res._socket {
case net.TCP_Socket:
net.close(comm)
case SSL_Communication:
openssl.SSL_free(comm.ssl)
openssl.SSL_CTX_free(comm.ctx)
net.close(comm.socket)
}
}
Body_Error :: enum {
None,
No_Length,
Invalid_Length,
Too_Long,
Scan_Failed,
Invalid_Chunk_Size,
Invalid_Trailer_Header,
}
// Any non-special body, could have been a chunked body that has been read in fully automatically.
// Depending on the return value for 'was_allocation' of the parse function, this is either an
// allocated string that you should delete or a slice into the body.
Body_Plain :: string
// A URL encoded body, map, keys and values are fully allocated on the allocator given to the parsing function,
// And should be deleted by you.
Body_Url_Encoded :: map[string]string
Body_Type :: union {
Body_Plain,
Body_Url_Encoded,
Body_Error, // TODO: why is this here if we also return an error?
}
// Frees the memory allocated by parsing the body.
// was_allocation is returned by the body parsing procedure.
body_destroy :: proc(body: Body_Type, was_allocation: bool) {
switch b in body {
case Body_Plain:
if was_allocation do delete(b)
case Body_Url_Encoded:
for k, v in b {
delete(k)
delete(v)
}
delete(b)
case Body_Error:
}
}
// Retrieves the response's body, can only be called once.
// Free the returned body using body_destroy().
response_body :: proc(
res: ^Response,
max_length := -1,
allocator := context.allocator,
) -> (
body: Body_Type,
was_allocation: bool,
err: Body_Error,
) {
defer res._body_err = err
assert(res._body_err == nil)
body, was_allocation, err = _parse_body(&res.headers, &res._body, max_length, allocator)
return
}
_parse_body :: proc(
headers: ^http.Headers,
_body: ^bufio.Scanner,
max_length := -1,
allocator := context.allocator,
) -> (
body: Body_Type,
was_allocation: bool,
err: Body_Error,
) {
// See [RFC 7230 3.3.3](https://www.rfc-editor.org/rfc/rfc7230#section-3.3.3) for the rules.
// Point 3 paragraph 3 and point 4 are handled before we get here.
enc, has_enc := http.headers_get_unsafe(headers^, "transfer-encoding")
length, has_length := http.headers_get_unsafe(headers^, "content-length")
switch {
case has_enc && strings.has_suffix(enc, "chunked"):
was_allocation = true
body = _response_body_chunked(headers, _body, max_length, allocator) or_return
case has_length:
body = _response_body_length(_body, max_length, length) or_return
case:
body = _response_till_close(_body, max_length) or_return
}
// Automatically decode url encoded bodies.
if typ, ok := http.headers_get_unsafe(headers^, "content-type"); ok && typ == "application/x-www-form-urlencoded" {
plain := body.(Body_Plain)
defer if was_allocation do delete(plain)
keyvalues := strings.split(plain, "&", allocator)
defer delete(keyvalues, allocator)
queries := make(Body_Url_Encoded, len(keyvalues), allocator)
for keyvalue in keyvalues {
seperator := strings.index(keyvalue, "=")
if seperator == -1 { // The keyvalue has no value.
queries[keyvalue] = ""
continue
}
key, key_decoded_ok := net.percent_decode(keyvalue[:seperator], allocator)
if !key_decoded_ok {
log.warnf("url encoded body key %q could not be decoded", keyvalue[:seperator])
continue
}
val, val_decoded_ok := net.percent_decode(keyvalue[seperator + 1:], allocator)
if !val_decoded_ok {
log.warnf("url encoded body value %q for key %q could not be decoded", keyvalue[seperator + 1:], key)
continue
}
queries[key] = val
}
body = queries
}
return
}
_response_till_close :: proc(_body: ^bufio.Scanner, max_length: int) -> (string, Body_Error) {
_body.max_token_size = max_length
defer _body.max_token_size = bufio.DEFAULT_MAX_SCAN_TOKEN_SIZE
_body.split =
proc(data: []byte, at_eof: bool) -> (advance: int, token: []byte, err: bufio.Scanner_Error, final_token: bool) {
if at_eof {
return len(data), data, nil, true
}
return
}
defer _body.split = bufio.scan_lines
if !bufio.scanner_scan(_body) {
if bufio.scanner_error(_body) == .Too_Long {
return "", .Too_Long
}
return "", .Scan_Failed
}
return bufio.scanner_text(_body), .None
}
// "Decodes" a response body based on the content length header.
// Meant for internal usage, you should use `client.response_body`.
_response_body_length :: proc(_body: ^bufio.Scanner, max_length: int, len: string) -> (string, Body_Error) {
ilen, lenok := strconv.parse_int(len, 10)
if !lenok {
return "", .Invalid_Length
}
if max_length > -1 && ilen > max_length {
return "", .Too_Long
}
if ilen == 0 {
return "", nil
}
// user_index is used to set the amount of bytes to scan in scan_num_bytes.
context.user_index = ilen
_body.max_token_size = ilen
defer _body.max_token_size = bufio.DEFAULT_MAX_SCAN_TOKEN_SIZE
_body.split = scan_num_bytes
defer _body.split = bufio.scan_lines
log.debugf("scanning %i bytes body", ilen)
if !bufio.scanner_scan(_body) {
return "", .Scan_Failed
}
return bufio.scanner_text(_body), .None
}
// "Decodes" a chunked transfer encoded request body.
// Meant for internal usage, you should use `client.response_body`.
//
// RFC 7230 4.1.3 pseudo-code:
//
// length := 0
// read chunk-size, chunk-ext (if any), and CRLF
// while (chunk-size > 0) {
// read chunk-data and CRLF
// append chunk-data to decoded-body
// length := length + chunk-size
// read chunk-size, chunk-ext (if any), and CRLF
// }
// read trailer field
// while (trailer field is not empty) {
// if (trailer field is allowed to be sent in a trailer) {
// append trailer field to existing header fields
// }
// read trailer-field
// }
// Content-Length := length
// Remove "chunked" from Transfer-Encoding
// Remove Trailer from existing header fields
_response_body_chunked :: proc(
headers: ^http.Headers,
_body: ^bufio.Scanner,
max_length: int,
allocator := context.allocator,
) -> (
body: string,
err: Body_Error,
) {
body_buff: bytes.Buffer
bytes.buffer_init_allocator(&body_buff, 0, 0, allocator)
defer if err != nil do bytes.buffer_destroy(&body_buff)
for {
if !bufio.scanner_scan(_body) {
return "", .Scan_Failed
}
size_line := bufio.scanner_bytes(_body)
// If there is a semicolon, discard everything after it,
// that would be chunk extensions which we currently have no interest in.
if semi := bytes.index_byte(size_line, ';'); semi > -1 {
size_line = size_line[:semi]
}
size, ok := strconv.parse_int(string(size_line), 16)
if !ok {
err = .Invalid_Chunk_Size
return
}
if size == 0 do break
if max_length > -1 && bytes.buffer_length(&body_buff) + size > max_length {
return "", .Too_Long
}
// user_index is used to set the amount of bytes to scan in scan_num_bytes.
context.user_index = size
_body.max_token_size = size
_body.split = scan_num_bytes
if !bufio.scanner_scan(_body) {
return "", .Scan_Failed
}
_body.max_token_size = bufio.DEFAULT_MAX_SCAN_TOKEN_SIZE
_body.split = bufio.scan_lines
bytes.buffer_write(&body_buff, bufio.scanner_bytes(_body))
// Read empty line after chunk.
if !bufio.scanner_scan(_body) {
return "", .Scan_Failed
}
assert(bufio.scanner_text(_body) == "")
}
// Read trailing empty line (after body, before trailing headers).
if !bufio.scanner_scan(_body) || bufio.scanner_text(_body) != "" {
return "", .Scan_Failed
}
// Keep parsing the request as line delimited headers until we get to an empty line.
for {
// If there are no trailing headers, this case is hit.
if !bufio.scanner_scan(_body) {
break
}
line := bufio.scanner_text(_body)
// The first empty line denotes the end of the headers section.
if line == "" {
break
}
key, ok := http.header_parse(headers, line)
if !ok {
return "", .Invalid_Trailer_Header
}
// A recipient MUST ignore (or consider as an error) any fields that are forbidden to be sent in a trailer.
if !http.header_allowed_trailer(key) {
http.headers_delete(headers, key)
}
}
if http.headers_has(headers^, "trailer") {
http.headers_delete_unsafe(headers, "trailer")
}
te := strings.trim_suffix(http.headers_get_unsafe(headers^, "transfer-encoding"), "chunked")
headers.readonly = false
http.headers_set_unsafe(headers, "transfer-encoding", te)
headers.readonly = true
return bytes.buffer_to_string(&body_buff), .None
}
// A scanner bufio.Split_Proc implementation to scan a given amount of bytes.
// The amount of bytes should be set in the context.user_index.
@(private)
scan_num_bytes :: proc(
data: []byte,
at_eof: bool,
) -> (
advance: int,
token: []byte,
err: bufio.Scanner_Error,
final_token: bool,
) {
n := context.user_index // Set context.user_index to the amount of bytes to read.
if at_eof && len(data) < n {
return
}
if len(data) < n {
return
}
return n, data[:n], nil, false
}

View file

@@ -0,0 +1,294 @@
#+private
package client
import "core:bufio"
import "core:bytes"
import "core:c"
import "core:io"
import "core:log"
import "core:net"
import "core:strconv"
import "core:strings"
import http ".."
import openssl "../openssl"
parse_endpoint :: proc(target: string) -> (url: http.URL, endpoint: net.Endpoint, err: net.Network_Error) {
url = http.url_parse(target)
host_or_endpoint := net.parse_hostname_or_endpoint(url.host) or_return
switch t in host_or_endpoint {
case net.Endpoint:
endpoint = t
return
case net.Host:
ep4, ep6 := net.resolve(t.hostname) or_return
endpoint = ep4 if ep4.address != nil else ep6
endpoint.port = t.port
if endpoint.port == 0 {
endpoint.port = url.scheme == "https" ? 443 : 80
}
return
case:
unreachable()
}
}
format_request :: proc(target: http.URL, request: ^Request, allocator := context.allocator) -> (buf: bytes.Buffer) {
// Responses are on average at least 100 bytes, so let's start there, but add the body's length.
bytes.buffer_init_allocator(&buf, 0, bytes.buffer_length(&request.body) + 100, allocator)
http.requestline_write(
bytes.buffer_to_stream(&buf),
{method = request.method, target = target, version = http.Version{1, 1}},
)
if !http.headers_has_unsafe(request.headers, "content-length") {
buf_len := bytes.buffer_length(&request.body)
if buf_len == 0 {
bytes.buffer_write_string(&buf, "content-length: 0\r\n")
} else {
bytes.buffer_write_string(&buf, "content-length: ")
// Make sure at least 20 bytes are there to write into, should be enough for the content length.
bytes.buffer_grow(&buf, buf_len + 20)
// Write the length into unwritten portion.
unwritten := http._dynamic_unwritten(buf.buf)
l := len(strconv.itoa(unwritten, buf_len))
assert(l <= 20)
http._dynamic_add_len(&buf.buf, l)
bytes.buffer_write_string(&buf, "\r\n")
}
}
if !http.headers_has_unsafe(request.headers, "accept") {
bytes.buffer_write_string(&buf, "accept: */*\r\n")
}
if !http.headers_has_unsafe(request.headers, "user-agent") {
bytes.buffer_write_string(&buf, "user-agent: odin-http\r\n")
}
if !http.headers_has_unsafe(request.headers, "host") {
bytes.buffer_write_string(&buf, "host: ")
bytes.buffer_write_string(&buf, target.host)
bytes.buffer_write_string(&buf, "\r\n")
}
for header, value in request.headers._kv {
bytes.buffer_write_string(&buf, header)
bytes.buffer_write_string(&buf, ": ")
// Escape newlines in headers; if we don't, an attacker can find an endpoint
// that returns a header with user input, and inject headers into the response.
esc_value, was_allocation := strings.replace_all(value, "\n", "\\n", allocator)
defer if was_allocation do delete(esc_value)
bytes.buffer_write_string(&buf, esc_value)
bytes.buffer_write_string(&buf, "\r\n")
}
if len(request.cookies) > 0 {
bytes.buffer_write_string(&buf, "cookie: ")
for cookie, i in request.cookies {
bytes.buffer_write_string(&buf, cookie.name)
bytes.buffer_write_byte(&buf, '=')
bytes.buffer_write_string(&buf, cookie.value)
if i != len(request.cookies) - 1 {
bytes.buffer_write_string(&buf, "; ")
}
}
bytes.buffer_write_string(&buf, "\r\n")
}
// Empty line denotes end of headers and start of body.
bytes.buffer_write_string(&buf, "\r\n")
bytes.buffer_write(&buf, bytes.buffer_to_bytes(&request.body))
return
}
SSL_Communication :: struct {
socket: net.TCP_Socket,
ssl: ^openssl.SSL,
ctx: ^openssl.SSL_CTX,
}
Communication :: union {
net.TCP_Socket, // HTTP.
SSL_Communication, // HTTPS.
}
parse_response :: proc(socket: Communication, allocator := context.allocator) -> (res: Response, err: Error) {
res._socket = socket
stream: io.Stream
switch comm in socket {
case net.TCP_Socket:
stream = tcp_stream(comm)
case SSL_Communication:
stream = ssl_tcp_stream(comm.ssl)
}
stream_reader := io.to_reader(stream)
scanner: bufio.Scanner
bufio.scanner_init(&scanner, stream_reader, allocator)
http.headers_init(&res.headers, allocator)
if !bufio.scanner_scan(&scanner) {
err = bufio.scanner_error(&scanner)
return
}
rline_str := bufio.scanner_text(&scanner)
si := strings.index_byte(rline_str, ' ')
version, ok := http.version_parse(rline_str[:si])
if !ok {
err = Request_Error.Invalid_Response_HTTP_Version
return
}
// Might need to support more versions later.
if version.major != 1 {
err = Request_Error.Invalid_Response_HTTP_Version
return
}
res.status, ok = http.status_from_string(rline_str[si + 1:])
if !ok {
err = Request_Error.Invalid_Response_Method
return
}
for {
if !bufio.scanner_scan(&scanner) {
err = bufio.scanner_error(&scanner)
return
}
line := bufio.scanner_text(&scanner)
// Empty line means end of headers.
if line == "" do break
key, hok := http.header_parse(&res.headers, line, allocator)
if !hok {
err = Request_Error.Invalid_Response_Header
return
}
if key == "set-cookie" {
cookie_str := http.headers_get_unsafe(res.headers, "set-cookie")
http.headers_delete_unsafe(&res.headers, "set-cookie")
delete(key)
cookie, cok := http.cookie_parse(cookie_str, allocator)
if !cok {
err = Request_Error.Invalid_Response_Cookie
return
}
append(&res.cookies, cookie)
}
}
if !http.headers_validate(&res.headers) {
err = Request_Error.Invalid_Response_Header
return
}
res.headers.readonly = true
res._body = scanner
return res, nil
}
ssl_tcp_stream :: proc(sock: ^openssl.SSL) -> (s: io.Stream) {
s.data = sock
s.procedure = _ssl_stream_proc
return s
}
@(private)
_ssl_stream_proc :: proc(
stream_data: rawptr,
mode: io.Stream_Mode,
p: []byte,
offset: i64,
whence: io.Seek_From,
) -> (
n: i64,
err: io.Error,
) {
#partial switch mode {
case .Query:
return io.query_utility(io.Stream_Mode_Set{.Query, .Read})
case .Read:
ssl := cast(^openssl.SSL)stream_data
ret := openssl.SSL_read(ssl, raw_data(p), c.int(len(p)))
if ret <= 0 {
return 0, .Unexpected_EOF
}
return i64(ret), nil
case:
err = .Empty
}
return
}
// Wraps a tcp socket with a stream.
tcp_stream :: proc(sock: net.TCP_Socket) -> (s: io.Stream) {
s.data = rawptr(uintptr(sock))
s.procedure = _socket_stream_proc
return s
}
@(private)
_socket_stream_proc :: proc(
stream_data: rawptr,
mode: io.Stream_Mode,
p: []byte,
offset: i64,
whence: io.Seek_From,
) -> (
n: i64,
err: io.Error,
) {
#partial switch mode {
case .Query:
return io.query_utility(io.Stream_Mode_Set{.Query, .Read})
case .Read:
sock := net.TCP_Socket(uintptr(stream_data))
received, recv_err := net.recv_tcp(sock, p)
n = i64(received)
#partial switch ex in recv_err {
case net.TCP_Recv_Error:
#partial switch ex {
case .None:
err = .None
case .Shutdown, .Not_Connected, .Aborted, .Connection_Closed, .Host_Unreachable, .Timeout:
log.errorf("unexpected error reading tcp: %s", ex)
err = .Unexpected_EOF
case:
log.errorf("unexpected error reading tcp: %s", ex)
err = .Unknown
}
case nil:
err = .None
case:
assert(false, "recv_tcp only returns TCP_Recv_Error or nil")
}
case:
err = .Empty
}
return
}

View file

@@ -0,0 +1,21 @@
# Comparison - Empty OK All
This comparison measures raw IO rate: the server needs to respond to requests on port :8080 with 200 OK.
Of course this is not a full picture, but you can get an idea of performance; a minimal Odin-HTTP version of such a server is sketched after the results table.
## Results
Taken on Pop!_OS Linux using an AMD Ryzen 7 5800X 8-core processor.
Load is created using [Bombardier](https://github.com/codesenberg/bombardier) set to 250 connections and 10,000,000 requests.
Bombardier command used: `bombardier -c 250 -n 10000000 http://localhost:8080`
| Language/framework | Command | Requests per second | Total time | Avg response time | Throughput |
|--------------------|-----------------------------------------------------------|---------------------|------------|-------------------|------------|
| Rust Actix 4.2 | `cargo build --release` (this installs 256 dependencies!) | 712k | 14s | 347us | 120.8MB/s |
| Odin-HTTP dev | `odin build . -o:speed -disable-assert -no-bounds-check` | 637k | 15s | 340us | 105.2MB/s |
| Go net/http 1.21 | `go build main.go` | 598k | 16s | 417us | 77.98MB/s |
| Bun.serve 1.1 | `NODE_ENV=production bun run index.ts` | 302k | 33s | 827us | 39.43MB/s |
| Node http 20.5 | `NODE_ENV=production node app.js` | 65k | 2m35s | 3.88ms | 12.90MB/s |
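For reference, the Odin-HTTP entry is essentially the minimal `empty_ok_all` server included elsewhere in this commit; a sketch of it follows (the relative import path is specific to this repository layout and may differ in your setup):

```odin
package empty_ok_all

import http "../../.." // path to the odin-http package

main :: proc() {
	s: http.Server

	// Respond to every request with an empty 200 OK.
	handler := http.handler(proc(_: ^http.Request, res: ^http.Response) {
		res.status = .OK
		http.respond(res)
	})

	http.listen_and_serve(&s, handler)
}
```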

View file

@ -0,0 +1,169 @@
# Based on https://raw.githubusercontent.com/github/gitignore/main/Node.gitignore
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
lerna-debug.log*
.pnpm-debug.log*
# Diagnostic reports (https://nodejs.org/api/report.html)
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
# Runtime data
pids
*.pid
*.seed
*.pid.lock
# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov
# Coverage directory used by tools like istanbul
coverage
*.lcov
# nyc test coverage
.nyc_output
# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
.grunt
# Bower dependency directory (https://bower.io/)
bower_components
# node-waf configuration
.lock-wscript
# Compiled binary addons (https://nodejs.org/api/addons.html)
build/Release
# Dependency directories
node_modules/
jspm_packages/
# Snowpack dependency directory (https://snowpack.dev/)
web_modules/
# TypeScript cache
*.tsbuildinfo
# Optional npm cache directory
.npm
# Optional eslint cache
.eslintcache
# Optional stylelint cache
.stylelintcache
# Microbundle cache
.rpt2_cache/
.rts2_cache_cjs/
.rts2_cache_es/
.rts2_cache_umd/
# Optional REPL history
.node_repl_history
# Output of 'npm pack'
*.tgz
# Yarn Integrity file
.yarn-integrity
# dotenv environment variable files
.env
.env.development.local
.env.test.local
.env.production.local
.env.local
# parcel-bundler cache (https://parceljs.org/)
.cache
.parcel-cache
# Next.js build output
.next
out
# Nuxt.js build / generate output
.nuxt
dist
# Gatsby files
.cache/
# Comment in the public line in if your project uses Gatsby and not Next.js
# https://nextjs.org/blog/next-9-1#public-directory-support
# public
# vuepress build output
.vuepress/dist
# vuepress v2.x temp and cache directory
.temp
.cache
# Docusaurus cache and generated files
.docusaurus
# Serverless directories
.serverless/
# FuseBox cache
.fusebox/
# DynamoDB Local files
.dynamodb/
# TernJS port file
.tern-port
# Stores VSCode versions used for testing VSCode extensions
.vscode-test
# yarn v2
.yarn/cache
.yarn/unplugged
.yarn/build-state.yml
.yarn/install-state.gz
.pnp.*

Binary file not shown.

View file

@ -0,0 +1,8 @@
const server = Bun.serve({
port: 8080,
fetch(req) {
return new Response();
},
});
console.log(`Listening on http://localhost:${server.port}...`);

View file

@ -0,0 +1,11 @@
{
"name": "empty-ok-all",
"module": "index.ts",
"type": "module",
"devDependencies": {
"bun-types": "latest"
},
"peerDependencies": {
"typescript": "^5.0.0"
}
}

View file

@ -0,0 +1,22 @@
{
"compilerOptions": {
"lib": ["ESNext"],
"module": "esnext",
"target": "esnext",
"moduleResolution": "bundler",
"moduleDetection": "force",
"allowImportingTsExtensions": true,
"noEmit": true,
"composite": true,
"strict": true,
"downlevelIteration": true,
"skipLibCheck": true,
"jsx": "preserve",
"allowSyntheticDefaultImports": true,
"forceConsistentCasingInFileNames": true,
"allowJs": true,
"types": [
"bun-types" // add Bun global
]
}
}

View file

@ -0,0 +1,12 @@
package main
import (
"net/http"
)
func main() {
http.HandleFunc("/", HelloServer)
http.ListenAndServe(":8080", nil)
}
func HelloServer(w http.ResponseWriter, r *http.Request) {}

View file

@ -0,0 +1,14 @@
const http = require('http');
const hostname = '127.0.0.1';
const port = 8080;
const server = http.createServer((req, res) => {
res.statusCode = 200;
res.setHeader('Content-Type', 'text/plain');
res.end();
});
server.listen(port, hostname, () => {
console.log(`Server running at http://${hostname}:${port}/`);
});

View file

@ -0,0 +1,18 @@
package empty_ok_all
import "core:fmt"
import http "../../.."
main :: proc() {
s: http.Server
fmt.println("Listening on http://localost:8080...")
handler := http.handler(proc(_: ^http.Request, res: ^http.Response) {
res.status = .OK
http.respond(res)
})
http.listen_and_serve(&s, handler)
}

View file

@ -0,0 +1 @@
target

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,10 @@
[package]
name = "rust"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
actix-web = "4.2.1"
serde = { version = "1.0.145", features = ["derive"] }

View file

@ -0,0 +1,12 @@
use actix_web::{web, App, HttpServer};
#[actix_web::main]
async fn main() -> std::io::Result<()> {
HttpServer::new(|| {
App::new()
.service(web::resource("/").to(|| async { "" }))
})
.bind(("127.0.0.1", 8080))?
.run()
.await
}

432
odin-http/cookie.odin Normal file
View file

@ -0,0 +1,432 @@
package http
import "core:io"
import "core:strconv"
import "core:strings"
import "core:time"
Cookie_Same_Site :: enum {
Unspecified,
None,
Strict,
Lax,
}
Cookie :: struct {
name: string,
value: string,
domain: Maybe(string),
expires_gmt: Maybe(time.Time),
max_age_secs: Maybe(int),
path: Maybe(string),
http_only: bool,
partitioned: bool,
secure: bool,
same_site: Cookie_Same_Site,
}
// Builds the Set-Cookie header string representation of the given cookie.
cookie_write :: proc(w: io.Writer, c: Cookie) -> io.Error {
// odinfmt:disable
io.write_string(w, "set-cookie: ") or_return
write_escaped_newlines(w, c.name) or_return
io.write_byte(w, '=') or_return
write_escaped_newlines(w, c.value) or_return
if d, ok := c.domain.(string); ok {
io.write_string(w, "; Domain=") or_return
write_escaped_newlines(w, d) or_return
}
if e, ok := c.expires_gmt.(time.Time); ok {
io.write_string(w, "; Expires=") or_return
date_write(w, e) or_return
}
if a, ok := c.max_age_secs.(int); ok {
io.write_string(w, "; Max-Age=") or_return
io.write_int(w, a) or_return
}
if p, ok := c.path.(string); ok {
io.write_string(w, "; Path=") or_return
write_escaped_newlines(w, p) or_return
}
switch c.same_site {
case .None: io.write_string(w, "; SameSite=None") or_return
case .Lax: io.write_string(w, "; SameSite=Lax") or_return
case .Strict: io.write_string(w, "; SameSite=Strict") or_return
case .Unspecified: // no-op.
}
// odinfmt:enable
if c.secure {
io.write_string(w, "; Secure") or_return
}
if c.partitioned {
io.write_string(w, "; Partitioned") or_return
}
if c.http_only {
io.write_string(w, "; HttpOnly") or_return
}
return nil
}
// Builds the Set-Cookie header string representation of the given cookie.
cookie_string :: proc(c: Cookie, allocator := context.allocator) -> string {
b: strings.Builder
strings.builder_init(&b, 0, 20, allocator)
cookie_write(strings.to_writer(&b), c)
return strings.to_string(b)
}
// TODO: check specific whitespace requirements in RFC.
//
// Allocations are done to check case-insensitive attributes but they are deleted right after.
// So, all the returned strings (inside cookie) are slices into the given value string.
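// Illustrative usage (not part of the original file, shown for clarity):
//
//	c, ok := cookie_parse("session=abc123; Path=/; Secure; HttpOnly")
//	// ok == true, c.name == "session", c.value == "abc123",
//	// c.path == "/", c.secure == true, c.http_only == true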
cookie_parse :: proc(value: string, allocator := context.allocator) -> (cookie: Cookie, ok: bool) {
value := value
eq := strings.index_byte(value, '=')
if eq < 1 do return
cookie.name = value[:eq]
value = value[eq + 1:]
semi := strings.index_byte(value, ';')
switch semi {
case -1:
cookie.value = value
ok = true
return
case 0:
return
case:
cookie.value = value[:semi]
value = value[semi + 1:]
}
parse_part :: proc(cookie: ^Cookie, part: string, allocator := context.temp_allocator) -> (ok: bool) {
eq := strings.index_byte(part, '=')
switch eq {
case -1:
key := strings.to_lower(part, allocator)
defer delete(key)
switch key {
case "httponly":
cookie.http_only = true
case "partitioned":
cookie.partitioned = true
case "secure":
cookie.secure = true
case:
return
}
case 0:
return
case:
key := strings.to_lower(part[:eq], allocator)
defer delete(key)
value := part[eq + 1:]
switch key {
case "domain":
cookie.domain = value
case "expires":
cookie.expires_gmt = cookie_date_parse(value) or_return
case "max-age":
cookie.max_age_secs = strconv.parse_int(value, 10) or_return
case "path":
cookie.path = value
case "samesite":
switch value {
case "lax", "Lax", "LAX":
cookie.same_site = .Lax
case "none", "None", "NONE":
cookie.same_site = .None
case "strict", "Strict", "STRICT":
cookie.same_site = .Strict
case:
return
}
case:
return
}
}
return true
}
for semi = strings.index_byte(value, ';'); semi != -1; semi = strings.index_byte(value, ';') {
part := strings.trim_left_space(value[:semi])
value = value[semi + 1:]
parse_part(&cookie, part, allocator) or_return
}
part := strings.trim_left_space(value)
if part == "" {
ok = true
return
}
parse_part(&cookie, part, allocator) or_return
ok = true
return
}
/*
Implementation of the algorithm described in RFC 6265 section 5.1.1.
*/
cookie_date_parse :: proc(value: string) -> (t: time.Time, ok: bool) {
iter_delim :: proc(value: ^string) -> (token: string, ok: bool) {
start := -1
start_loop: for ch, i in transmute([]byte)value^ {
switch ch {
case 0x09, 0x20..=0x2F, 0x3B..=0x40, 0x5B..=0x60, 0x7B..=0x7E:
case:
start = i
break start_loop
}
}
if start == -1 {
return
}
token = value[start:]
length := len(token)
end_loop: for ch, i in transmute([]byte)token {
switch ch {
case 0x09, 0x20..=0x2F, 0x3B..=0x40, 0x5B..=0x60, 0x7B..=0x7E:
length = i
break end_loop
}
}
ok = true
token = token[:length]
value^ = value[start+length:]
return
}
parse_digits :: proc(value: string, min, max: int, trailing_ok: bool) -> (int, bool) {
count: int
for ch in transmute([]byte)value {
if ch <= 0x2f || ch >= 0x3a {
break
}
count += 1
}
if count < min || count > max {
return 0, false
}
if !trailing_ok && len(value) != count {
return 0, false
}
return strconv.parse_int(value[:count], 10)
}
parse_time :: proc(token: string) -> (t: Time, ok: bool) {
hours, match1, tail := strings.partition(token, ":")
if match1 != ":" { return }
minutes, match2, seconds := strings.partition(tail, ":")
if match2 != ":" { return }
t.hours = parse_digits(hours, 1, 2, false) or_return
t.minutes = parse_digits(minutes, 1, 2, false) or_return
t.seconds = parse_digits(seconds, 1, 2, true) or_return
ok = true
return
}
parse_month :: proc(token: string) -> (month: int) {
if len(token) < 3 {
return
}
lower: [3]byte
for &ch, i in lower {
#no_bounds_check orig := token[i]
switch orig {
case 'A'..='Z':
ch = orig + 32
case:
ch = orig
}
}
switch string(lower[:]) {
case "jan":
return 1
case "feb":
return 2
case "mar":
return 3
case "apr":
return 4
case "may":
return 5
case "jun":
return 6
case "jul":
return 7
case "aug":
return 8
case "sep":
return 9
case "oct":
return 10
case "nov":
return 11
case "dec":
return 12
case:
return
}
}
Time :: struct {
hours, minutes, seconds: int,
}
clock: Maybe(Time)
day_of_month, month, year: Maybe(int)
value := value
for token in iter_delim(&value) {
if _, has_time := clock.?; !has_time {
if t, tok := parse_time(token); tok {
clock = t
continue
}
}
if _, has_day_of_month := day_of_month.?; !has_day_of_month {
if dom, dok := parse_digits(token, 1, 2, true); dok {
day_of_month = dom
continue
}
}
if _, has_month := month.?; !has_month {
if mon := parse_month(token); mon > 0 {
month = mon
continue
}
}
if _, has_year := year.?; !has_year {
if yr, yrok := parse_digits(token, 2, 4, true); yrok {
if yr >= 70 && yr <= 99 {
yr += 1900
} else if yr >= 0 && yr <= 69 {
yr += 2000
}
year = yr
continue
}
}
}
c := clock.? or_return
y := year.? or_return
if y < 1601 {
return
}
t = time.datetime_to_time(
y,
month.? or_return,
day_of_month.? or_return,
c.hours,
c.minutes,
c.seconds,
) or_return
ok = true
return
}
/*
Retrieves the cookie with the given `key` out of the requests `Cookie` header.
If the same key is in the header multiple times the last one is returned.
*/
request_cookie_get :: proc(r: ^Request, key: string) -> (value: string, ok: bool) {
cookies := headers_get_unsafe(r.headers, "cookie") or_return
for k, v in request_cookies_iter(&cookies) {
if key == k do return v, true
}
return
}
/*
Allocates a map with the given allocator and puts all cookie pairs from the requests `Cookie` header into it.
If the same key is in the header multiple times the last one is returned.
*/
request_cookies :: proc(r: ^Request, allocator := context.temp_allocator) -> (res: map[string]string) {
res.allocator = allocator
cookies := headers_get_unsafe(r.headers, "cookie") or_else ""
for k, v in request_cookies_iter(&cookies) {
// Don't overwrite, the iterator goes from right to left and we want the last.
if k in res do continue
res[k] = v
}
return
}
/*
Iterates the cookies from right to left.
*/
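// Illustrative usage (not part of the original file):
//
//	cookies := "a=1; b=2; a=3"
//	for k, v in request_cookies_iter(&cookies) {
//		// yields ("a", "3"), then ("b", "2"), then ("a", "1")
//	}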
request_cookies_iter :: proc(cookies: ^string) -> (key: string, value: string, ok: bool) {
end := len(cookies)
eq := -1
for i := end-1; i >= 0; i-=1 {
b := cookies[i]
start := i == 0
sep := start || b == ' ' && cookies[i-1] == ';'
if sep {
defer end = i - 1
// Invalid.
if eq < 0 {
continue
}
off := 0 if start else 1
key = cookies[i+off:eq]
value = cookies[eq+1:end]
cookies^ = cookies[:i-off]
return key, value, true
} else if b == '=' {
eq = i
}
}
return
}

16
odin-http/docs/all.odin Normal file
View file

@ -0,0 +1,16 @@
/*
This file simply imports any packages we want in the documentation.
*/
package docs
import "../client"
import http ".."
import "../nbio"
import nbio_poly "../nbio/poly"
import "../openssl"
_ :: client
_ :: http
_ :: nbio
_ :: nbio_poly
_ :: openssl

24
odin-http/docs/generate.sh Executable file
View file

@ -0,0 +1,24 @@
#!/usr/bin/env bash
set -ex
cd docs
rm -rf build
mkdir build
odin doc . -all-packages -doc-format
cd build
# This is the binary of https://github.com/laytan/pkg.odin-lang.org, built by `odin build . -out:odin-doc`
odin-doc ../docs.odin-doc ../odin-doc.json
# For GitHub pages, a CNAME file with the intended domain is required.
echo "odin-http.laytan.dev" > CNAME
cd ..
rm docs.odin-doc
cd ..

View file

@ -0,0 +1,20 @@
{
"hide_core": true,
"hide_base": true,
"collections": {
"odin-http": {
"name": "http",
"source_url": "https://github.com/laytan/odin-http/blob/main",
"base_url": "/http",
"root_path": "../..",
"license": {
"text": "MIT",
"url": "https://github.com/laytan/odin-http/tree/main/LICENSE"
},
"home": {
"title": "Odin-HTTP",
"embed_readme": "../../README.md"
}
}
}
}

View file

@ -0,0 +1,70 @@
package client_example
import "core:fmt"
import "../../client"
main :: proc() {
get()
post()
}
// basic get request.
get :: proc() {
res, err := client.get("https://www.google.com/")
if err != nil {
fmt.printf("Request failed: %s", err)
return
}
defer client.response_destroy(&res)
fmt.printf("Status: %s\n", res.status)
fmt.printf("Headers: %v\n", res.headers)
fmt.printf("Cookies: %v\n", res.cookies)
body, allocation, berr := client.response_body(&res)
if berr != nil {
fmt.printf("Error retrieving response body: %s", berr)
return
}
defer client.body_destroy(body, allocation)
fmt.println(body)
}
Post_Body :: struct {
name: string,
message: string,
}
// POST request with JSON.
post :: proc() {
req: client.Request
client.request_init(&req, .Post)
defer client.request_destroy(&req)
pbody := Post_Body{"Laytan", "Hello, World!"}
if err := client.with_json(&req, pbody); err != nil {
fmt.printf("JSON error: %s", err)
return
}
res, err := client.request(&req, "https://webhook.site/YOUR-ID-HERE")
if err != nil {
fmt.printf("Request failed: %s", err)
return
}
defer client.response_destroy(&res)
fmt.printf("Status: %s\n", res.status)
fmt.printf("Headers: %v\n", res.headers)
fmt.printf("Cookies: %v\n", res.cookies)
body, allocation, berr := client.response_body(&res)
if berr != nil {
fmt.printf("Error retrieving response body: %s", berr)
return
}
defer client.body_destroy(body, allocation)
fmt.println(body)
}

View file

@ -0,0 +1,68 @@
package example_tcp_echo
import "core:fmt"
import "core:net"
import "core:os"
import nbio "../../nbio/poly"
Echo_Server :: struct {
io: nbio.IO,
sock: net.TCP_Socket,
connections: [dynamic]^Echo_Connection,
}
Echo_Connection :: struct {
server: ^Echo_Server,
sock: net.TCP_Socket,
buf: [50]byte,
}
main :: proc() {
server: Echo_Server
defer delete(server.connections)
nbio.init(&server.io)
defer nbio.destroy(&server.io)
sock, err := nbio.open_and_listen_tcp(&server.io, {net.IP4_Loopback, 8080})
fmt.assertf(err == nil, "Error opening and listening on localhost:8080: %v", err)
server.sock = sock
nbio.accept(&server.io, sock, &server, echo_on_accept)
// Start the event loop.
errno: os.Errno
for errno == os.ERROR_NONE {
errno = nbio.tick(&server.io)
}
fmt.assertf(errno == os.ERROR_NONE, "Server stopped with error code: %v", errno)
}
echo_on_accept :: proc(server: ^Echo_Server, client: net.TCP_Socket, source: net.Endpoint, err: net.Network_Error) {
fmt.assertf(err == nil, "Error accepting a connection: %v", err)
// Register a new accept for the next client.
nbio.accept(&server.io, server.sock, server, echo_on_accept)
c := new(Echo_Connection)
c.server = server
c.sock = client
append(&server.connections, c)
nbio.recv(&server.io, client, c.buf[:], c, echo_on_recv)
}
echo_on_recv :: proc(c: ^Echo_Connection, received: int, _: Maybe(net.Endpoint), err: net.Network_Error) {
fmt.assertf(err == nil, "Error receiving from client: %v", err)
nbio.send_all(&c.server.io, c.sock, c.buf[:received], c, echo_on_sent)
}
echo_on_sent :: proc(c: ^Echo_Connection, sent: int, err: net.Network_Error) {
fmt.assertf(err == nil, "Error sending to client: %v", err)
// Accept the next message, to then ultimately echo back again.
nbio.recv(&c.server.io, c.sock, c.buf[:], c, echo_on_recv)
}

121
odin-http/handlers.odin Normal file
View file

@ -0,0 +1,121 @@
package http
import "core:net"
import "core:strconv"
import "core:sync"
import "core:time"
Handler_Proc :: proc(handler: ^Handler, req: ^Request, res: ^Response)
Handle_Proc :: proc(req: ^Request, res: ^Response)
Handler :: struct {
user_data: rawptr,
next: Maybe(^Handler),
handle: Handler_Proc,
}
// TODO: something like http.handler_with_body which gets the body before calling the handler.
handler :: proc(handle: Handle_Proc) -> Handler {
h: Handler
h.user_data = rawptr(handle)
handle := proc(h: ^Handler, req: ^Request, res: ^Response) {
p := (Handle_Proc)(h.user_data)
p(req, res)
}
h.handle = handle
return h
}
middleware_proc :: proc(next: Maybe(^Handler), handle: Handler_Proc) -> Handler {
h: Handler
h.next = next
h.handle = handle
return h
}
Rate_Limit_On_Limit :: struct {
user_data: rawptr,
on_limit: proc(req: ^Request, res: ^Response, user_data: rawptr),
}
// Convenience method to create a Rate_Limit_On_Limit that writes the given message.
rate_limit_message :: proc(message: ^string) -> Rate_Limit_On_Limit {
return Rate_Limit_On_Limit{user_data = message, on_limit = proc(_: ^Request, res: ^Response, user_data: rawptr) {
message := (^string)(user_data)
body_set(res, message^)
respond(res)
}}
}
Rate_Limit_Opts :: struct {
window: time.Duration,
max: int,
// Optional handler to call when a request is being rate-limited, allows you to customize the response.
on_limit: Maybe(Rate_Limit_On_Limit),
}
Rate_Limit_Data :: struct {
opts: ^Rate_Limit_Opts,
next_sweep: time.Time,
hits: map[net.Address]int,
mu: sync.Mutex,
}
rate_limit_destroy :: proc(data: ^Rate_Limit_Data) {
sync.guard(&data.mu)
delete(data.hits)
}
// Basic rate limit based on IP address.
rate_limit :: proc(data: ^Rate_Limit_Data, next: ^Handler, opts: ^Rate_Limit_Opts, allocator := context.allocator) -> Handler {
assert(next != nil)
h: Handler
h.next = next
data.opts = opts
data.hits = make(map[net.Address]int, 16, allocator)
data.next_sweep = time.time_add(time.now(), opts.window)
h.user_data = data
h.handle = proc(h: ^Handler, req: ^Request, res: ^Response) {
data := (^Rate_Limit_Data)(h.user_data)
sync.lock(&data.mu)
// PERF: if this is not performing, we could run a thread that sweeps on a regular basis.
if time.since(data.next_sweep) > 0 {
clear(&data.hits)
data.next_sweep = time.time_add(time.now(), data.opts.window)
}
hits := data.hits[req.client.address]
data.hits[req.client.address] = hits + 1
sync.unlock(&data.mu)
if hits > data.opts.max {
res.status = .Too_Many_Requests
retry_dur := int(time.diff(time.now(), data.next_sweep) / time.Second)
buf := make([]byte, 32, context.temp_allocator)
retry_str := strconv.itoa(buf, retry_dur)
headers_set_unsafe(&res.headers, "retry-after", retry_str)
if on, ok := data.opts.on_limit.(Rate_Limit_On_Limit); ok {
on.on_limit(req, res, on.user_data)
} else {
respond(res)
}
return
}
next := h.next.(^Handler)
next.handle(next, req, res)
}
return h
}
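// Illustrative usage (a sketch, not part of the original file; `my_handler` is a placeholder):
//
//	opts := Rate_Limit_Opts{window = time.Minute, max = 60}
//	data: Rate_Limit_Data
//	limited := rate_limit(&data, &my_handler, &opts)
//	// Pass `limited` to the router/server instead of `my_handler`.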

138
odin-http/headers.odin Normal file
View file

@ -0,0 +1,138 @@
package http
import "core:strings"
// A case-insensitive ASCII map for storing headers.
Headers :: struct {
_kv: map[string]string,
readonly: bool,
}
headers_init :: proc(h: ^Headers, allocator := context.temp_allocator) {
h._kv.allocator = allocator
}
headers_count :: #force_inline proc(h: Headers) -> int {
return len(h._kv)
}
/*
Sets a header, given key is first sanitized, final (sanitized) key is returned.
*/
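// For example (illustrative): headers_set(&h, "Content-Type", "text/html")
// stores the key as "content-type" and returns that sanitized key.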
headers_set :: proc(h: ^Headers, k: string, v: string, loc := #caller_location) -> string {
if h.readonly {
panic("these headers are readonly, did you accidentally try to set a header on the request?", loc)
}
l := sanitize_key(h^, k)
h._kv[l] = v
return l
}
/*
Unsafely set header, given key is assumed to be a lowercase string and to be without newlines.
*/
headers_set_unsafe :: #force_inline proc(h: ^Headers, k: string, v: string, loc := #caller_location) {
assert(!h.readonly, "these headers are readonly, did you accidentally try to set a header on the request?", loc)
h._kv[k] = v
}
headers_get :: proc(h: Headers, k: string) -> (string, bool) #optional_ok {
return h._kv[sanitize_key(h, k)]
}
/*
Unsafely get header, given key is assumed to be a lowercase string.
*/
headers_get_unsafe :: #force_inline proc(h: Headers, k: string) -> (string, bool) #optional_ok {
return h._kv[k]
}
headers_has :: proc(h: Headers, k: string) -> bool {
return sanitize_key(h, k) in h._kv
}
/*
Unsafely check for a header, given key is assumed to be a lowercase string.
*/
headers_has_unsafe :: #force_inline proc(h: Headers, k: string) -> bool {
return k in h._kv
}
headers_delete :: proc(h: ^Headers, k: string) -> (deleted_key: string, deleted_value: string) {
return delete_key(&h._kv, sanitize_key(h^, k))
}
/*
Unsafely delete a header, given key is assumed to be a lowercase string.
*/
headers_delete_unsafe :: #force_inline proc(h: ^Headers, k: string) {
delete_key(&h._kv, k)
}
/* Common Helpers */
headers_set_content_type :: proc {
headers_set_content_type_mime,
headers_set_content_type_string,
}
headers_set_content_type_string :: #force_inline proc(h: ^Headers, ct: string) {
headers_set_unsafe(h, "content-type", ct)
}
headers_set_content_type_mime :: #force_inline proc(h: ^Headers, ct: Mime_Type) {
headers_set_unsafe(h, "content-type", mime_to_content_type(ct))
}
headers_set_close :: #force_inline proc(h: ^Headers) {
headers_set_unsafe(h, "connection", "close")
}
/*
Escapes any newlines and converts ASCII to lowercase.
*/
@(private="file")
sanitize_key :: proc(h: Headers, k: string) -> string {
allocator := h._kv.allocator if h._kv.allocator.procedure != nil else context.temp_allocator
// general +4 in rare case of newlines, so we might not need to reallocate.
b := strings.builder_make(0, len(k)+4, allocator)
for c in k {
switch c {
case 'A'..='Z': strings.write_rune(&b, c + 32)
case '\n': strings.write_string(&b, "\\n")
case: strings.write_rune(&b, c)
}
}
return strings.to_string(b)
// NOTE: implementation that only allocates if needed, but we use arena's anyway so just allocating
// some space should be about as fast?
//
// b: strings.Builder = ---
// i: int
// for c in v {
// if c == '\n' || (c >= 'A' && c <= 'Z') {
// b = strings.builder_make(0, len(v)+4, allocator)
// strings.write_string(&b, v[:i])
// alloc = true
// break
// }
// i+=1
// }
//
// if !alloc {
// return v, false
// }
//
// for c in v[i:] {
// switch c {
// case 'A'..='Z': strings.write_rune(&b, c + 32)
// case '\n': strings.write_string(&b, "\\n")
// case: strings.write_rune(&b, c)
// }
// }
//
// return strings.to_string(b), true
}

440
odin-http/http.odin Normal file
View file

@ -0,0 +1,440 @@
package http
import "base:runtime"
import "core:io"
import "core:slice"
import "core:strconv"
import "core:strings"
import "core:sync"
import "core:time"
Requestline_Error :: enum {
None,
Method_Not_Implemented,
Not_Enough_Fields,
Invalid_Version_Format,
}
Requestline :: struct {
method: Method,
target: union {
string,
URL,
},
version: Version,
}
// A request-line begins with a method token, followed by a single space
// (SP), the request-target, another single space (SP), the protocol
// version, and ends with CRLF.
//
// This allocates a clone of the target, because this is intended to be used with a scanner,
// which has a buffer that changes every read.
requestline_parse :: proc(s: string, allocator := context.temp_allocator) -> (line: Requestline, err: Requestline_Error) {
s := s
next_space := strings.index_byte(s, ' ')
if next_space == -1 do return line, .Not_Enough_Fields
ok: bool
line.method, ok = method_parse(s[:next_space])
if !ok do return line, .Method_Not_Implemented
s = s[next_space + 1:]
next_space = strings.index_byte(s, ' ')
if next_space == -1 do return line, .Not_Enough_Fields
line.target = strings.clone(s[:next_space], allocator)
s = s[len(line.target.(string)) + 1:]
line.version, ok = version_parse(s)
if !ok do return line, .Invalid_Version_Format
return
}
requestline_write :: proc(w: io.Writer, rline: Requestline) -> io.Error {
// odinfmt:disable
io.write_string(w, method_string(rline.method)) or_return // <METHOD>
io.write_byte(w, ' ') or_return // <METHOD> <SP>
switch t in rline.target {
case string: io.write_string(w, t) or_return // <METHOD> <SP> <TARGET>
case URL: request_path_write(w, t) or_return // <METHOD> <SP> <TARGET>
}
io.write_byte(w, ' ') or_return // <METHOD> <SP> <TARGET> <SP>
version_write(w, rline.version) or_return // <METHOD> <SP> <TARGET> <SP> <VERSION>
io.write_string(w, "\r\n") or_return // <METHOD> <SP> <TARGET> <SP> <VERSION> <CRLF>
// odinfmt:enable
return nil
}
Version :: struct {
major: u8,
minor: u8,
}
// Parses an HTTP version string according to RFC 7230, section 2.6.
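// For example (illustrative): version_parse("HTTP/1.1") returns {major = 1, minor = 1},
// and version_parse("HTTP/2") returns {major = 2, minor = 0}.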
version_parse :: proc(s: string) -> (version: Version, ok: bool) {
switch len(s) {
case 8:
(s[6] == '.') or_return
version.minor = u8(int(s[7]) - '0')
fallthrough
case 6:
(s[:5] == "HTTP/") or_return
version.major = u8(int(s[5]) - '0')
case:
return
}
ok = true
return
}
version_write :: proc(w: io.Writer, v: Version) -> io.Error {
io.write_string(w, "HTTP/") or_return
io.write_rune(w, '0' + rune(v.major)) or_return
if v.minor > 0 {
io.write_rune(w, '.')
io.write_rune(w, '0' + rune(v.minor))
}
return nil
}
version_string :: proc(v: Version, allocator := context.allocator) -> string {
buf := make([]byte, 8, allocator)
b: strings.Builder
b.buf = slice.into_dynamic(buf)
version_write(strings.to_writer(&b), v)
return strings.to_string(b)
}
Method :: enum {
Get,
Post,
Delete,
Patch,
Put,
Head,
Connect,
Options,
Trace,
}
_method_strings := [?]string{"GET", "POST", "DELETE", "PATCH", "PUT", "HEAD", "CONNECT", "OPTIONS", "TRACE"}
method_string :: proc(m: Method) -> string #no_bounds_check {
if m < .Get || m > .Trace do return ""
return _method_strings[m]
}
method_parse :: proc(m: string) -> (method: Method, ok: bool) #no_bounds_check {
// PERF: I assume this is faster than a map with this amount of items.
for r in Method {
if _method_strings[r] == m {
return r, true
}
}
return nil, false
}
// Parses the header and adds it to the headers if valid. The given string is copied.
header_parse :: proc(headers: ^Headers, line: string, allocator := context.temp_allocator) -> (key: string, ok: bool) {
// Preceding spaces should not be allowed.
(len(line) > 0 && line[0] != ' ') or_return
colon := strings.index_byte(line, ':')
(colon > 0) or_return
// There must not be a space before the colon.
(line[colon - 1] != ' ') or_return
// TODO/PERF: only actually relevant/needed if the key is one of these.
has_host := headers_has_unsafe(headers^, "host")
cl, has_cl := headers_get_unsafe(headers^, "content-length")
value := strings.clone(strings.trim_space(line[colon + 1:]), allocator)
key = headers_set(headers, line[:colon], value)
// RFC 7230 5.4: Server MUST respond with 400 to any request
// with multiple "Host" header fields.
if key == "host" && has_host {
return
}
// RFC 7230 3.3.3: If a message is received without Transfer-Encoding and with
// either multiple Content-Length header fields having differing
// field-values or a single Content-Length header field having an
// invalid value, then the message framing is invalid and the
// recipient MUST treat it as an unrecoverable error.
if key == "content-length" && has_cl && cl != value {
return
}
ok = true
return
}
// Returns if this is a valid trailer header.
//
// RFC 7230 4.1.2:
// A sender MUST NOT generate a trailer that contains a field necessary
// for message framing (e.g., Transfer-Encoding and Content-Length),
// routing (e.g., Host), request modifiers (e.g., controls and
// conditionals in Section 5 of [RFC7231]), authentication (e.g., see
// [RFC7235] and [RFC6265]), response control data (e.g., see Section
// 7.1 of [RFC7231]), or determining how to process the payload (e.g.,
// Content-Encoding, Content-Type, Content-Range, and Trailer).
header_allowed_trailer :: proc(key: string) -> bool {
// odinfmt:disable
return (
// Message framing:
key != "transfer-encoding" &&
key != "content-length" &&
// Routing:
key != "host" &&
// Request modifiers:
key != "if-match" &&
key != "if-none-match" &&
key != "if-modified-since" &&
key != "if-unmodified-since" &&
key != "if-range" &&
// Authentication:
key != "www-authenticate" &&
key != "authorization" &&
key != "proxy-authenticate" &&
key != "proxy-authorization" &&
key != "cookie" &&
key != "set-cookie" &&
// Control data:
key != "age" &&
key != "cache-control" &&
key != "expires" &&
key != "date" &&
key != "location" &&
key != "retry-after" &&
key != "vary" &&
key != "warning" &&
// How to process:
key != "content-encoding" &&
key != "content-type" &&
key != "content-range" &&
key != "trailer")
// odinfmt:enable
}
@(private)
DATE_LENGTH :: len("Fri, 05 Feb 2023 09:01:10 GMT")
// Formats a time in the HTTP header format (no timezone conversion is done, GMT expected):
// `<day-name>, <day> <month> <year> <hour>:<minute>:<second> GMT`
date_write :: proc(w: io.Writer, t: time.Time) -> io.Error {
year, month, day := time.date(t)
hour, minute, second := time.clock_from_time(t)
wday := time.weekday(t)
// odinfmt:disable
io.write_string(w, DAYS[wday]) or_return // 'Fri, '
write_padded_int(w, day) or_return // 'Fri, 05'
io.write_string(w, MONTHS[month]) or_return // 'Fri, 05 Feb '
io.write_int(w, year) or_return // 'Fri, 05 Feb 2023'
io.write_byte(w, ' ') or_return // 'Fri, 05 Feb 2023 '
write_padded_int(w, hour) or_return // 'Fri, 05 Feb 2023 09'
io.write_byte(w, ':') or_return // 'Fri, 05 Feb 2023 09:'
write_padded_int(w, minute) or_return // 'Fri, 05 Feb 2023 09:01'
io.write_byte(w, ':') or_return // 'Fri, 05 Feb 2023 09:01:'
write_padded_int(w, second) or_return // 'Fri, 05 Feb 2023 09:01:10'
io.write_string(w, " GMT") or_return // 'Fri, 05 Feb 2023 09:01:10 GMT'
// odinfmt:enable
return nil
}
// Formats a time in the HTTP header format (no timezone conversion is done, GMT expected):
// `<day-name>, <day> <month> <year> <hour>:<minute>:<second> GMT`
date_string :: proc(t: time.Time, allocator := context.allocator) -> string {
b: strings.Builder
buf := make([]byte, DATE_LENGTH, allocator)
b.buf = slice.into_dynamic(buf)
date_write(strings.to_writer(&b), t)
return strings.to_string(b)
}
date_parse :: proc(value: string) -> (t: time.Time, ok: bool) #no_bounds_check {
if len(value) != DATE_LENGTH do return
// Remove 'Fri, '
value := value
value = value[5:]
// Parse '05'
day := strconv.parse_i64_of_base(value[:2], 10) or_return
value = value[2:]
// Parse ' Feb ' or '-Feb-' (latter is a deprecated format but should still be parsed).
month_index := -1
month_str := value[1:4]
value = value[5:]
for month, i in MONTHS[1:] {
if month_str == month[1:4] {
month_index = i
break
}
}
month_index += 1
if month_index <= 0 do return
year := strconv.parse_i64_of_base(value[:4], 10) or_return
value = value[4:]
hour := strconv.parse_i64_of_base(value[1:3], 10) or_return
value = value[4:]
minute := strconv.parse_i64_of_base(value[:2], 10) or_return
value = value[3:]
seconds := strconv.parse_i64_of_base(value[:2], 10) or_return
value = value[3:]
// Should have only 'GMT' left now.
if value != "GMT" do return
t = time.datetime_to_time(int(year), int(month_index), int(day), int(hour), int(minute), int(seconds)) or_return
ok = true
return
}
request_path_write :: proc(w: io.Writer, target: URL) -> io.Error {
// TODO: maybe net.percent_encode.
if target.path == "" {
io.write_byte(w, '/') or_return
} else {
io.write_string(w, target.path) or_return
}
if len(target.query) > 0 {
io.write_byte(w, '?') or_return
io.write_string(w, target.query) or_return
}
return nil
}
request_path :: proc(target: URL, allocator := context.allocator) -> (rq_path: string) {
res := strings.builder_make(0, len(target.path), allocator)
request_path_write(strings.to_writer(&res), target)
return strings.to_string(res)
}
_dynamic_unwritten :: proc(d: [dynamic]$E) -> []E {
return (cast([^]E)raw_data(d))[len(d):cap(d)]
}
_dynamic_add_len :: proc(d: ^[dynamic]$E, len: int) {
(transmute(^runtime.Raw_Dynamic_Array)d).len += len
}
@(private)
write_padded_int :: proc(w: io.Writer, i: int) -> io.Error {
if i < 10 {
io.write_string(w, PADDED_NUMS[i]) or_return
return nil
}
_, err := io.write_int(w, i)
return err
}
@(private)
write_escaped_newlines :: proc(w: io.Writer, v: string) -> io.Error {
for c in v {
if c == '\n' {
io.write_string(w, "\\n") or_return
} else {
io.write_rune(w, c) or_return
}
}
return nil
}
@(private)
PADDED_NUMS := [10]string{"00", "01", "02", "03", "04", "05", "06", "07", "08", "09"}
@(private)
DAYS := [7]string{"Sun, ", "Mon, ", "Tue, ", "Wed, ", "Thu, ", "Fri, ", "Sat, "}
@(private)
MONTHS := [13]string {
" ", // Jan is 1, so 0 should never be accessed.
" Jan ",
" Feb ",
" Mar ",
" Apr ",
" May ",
" Jun ",
" Jul ",
" Aug ",
" Sep ",
" Oct ",
" Nov ",
" Dec ",
}
@(private)
Atomic :: struct($T: typeid) {
raw: T,
}
@(private)
atomic_store :: #force_inline proc(a: ^Atomic($T), val: T) {
sync.atomic_store(&a.raw, val)
}
@(private)
atomic_load :: #force_inline proc(a: ^Atomic($T)) -> T {
return sync.atomic_load(&a.raw)
}
import "core:testing"
@(test)
test_dynamic_unwritten :: proc(t: ^testing.T) {
{
d := make([dynamic]int, 4, 8)
du := _dynamic_unwritten(d)
testing.expect(t, len(du) == 4)
}
{
d := slice.into_dynamic([]int{1, 2, 3, 4, 5})
_dynamic_add_len(&d, 3)
du := _dynamic_unwritten(d)
testing.expect(t, len(d) == 3)
testing.expect(t, len(du) == 2)
testing.expect(t, du[0] == 4)
testing.expect(t, du[1] == 5)
}
{
d := slice.into_dynamic([]int{})
du := _dynamic_unwritten(d)
testing.expect(t, len(du) == 0)
}
}

67
odin-http/mimes.odin Normal file
View file

@ -0,0 +1,67 @@
package http
import "core:path/filepath"
Mime_Type :: enum {
Plain,
Css,
Csv,
Gif,
Html,
Ico,
Jpeg,
Js,
Json,
Png,
Svg,
Url_Encoded,
Xml,
Zip,
Wasm,
}
mime_from_extension :: proc(s: string) -> Mime_Type {
//odinfmt:disable
switch filepath.ext(s) {
case ".html": return .Html
case ".js": return .Js
case ".css": return .Css
case ".csv": return .Csv
case ".xml": return .Xml
case ".zip": return .Zip
case ".json": return .Json
case ".ico": return .Ico
case ".gif": return .Gif
case ".jpeg": return .Jpeg
case ".png": return .Png
case ".svg": return .Svg
case ".wasm": return .Wasm
case: return .Plain
}
//odinfmt:enable
}
@(private="file")
_mime_to_content_type := [Mime_Type]string{
.Plain = "text/plain",
.Css = "text/css",
.Csv = "text/csv",
.Gif = "image/gif",
.Html = "text/html",
.Ico = "application/vnd.microsoft.ico",
.Jpeg = "image/jpeg",
.Js = "application/javascript",
.Json = "application/json",
.Png = "image/png",
.Svg = "image/svg+xml",
.Url_Encoded = "application/x-www-form-urlencoded",
.Xml = "text/xml",
.Zip = "application/zip",
.Wasm = "application/wasm",
}
mime_to_content_type :: proc(m: Mime_Type) -> string {
return _mime_to_content_type[m]
}

8
odin-http/mod.pkg Normal file
View file

@ -0,0 +1,8 @@
{
"version": "0.0.4-beta",
"description": "A HTTP/1.1 client/server implementation",
"url": "https://github.com/laytan/odin-http",
"readme": "README.md",
"license": "MIT",
"keywords": ["HTTP"]
}

99
odin-http/nbio/README.md Normal file
View file

@ -0,0 +1,99 @@
# package nbio
Package nbio implements a non-blocking IO abstraction layer over several platform-specific APIs.
This package implements an event loop based abstraction.
*TODO:*
- Benchmarking
- Some UDP implementations
*APIs:*
- Windows: [IOCP (IO Completion Ports)](https://en.wikipedia.org/wiki/Input/output_completion_port)
- Linux: [io_uring](https://en.wikipedia.org/wiki/Io_uring)
- Darwin: [KQueue](https://en.wikipedia.org/wiki/Kqueue)
*How to read the code:*
The file nbio.odin can be read a little bit like a header file:
it has all the procedures heavily explained and commented, and dispatches them to platform-specific code.
For more general usage, you can also have a look at the tests, the example below, or the generated docs even further below.
```odin
/*
This example shows a simple TCP server that echos back anything it receives.
Better error handling and closing/freeing connections are left for the reader.
*/
package main
import "core:fmt"
import "core:net"
import "core:os"
import nbio "nbio/poly"
Echo_Server :: struct {
io: nbio.IO,
sock: net.TCP_Socket,
connections: [dynamic]^Echo_Connection,
}
Echo_Connection :: struct {
server: ^Echo_Server,
sock: net.TCP_Socket,
buf: [50]byte,
}
main :: proc() {
server: Echo_Server
defer delete(server.connections)
nbio.init(&server.io)
defer nbio.destroy(&server.io)
sock, err := nbio.open_and_listen_tcp(&server.io, {net.IP4_Loopback, 8080})
fmt.assertf(err == nil, "Error opening and listening on localhost:8080: %v", err)
server.sock = sock
nbio.accept(&server.io, sock, &server, echo_on_accept)
// Start the event loop.
errno: os.Errno
for errno == os.ERROR_NONE {
errno = nbio.tick(&server.io)
}
fmt.assertf(errno == os.ERROR_NONE, "Server stopped with error code: %v", errno)
}
echo_on_accept :: proc(server: ^Echo_Server, client: net.TCP_Socket, source: net.Endpoint, err: net.Network_Error) {
fmt.assertf(err == nil, "Error accepting a connection: %v", err)
// Register a new accept for the next client.
nbio.accept(&server.io, server.sock, server, echo_on_accept)
c := new(Echo_Connection)
c.server = server
c.sock = client
append(&server.connections, c)
nbio.recv(&server.io, client, c.buf[:], c, echo_on_recv)
}
echo_on_recv :: proc(c: ^Echo_Connection, received: int, _: Maybe(net.Endpoint), err: net.Network_Error) {
fmt.assertf(err == nil, "Error receiving from client: %v", err)
nbio.send_all(&c.server.io, c.sock, c.buf[:received], c, echo_on_sent)
}
echo_on_sent :: proc(c: ^Echo_Connection, sent: int, err: net.Network_Error) {
fmt.assertf(err == nil, "Error sending to client: %v", err)
// Accept the next message, to then ultimately echo back again.
nbio.recv(&c.server.io, c.sock, c.buf[:], c, echo_on_recv)
}
```

View file

@ -0,0 +1,738 @@
#+build linux
package io_uring
import "core:math"
import "core:os"
import "core:sync"
import "core:sys/linux"
import "core:sys/unix"
DEFAULT_THREAD_IDLE_MS :: 1000
DEFAULT_ENTRIES :: 32
MAX_ENTRIES :: 4096
IO_Uring_Error :: enum {
None,
Entries_Zero,
Entries_Not_Power_Of_Two,
Entries_Too_Large,
Params_Outside_Accessible_Address_Space,
Arguments_Invalid,
Process_Fd_Quota_Exceeded,
System_Fd_Quota_Exceeded,
System_Resources,
Permission_Denied,
System_Outdated,
Submission_Queue_Full,
File_Descriptor_Invalid,
Completion_Queue_Overcommitted,
Submission_Queue_Entry_Invalid,
Buffer_Invalid,
Ring_Shutting_Down,
Opcode_Not_Supported,
Signal_Interrupt,
Unexpected,
}
IO_Uring :: struct {
fd: os.Handle,
sq: Submission_Queue,
cq: Completion_Queue,
flags: u32,
features: u32,
}
// Set up an IO_Uring with default parameters, `entries` must be a power of 2 between 1 and 4096.
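// Illustrative usage (a sketch, not in the original source):
//
//	params: io_uring_params
//	ring, err := io_uring_make(&params, 64)
//	if err != .None { /* handle setup failure */ }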
io_uring_make :: proc(
params: ^io_uring_params,
entries: u32 = DEFAULT_ENTRIES,
flags: u32 = 0,
) -> (
ring: IO_Uring,
err: IO_Uring_Error,
) {
params.flags = flags
params.sq_thread_idle = DEFAULT_THREAD_IDLE_MS
err = io_uring_init(&ring, entries, params)
return
}
// Initialize and setup a io_uring with more control than io_uring_make.
io_uring_init :: proc(ring: ^IO_Uring, entries: u32, params: ^io_uring_params) -> (err: IO_Uring_Error) {
check_entries(entries) or_return
res := sys_io_uring_setup(entries, params)
if res < 0 {
#partial switch os.Platform_Error(-res) {
case .EFAULT:
return .Params_Outside_Accessible_Address_Space
// The resv array contains non-zero data, p.flags contains an unsupported flag,
// entries out of bounds, IORING_SETUP_SQ_AFF was specified without IORING_SETUP_SQPOLL,
// or IORING_SETUP_CQSIZE was specified but linux.io_uring_params.cq_entries was invalid:
case .EINVAL:
return .Arguments_Invalid
case .EMFILE:
return .Process_Fd_Quota_Exceeded
case .ENFILE:
return .System_Fd_Quota_Exceeded
case .ENOMEM:
return .System_Resources
// IORING_SETUP_SQPOLL was specified but effective user ID lacks sufficient privileges,
// or a container seccomp policy prohibits io_uring syscalls:
case .EPERM:
return .Permission_Denied
case .ENOSYS:
return .System_Outdated
case:
return .Unexpected
}
}
fd := os.Handle(res)
// Unsupported features.
assert((params.features & IORING_FEAT_SINGLE_MMAP) != 0)
assert((params.flags & IORING_SETUP_CQE32) == 0)
assert((params.flags & IORING_SETUP_SQE128) == 0)
sq, ok := submission_queue_make(fd, params)
if !ok do return .System_Resources
ring.fd = fd
ring.sq = sq
ring.cq = completion_queue_make(fd, params, &sq)
ring.flags = params.flags
ring.features = params.features
return
}
// Checks if the entries conform to the kernel rules.
@(private)
check_entries :: proc(entries: u32) -> (err: IO_Uring_Error) {
switch {
case entries > MAX_ENTRIES:
err = .Entries_Too_Large
case entries == 0:
err = .Entries_Zero
case !math.is_power_of_two(int(entries)):
err = .Entries_Not_Power_Of_Two
case:
err = .None
}
return
}
io_uring_destroy :: proc(ring: ^IO_Uring) {
assert(ring.fd >= 0)
submission_queue_destroy(&ring.sq)
os.close(ring.fd)
ring.fd = -1
}
// Returns a pointer to a vacant submission queue entry, or an error if the submission queue is full.
get_sqe :: proc(ring: ^IO_Uring) -> (sqe: ^io_uring_sqe, err: IO_Uring_Error) {
sq := &ring.sq
head: u32 = sync.atomic_load_explicit(sq.head, .Acquire)
next := sq.sqe_tail + 1
if int(next - head) > len(sq.sqes) {
err = .Submission_Queue_Full
return
}
sqe = &sq.sqes[sq.sqe_tail & sq.mask]
sqe^ = {}
sq.sqe_tail = next
return
}
// Submits the submission queue entries acquired via get_sqe().
// Returns the number of entries submitted.
// Optionally wait for a number of events by setting wait_nr.
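// Typical flow (illustrative): acquire an entry with get_sqe(), fill in the SQE, then call submit().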
submit :: proc(ring: ^IO_Uring, wait_nr: u32 = 0) -> (n_submitted: u32, err: IO_Uring_Error) {
n_submitted = flush_sq(ring)
flags: u32 = 0
if sq_ring_needs_enter(ring, &flags) || wait_nr > 0 {
if wait_nr > 0 || ring.flags & IORING_SETUP_IOPOLL != 0 {
flags |= IORING_ENTER_GETEVENTS
}
n_submitted, err = enter(ring, n_submitted, wait_nr, flags)
}
return
}
// Tells the kernel that submission queue entries were submitted and/or we want to wait for their completion queue entries.
// Returns the number of submission queue entries that were submitted.
enter :: proc(
ring: ^IO_Uring,
n_to_submit: u32,
min_complete: u32,
flags: u32,
) -> (
n_submitted: u32,
err: IO_Uring_Error,
) {
assert(ring.fd >= 0)
ns := sys_io_uring_enter(u32(ring.fd), n_to_submit, min_complete, flags, nil)
if ns < 0 {
#partial switch os.Platform_Error(-ns) {
case .NONE:
err = .None
case .EAGAIN:
// The kernel was unable to allocate memory or ran out of resources for the request. (try again)
err = .System_Resources
case .EBADF:
// The SQE `fd` is invalid, or `IOSQE_FIXED_FILE` was set but no files were registered
err = .File_Descriptor_Invalid
// case os.EBUSY: // TODO: why is this not in os_linux
// // Attempted to overcommit the number of requests it can have pending. Should wait for some completions and try again.
// err = .Completion_Queue_Overcommitted
case .EINVAL:
// The SQE is invalid, or valid but the ring was setup with `IORING_SETUP_IOPOLL`
err = .Submission_Queue_Entry_Invalid
case .EFAULT:
// The buffer is outside the process' accessible address space, or `IORING_OP_READ_FIXED`
// or `IORING_OP_WRITE_FIXED` was specified but no buffers were registered, or the range
// described by `addr` and `len` is not within the buffer registered at `buf_index`
err = .Buffer_Invalid
case .ENXIO:
err = .Ring_Shutting_Down
case .EOPNOTSUPP:
// The kernel believes the `fd` doesn't refer to an `io_uring`, or the opcode isn't supported by this kernel (more likely)
err = .Opcode_Not_Supported
case .EINTR:
// The op was interrupted by the delivery of a signal before it could complete. This can happen while waiting for events with `IORING_ENTER_GETEVENTS`
err = .Signal_Interrupt
case:
err = .Unexpected
}
return
}
n_submitted = u32(ns)
return
}
// Sync internal state with kernel ring state on the submission queue side.
// Returns the number of all pending events in the submission queue.
// Rationale is to determine that an enter call is needed.
flush_sq :: proc(ring: ^IO_Uring) -> (n_pending: u32) {
sq := &ring.sq
to_submit := sq.sqe_tail - sq.sqe_head
if to_submit != 0 {
tail := sq.tail^
i: u32 = 0
for ; i < to_submit; i += 1 {
sq.array[tail & sq.mask] = sq.sqe_head & sq.mask
tail += 1
sq.sqe_head += 1
}
sync.atomic_store_explicit(sq.tail, tail, .Release)
}
n_pending = sq_ready(ring)
return
}
// Returns true if we are not using an SQ thread (thus nobody submits but us),
// or if IORING_SQ_NEED_WAKEUP is set and the SQ thread must be explicitly awakened.
// For the latter case, we set the SQ thread wakeup flag.
// Matches the implementation of sq_ring_needs_enter() in liburing.
sq_ring_needs_enter :: proc(ring: ^IO_Uring, flags: ^u32) -> bool {
assert(flags^ == 0)
if ring.flags & IORING_SETUP_SQPOLL == 0 do return true
if sync.atomic_load_explicit(ring.sq.flags, .Relaxed) & IORING_SQ_NEED_WAKEUP != 0 {
flags^ |= IORING_ENTER_SQ_WAKEUP
return true
}
return false
}
// Returns the number of submission queue entries in the submission queue.
sq_ready :: proc(ring: ^IO_Uring) -> u32 {
// Always use the shared ring state (i.e. head and not sqe_head) to avoid going out of sync,
// see https://github.com/axboe/liburing/issues/92.
return ring.sq.sqe_tail - sync.atomic_load_explicit(ring.sq.head, .Acquire)
}
// Returns the number of completion queue entries in the completion queue (yet to consume).
cq_ready :: proc(ring: ^IO_Uring) -> (n_ready: u32) {
return sync.atomic_load_explicit(ring.cq.tail, .Acquire) - ring.cq.head^
}
// Copies as many CQEs as are ready, and that can fit into the destination `cqes` slice.
// If none are available, enters into the kernel to wait for at most `wait_nr` CQEs.
// Returns the number of CQEs copied, advancing the CQ ring.
// Provides all the wait/peek methods found in liburing, but with batching and a single method.
copy_cqes :: proc(ring: ^IO_Uring, cqes: []io_uring_cqe, wait_nr: u32) -> (n_copied: u32, err: IO_Uring_Error) {
n_copied = copy_cqes_ready(ring, cqes)
if n_copied > 0 do return
if wait_nr > 0 || cq_ring_needs_flush(ring) {
_ = enter(ring, 0, wait_nr, IORING_ENTER_GETEVENTS) or_return
n_copied = copy_cqes_ready(ring, cqes)
}
return
}
copy_cqes_ready :: proc(ring: ^IO_Uring, cqes: []io_uring_cqe) -> (n_copied: u32) {
n_ready := cq_ready(ring)
n_copied = min(u32(len(cqes)), n_ready)
head := ring.cq.head^
tail := head + n_copied
i := 0
for head != tail {
cqes[i] = ring.cq.cqes[head & ring.cq.mask]
head += 1
i += 1
}
cq_advance(ring, n_copied)
return
}
cq_ring_needs_flush :: proc(ring: ^IO_Uring) -> bool {
return sync.atomic_load_explicit(ring.sq.flags, .Relaxed) & IORING_SQ_CQ_OVERFLOW != 0
}
// For advanced use cases only that implement custom completion queue methods.
// If you use copy_cqes() or copy_cqe() you must not call cqe_seen() or cq_advance().
// Must be called exactly once after a zero-copy CQE has been processed by your application.
// Not idempotent, calling more than once will result in other CQEs being lost.
// Matches the implementation of cqe_seen() in liburing.
cqe_seen :: proc(ring: ^IO_Uring) {
cq_advance(ring, 1)
}
// For advanced use cases only that implement custom completion queue methods.
// Matches the implementation of cq_advance() in liburing.
cq_advance :: proc(ring: ^IO_Uring, count: u32) {
if count == 0 do return
sync.atomic_store_explicit(ring.cq.head, ring.cq.head^ + count, .Release)
}
// Queues (but does not submit) an SQE to perform an `fsync(2)`.
// Returns a pointer to the SQE so that you can further modify the SQE for advanced use cases.
fsync :: proc(
ring: ^IO_Uring,
user_data: u64,
fd: os.Handle,
flags: u32,
) -> (
sqe: ^io_uring_sqe,
err: IO_Uring_Error,
) {
sqe = get_sqe(ring) or_return
sqe.opcode = .FSYNC
sqe.rw_flags = i32(flags)
sqe.fd = i32(fd)
sqe.user_data = user_data
return
}
// Queues (but does not submit) an SQE to perform a no-op.
// Returns a pointer to the SQE so that you can further modify the SQE for advanced use cases.
// A no-op is more useful than it may appear at first glance.
// For example, you could call `drain_previous_sqes()` on the returned SQE, to use the no-op to
// know when the ring is idle before acting on a kill signal.
nop :: proc(ring: ^IO_Uring, user_data: u64) -> (sqe: ^io_uring_sqe, err: IO_Uring_Error) {
sqe = get_sqe(ring) or_return
sqe.opcode = .NOP
sqe.user_data = user_data
return
}
// Queues (but does not submit) an SQE to perform a `read(2)`.
read :: proc(
ring: ^IO_Uring,
user_data: u64,
fd: os.Handle,
buf: []u8,
offset: u64,
) -> (
sqe: ^io_uring_sqe,
err: IO_Uring_Error,
) {
sqe = get_sqe(ring) or_return
sqe.opcode = .READ
sqe.fd = i32(fd)
sqe.addr = cast(u64)uintptr(raw_data(buf))
sqe.len = u32(len(buf))
sqe.off = offset
sqe.user_data = user_data
return
}
// Queues (but does not submit) an SQE to perform a `write(2)`.
write :: proc(
ring: ^IO_Uring,
user_data: u64,
fd: os.Handle,
buf: []u8,
offset: u64,
) -> (
sqe: ^io_uring_sqe,
err: IO_Uring_Error,
) {
sqe = get_sqe(ring) or_return
sqe.opcode = .WRITE
sqe.fd = i32(fd)
sqe.addr = cast(u64)uintptr(raw_data(buf))
sqe.len = u32(len(buf))
sqe.off = offset
sqe.user_data = user_data
return
}
// Queues (but does not submit) an SQE to perform an `accept4(2)` on a socket.
// `addr`,`addr_len` optional
accept :: proc(
ring: ^IO_Uring,
user_data: u64,
sockfd: os.Socket,
addr: ^os.SOCKADDR = nil,
addr_len: ^os.socklen_t = nil,
flags: u32 = 0,
) -> (
sqe: ^io_uring_sqe,
err: IO_Uring_Error,
) {
sqe = get_sqe(ring) or_return
sqe.opcode = IORING_OP.ACCEPT
sqe.fd = i32(sockfd)
sqe.addr = cast(u64)uintptr(addr)
sqe.off = cast(u64)uintptr(addr_len)
sqe.rw_flags = i32(flags)
sqe.user_data = user_data
return
}
// Queue (but does not submit) an SQE to perform a `connect(2)` on a socket.
connect :: proc(
ring: ^IO_Uring,
user_data: u64,
sockfd: os.Socket,
addr: ^os.SOCKADDR,
addr_len: os.socklen_t,
) -> (
sqe: ^io_uring_sqe,
err: IO_Uring_Error,
) {
sqe = get_sqe(ring) or_return
sqe.opcode = IORING_OP.CONNECT
sqe.fd = i32(sockfd)
sqe.addr = cast(u64)uintptr(addr)
sqe.off = cast(u64)addr_len
sqe.user_data = user_data
return
}
// Queues (but does not submit) an SQE to perform a `recv(2)`.
recv :: proc(
ring: ^IO_Uring,
user_data: u64,
sockfd: os.Socket,
buf: []byte,
flags: u32,
) -> (
sqe: ^io_uring_sqe,
err: IO_Uring_Error,
) {
sqe = get_sqe(ring) or_return
sqe.opcode = IORING_OP.RECV
sqe.fd = i32(sockfd)
sqe.addr = cast(u64)uintptr(raw_data(buf))
sqe.len = cast(u32)uintptr(len(buf))
sqe.rw_flags = i32(flags)
sqe.user_data = user_data
return
}
// Queues (but does not submit) an SQE to perform a `send(2)`.
send :: proc(
ring: ^IO_Uring,
user_data: u64,
sockfd: os.Socket,
buf: []byte,
flags: u32,
) -> (
sqe: ^io_uring_sqe,
err: IO_Uring_Error,
) {
sqe = get_sqe(ring) or_return
sqe.opcode = IORING_OP.SEND
sqe.fd = i32(sockfd)
sqe.addr = cast(u64)uintptr(raw_data(buf))
sqe.len = u32(len(buf))
sqe.rw_flags = i32(flags)
sqe.user_data = user_data
return
}
// Queues (but does not submit) an SQE to perform an `openat(2)`.
openat :: proc(
ring: ^IO_Uring,
user_data: u64,
fd: os.Handle,
path: cstring,
mode: u32,
flags: u32,
) -> (
sqe: ^io_uring_sqe,
err: IO_Uring_Error,
) {
sqe = get_sqe(ring) or_return
sqe.opcode = IORING_OP.OPENAT
sqe.fd = i32(fd)
sqe.addr = cast(u64)transmute(uintptr)path
sqe.len = mode
sqe.rw_flags = i32(flags)
sqe.user_data = user_data
return
}
// Queues (but does not submit) an SQE to perform a `close(2)`.
close :: proc(ring: ^IO_Uring, user_data: u64, fd: os.Handle) -> (sqe: ^io_uring_sqe, err: IO_Uring_Error) {
sqe, err = get_sqe(ring)
if err != .None {return}
sqe.opcode = IORING_OP.CLOSE
sqe.fd = i32(fd)
sqe.user_data = user_data
return
}
// Queues (but does not submit) an SQE to register a timeout operation.
// Returns a pointer to the SQE.
//
// The timeout will complete when either the timeout expires, or after the specified number of
// events complete (if `count` is greater than `0`).
//
// `flags` may be `0` for a relative timeout, or `IORING_TIMEOUT_ABS` for an absolute timeout.
//
// The completion event result will be `-ETIME` if the timeout completed through expiration,
// `0` if the timeout completed after the specified number of events, or `-ECANCELED` if the
// timeout was removed before it expired.
//
// io_uring timeouts use the `CLOCK.MONOTONIC` clock source.
timeout :: proc(
ring: ^IO_Uring,
user_data: u64,
ts: ^linux.Time_Spec,
count: u32,
flags: u32,
) -> (
sqe: ^io_uring_sqe,
err: IO_Uring_Error,
) {
sqe = get_sqe(ring) or_return
sqe.opcode = IORING_OP.TIMEOUT
sqe.fd = -1
sqe.addr = cast(u64)uintptr(ts)
sqe.len = 1
sqe.off = u64(count)
sqe.rw_flags = i32(flags)
sqe.user_data = user_data
return
}
// Queues (but does not submit) an SQE to remove an existing timeout operation.
// Returns a pointer to the SQE.
//
// The timeout is identified by its `user_data`.
//
// The completion event result will be `0` if the timeout was found and cancelled successfully,
// `-EBUSY` if the timeout was found but expiration was already in progress, or
// `-ENOENT` if the timeout was not found.
timeout_remove :: proc(
ring: ^IO_Uring,
user_data: u64,
timeout_user_data: u64,
flags: u32,
) -> (
sqe: ^io_uring_sqe,
err: IO_Uring_Error,
) {
sqe = get_sqe(ring) or_return
sqe.opcode = IORING_OP.TIMEOUT_REMOVE
sqe.fd = -1
sqe.addr = timeout_user_data
sqe.rw_flags = i32(flags)
sqe.user_data = user_data
return
}
// Queues (but does not submit) an SQE to add a link timeout operation.
// Returns a pointer to the SQE.
//
// You need to set linux.IOSQE_IO_LINK to flags of the target operation
// and then call this method right after the target operation.
// See https://lwn.net/Articles/803932/ for detail.
//
// If the dependent request finishes before the linked timeout, the timeout
// is canceled. If the timeout finishes before the dependent request, the
// dependent request will be canceled.
//
// The completion event result of the link_timeout will be
// `-ETIME` if the timeout finishes before the dependent request
// (in this case, the completion event result of the dependent request will
// be `-ECANCELED`), or
// `-EALREADY` if the dependent request finishes before the linked timeout.
link_timeout :: proc(
ring: ^IO_Uring,
user_data: u64,
ts: ^os.Unix_File_Time,
flags: u32,
) -> (
sqe: ^io_uring_sqe,
err: IO_Uring_Error,
) {
sqe = get_sqe(ring) or_return
sqe.opcode = IORING_OP.LINK_TIMEOUT
sqe.fd = -1
sqe.addr = cast(u64)uintptr(ts)
sqe.len = 1
sqe.rw_flags = i32(flags)
sqe.user_data = user_data
return
}
poll_add :: proc(
ring: ^IO_Uring,
user_data: u64,
fd: os.Handle,
events: linux.Fd_Poll_Events,
flags: IORing_Poll_Flags,
) -> (
sqe: ^io_uring_sqe,
err: IO_Uring_Error,
) {
sqe = get_sqe(ring) or_return
sqe.opcode = IORING_OP.POLL_ADD
sqe.fd = i32(fd)
sqe.poll_events = transmute(u16)events
sqe.len = transmute(u32)flags
sqe.user_data = user_data
return
}
poll_remove :: proc(
ring: ^IO_Uring,
user_data: u64,
fd: os.Handle,
events: linux.Fd_Poll_Events,
) -> (
sqe: ^io_uring_sqe,
err: IO_Uring_Error,
) {
sqe = get_sqe(ring) or_return
sqe.opcode = IORING_OP.POLL_REMOVE
sqe.fd = i32(fd)
sqe.poll_events = transmute(u16)events
sqe.user_data = user_data
return
}
Submission_Queue :: struct {
head: ^u32,
tail: ^u32,
mask: u32,
flags: ^u32,
dropped: ^u32,
array: []u32,
sqes: []io_uring_sqe,
mmap: []u8,
mmap_sqes: []u8,
// We use `sqe_head` and `sqe_tail` in the same way as liburing:
// We increment `sqe_tail` (but not `tail`) for each call to `get_sqe()`.
// We then set `tail` to `sqe_tail` once, only when these events are actually submitted.
// This allows us to amortize the cost of the atomic store to `tail` across multiple SQEs.
sqe_head: u32,
sqe_tail: u32,
}
submission_queue_make :: proc(fd: os.Handle, params: ^io_uring_params) -> (sq: Submission_Queue, ok: bool) {
assert(fd >= 0)
// Rings without IORING_FEAT_SINGLE_MMAP are not supported.
assert((params.features & IORING_FEAT_SINGLE_MMAP) != 0)
sq_size := params.sq_off.array + params.sq_entries * size_of(u32)
cq_size := params.cq_off.cqes + params.cq_entries * size_of(io_uring_cqe)
size := max(sq_size, cq_size)
mmap_result := unix.sys_mmap(
nil,
uint(size),
unix.PROT_READ | unix.PROT_WRITE,
unix.MAP_SHARED,
/* | unix.MAP_POPULATE */
int(fd),
IORING_OFF_SQ_RING,
)
if mmap_result < 0 do return
defer if !ok do unix.sys_munmap(rawptr(uintptr(mmap_result)), uint(size))
mmap := transmute([^]u8)uintptr(mmap_result)
size_sqes := params.sq_entries * size_of(io_uring_sqe)
mmap_sqes_result := unix.sys_mmap(
nil,
uint(size_sqes),
unix.PROT_READ | unix.PROT_WRITE,
unix.MAP_SHARED,
/* | unix.MAP_POPULATE */
int(fd),
IORING_OFF_SQES,
)
if mmap_sqes_result < 0 do return
array := cast([^]u32)&mmap[params.sq_off.array]
sqes := cast([^]io_uring_sqe)uintptr(mmap_sqes_result)
mmap_sqes := cast([^]u8)uintptr(mmap_sqes_result)
sq.head = cast(^u32)&mmap[params.sq_off.head]
sq.tail = cast(^u32)&mmap[params.sq_off.tail]
sq.mask = (cast(^u32)&mmap[params.sq_off.ring_mask])^
sq.flags = cast(^u32)&mmap[params.sq_off.flags]
sq.dropped = cast(^u32)&mmap[params.sq_off.dropped]
sq.array = array[:params.sq_entries]
sq.sqes = sqes[:params.sq_entries]
sq.mmap = mmap[:size]
sq.mmap_sqes = mmap_sqes[:size_sqes]
ok = true
return
}
submission_queue_destroy :: proc(sq: ^Submission_Queue) {
unix.sys_munmap(raw_data(sq.mmap), uint(len(sq.mmap)))
unix.sys_munmap(raw_data(sq.mmap_sqes), uint(len(sq.mmap_sqes)))
}
Completion_Queue :: struct {
head: ^u32,
tail: ^u32,
mask: u32,
overflow: ^u32,
cqes: []io_uring_cqe,
}
completion_queue_make :: proc(fd: os.Handle, params: ^io_uring_params, sq: ^Submission_Queue) -> Completion_Queue {
assert(fd >= 0)
// Rings without IORING_FEAT_SINGLE_MMAP are not supported.
assert((params.features & IORING_FEAT_SINGLE_MMAP) != 0)
mmap := sq.mmap
cqes := cast([^]io_uring_cqe)&mmap[params.cq_off.cqes]
return(
{
head = cast(^u32)&mmap[params.cq_off.head],
tail = cast(^u32)&mmap[params.cq_off.tail],
mask = (cast(^u32)&mmap[params.cq_off.ring_mask])^,
overflow = cast(^u32)&mmap[params.cq_off.overflow],
cqes = cqes[:params.cq_entries],
} \
)
}

View file

@ -0,0 +1,478 @@
#+build linux
package io_uring
import "base:intrinsics"
//odinfmt:disable
SYS_io_uring_setup: uintptr : 425
SYS_io_uring_enter: uintptr : 426
SYS_io_uring_register: uintptr : 427
//odinfmt:enable
NSIG :: 65
sigset_t :: [1024 / 32]u32
io_uring_params :: struct {
sq_entries: u32,
cq_entries: u32,
flags: u32,
sq_thread_cpu: u32,
sq_thread_idle: u32,
features: u32,
wq_fd: u32,
resv: [3]u32,
sq_off: io_sqring_offsets,
cq_off: io_cqring_offsets,
}
#assert(size_of(io_uring_params) == 120)
io_sqring_offsets :: struct {
head: u32,
tail: u32,
ring_mask: u32,
ring_entries: u32,
flags: u32,
dropped: u32,
array: u32,
resv1: u32,
user_addr: u64,
}
io_cqring_offsets :: struct {
head: u32,
tail: u32,
ring_mask: u32,
ring_entries: u32,
overflow: u32,
cqes: u32,
flags: u32,
resv1: u32,
user_addr: u64,
}
// Submission queue entry.
io_uring_sqe :: struct {
opcode: IORING_OP, // u8
flags: u8, /* IOSQE_ flags */
ioprio: u16, /* ioprio for the request */
fd: i32, /* file descriptor to do IO on */
using __offset: struct #raw_union {
off: u64, /* offset into file */
addr2: u64,
using _: struct {
cmd_op: u32,
__pad1: u32,
},
},
using __iovecs: struct #raw_union {
addr: u64, /* pointer to buffer or iovecs */
splice_off_in: u64,
},
len: u32, /* buffer size or number of iovecs */
using __contents: struct #raw_union {
rw_flags: i32,
fsync_flags: u32,
poll_events: u16, /* compatibility */
poll32_events: u32, /* word-reversed for BE */
sync_range_flags: u32,
msg_flags: u32,
timeout_flags: u32,
accept_flags: u32,
cancel_flags: u32,
open_flags: u32,
statx_flags: u32,
fadvise_advice: u32,
splice_flags: u32,
rename_flags: u32,
unlink_flags: u32,
hardlink_flags: u32,
xattr_flags: u32,
msg_ring_flags: u32,
uring_cmd_flags: u32,
},
user_data: u64, /* data to be passed back at completion time */
/* pack this to avoid bogus arm OABI complaints */
using __buffer: struct #raw_union {
/* index into fixed buffers, if used */
buf_index: u16,
/* for grouped buffer selection */
buf_group: u16,
},
/* personality to use, if used */
personality: u16,
using _: struct #raw_union {
splice_fd_in: i32,
file_index: u32,
using _: struct {
addr_len: u16,
__pad3: [1]u16,
},
},
using __: struct #raw_union {
using _: struct {
addr3: u64,
__pad2: [1]u64,
},
/*
* If the ring is initialized with IORING_SETUP_SQE128, then
* this field is used for 80 bytes of arbitrary command data
* NOTE: This is currently not supported.
*/
// cmd: [^]u8,
},
}
#assert(size_of(io_uring_sqe) == 64)
// Completion queue entry.
io_uring_cqe :: struct {
user_data: u64, /* sq.data submission passed back */
res: i32, /* result code for this event */
flags: u32,
/*
* If the ring is initialized with IORING_SETUP_CQE32, then this field
* contains 16-bytes of padding, doubling the size of the CQE.
* NOTE: This is currently not supported.
*/
// big_cqe: [^]u64,
}
#assert(size_of(io_uring_cqe) == 16)
/*
* sqe.flags
*/
/* use fixed fileset */
IOSQE_FIXED_FILE: u32 : (1 << 0)
/* issue after inflight IO */
IOSQE_IO_DRAIN: u32 : (1 << 1)
/* links next sqe */
IOSQE_IO_LINK: u32 : (1 << 2)
/* like LINK, but stronger */
IOSQE_IO_HARDLINK: u32 : (1 << 3)
/* always go async */
IOSQE_ASYNC: u32 : (1 << 4)
/* select buffer from sq.buf_group */
IOSQE_BUFFER_SELECT: u32 : (1 << 5)
/* don't post CQE if request succeeded */
IOSQE_CQE_SKIP_SUCCESS: u32 : (1 << 6)
/*
* io_uring_setup() flags
*/
IORING_SETUP_IOPOLL: u32 : (1 << 0) /* io_context is polled */
IORING_SETUP_SQPOLL: u32 : (1 << 1) /* SQ poll thread */
IORING_SETUP_SQ_AFF: u32 : (1 << 2) /* sq_thread_cpu is valid */
IORING_SETUP_CQSIZE: u32 : (1 << 3) /* app defines CQ size */
IORING_SETUP_CLAMP: u32 : (1 << 4) /* clamp SQ/CQ ring sizes */
IORING_SETUP_ATTACH_WQ: u32 : (1 << 5) /* attach to existing wq */
IORING_SETUP_R_DISABLED: u32 : (1 << 6) /* start with ring disabled */
IORING_SETUP_SUBMIT_ALL: u32 : (1 << 7) /* continue submit on error */
// Cooperative task running. When requests complete, they often require
// forcing the submitter to transition to the kernel to complete. If this
// flag is set, work will be done when the task transitions anyway, rather
// than force an inter-processor interrupt reschedule. This avoids interrupting
// a task running in userspace, and saves an IPI.
IORING_SETUP_COOP_TASKRUN: u32 : (1 << 8)
// If COOP_TASKRUN is set, get notified if task work is available for
// running and a kernel transition would be needed to run it. This sets
// IORING_SQ_TASKRUN in the sq ring flags. Not valid with COOP_TASKRUN.
IORING_SETUP_TASKRUN_FLAG: u32 : (1 << 9)
IORING_SETUP_SQE128: u32 : (1 << 10) /* SQEs are 128 byte */
IORING_SETUP_CQE32: u32 : (1 << 11) /* CQEs are 32 byte */
// Only one task is allowed to submit requests
IORING_SETUP_SINGLE_ISSUER: u32 : (1 << 12)
// Defer running task work to get events.
// Rather than running bits of task work whenever the task transitions
// try to do it just before it is needed.
IORING_SETUP_DEFER_TASKRUN: u32 : (1 << 13)
/*
* sqe.uring_cmd_flags
* IORING_URING_CMD_FIXED use registered buffer; pass this flag
* along with setting sqe.buf_index.
*/
IORING_URING_CMD_FIXED: u32 : (1 << 0)
/*
* sqe.fsync_flags
*/
IORING_FSYNC_DATASYNC: u32 : (1 << 0)
/*
* sqe.timeout_flags
*/
IORING_TIMEOUT_ABS: u32 : (1 << 0)
IORING_TIMEOUT_UPDATE: u32 : (1 << 1)
IORING_TIMEOUT_BOOTTIME: u32 : (1 << 2)
IORING_TIMEOUT_REALTIME: u32 : (1 << 3)
IORING_LINK_TIMEOUT_UPDATE: u32 : (1 << 4)
IORING_TIMEOUT_ETIME_SUCCESS: u32 : (1 << 5)
IORING_TIMEOUT_CLOCK_MASK: u32 : (IORING_TIMEOUT_BOOTTIME | IORING_TIMEOUT_REALTIME)
IORING_TIMEOUT_UPDATE_MASK: u32 : (IORING_TIMEOUT_UPDATE | IORING_LINK_TIMEOUT_UPDATE)
/*
* sq_ring.flags
*/
IORING_SQ_NEED_WAKEUP: u32 : (1 << 0) /* needs io_uring_enter wakeup */
IORING_SQ_CQ_OVERFLOW: u32 : (1 << 1) /* CQ ring is overflown */
IORING_SQ_TASKRUN: u32 : (1 << 2) /* task should enter the kernel */
/*
* sqe.splice_flags
* extends splice(2) flags
*/
SPLICE_F_FD_IN_FIXED: u32 : (1 << 31) /* the last bit of __u32 */
/*
* POLL_ADD flags. Note that since sqe.poll_events is the flag space, the command flags for POLL_ADD are stored in sqe.len.
*
* IORING_POLL_ADD_MULTI Multishot poll. Sets IORING_CQE_F_MORE if the poll handler will continue to report CQEs on behalf of the same SQE.
* IORING_POLL_UPDATE Update existing poll request, matching sqe.addr as the old user_data field.
*
* IORING_POLL_LEVEL Level triggered poll.
*/
IORING_POLL_ADD_MULTI: u32 : (1 << 0)
IORING_POLL_UPDATE_EVENTS: u32 : (1 << 1)
IORING_POLL_UPDATE_USER_DATA: u32 : (1 << 2)
IORING_POLL_ADD_LEVEL: u32 : (1 << 3)
IORing_Poll_Bits :: enum {
ADD_MULTI,
UPDATE_EVENTS,
UPDATE_USER_DATA,
ADD_LEVEL,
}
IORing_Poll_Flags :: bit_set[IORing_Poll_Bits; u32]
/*
* send/sendmsg and recv/recvmsg flags (sq.ioprio)
*
* IORING_RECVSEND_POLL_FIRST If set, instead of first attempting to send
* or receive and arm poll if that yields an
* -EAGAIN result, arm poll upfront and skip
* the initial transfer attempt.
*
* IORING_RECV_MULTISHOT Multishot recv. Sets IORING_CQE_F_MORE if
* the handler will continue to report
* CQEs on behalf of the same SQE.
*
* IORING_RECVSEND_FIXED_BUF Use registered buffers, the index is stored in
* the buf_index field.
*
* IORING_SEND_ZC_REPORT_USAGE
* If set, SEND[MSG]_ZC should report
* the zerocopy usage in cqe.res
* for the IORING_CQE_F_NOTIF cqe.
* 0 is reported if zerocopy was actually possible.
* IORING_NOTIF_USAGE_ZC_COPIED if data was copied
* (at least partially).
*/
IORING_RECVSEND_POLL_FIRST: u32 : (1 << 0)
IORING_RECV_MULTISHOT: u32 : (1 << 1)
IORING_RECVSEND_FIXED_BUF: u32 : (1 << 2)
IORING_SEND_ZC_REPORT_USAGE: u32 : (1 << 3)
/*
* cqe.res for IORING_CQE_F_NOTIF if
* IORING_SEND_ZC_REPORT_USAGE was requested
*
* It should be treated as a flag, all other
* bits of cqe.res should be treated as reserved!
*/
IORING_NOTIF_USAGE_ZC_COPIED: u32 : (1 << 31)
/*
* accept flags stored in sq.ioprio
*/
IORING_ACCEPT_MULTISHOT: u32 : (1 << 0)
/*
* IORING_OP_MSG_RING command types, stored in sq.addr
*/
IORING_MSG :: enum {
DATA, /* pass sq.len as 'res' and off as user_data */
SEND_FD, /* send a registered fd to another ring */
}
/*
* IORING_OP_MSG_RING flags (sq.msg_ring_flags)
*
* IORING_MSG_RING_CQE_SKIP Don't post a CQE to the target ring. Not
* applicable for IORING_MSG_DATA, obviously.
*/
IORING_MSG_RING_CQE_SKIP: u32 : (1 << 0)
/* Pass through the flags from sq.file_index to cqe.flags */
IORING_MSG_RING_FLAGS_PASS: u32 : (1 << 1)
IORING_OP :: enum u8 {
NOP,
READV,
WRITEV,
FSYNC,
READ_FIXED,
WRITE_FIXED,
POLL_ADD,
POLL_REMOVE,
SYNC_FILE_RANGE,
SENDMSG,
RECVMSG,
TIMEOUT,
TIMEOUT_REMOVE,
ACCEPT,
ASYNC_CANCEL,
LINK_TIMEOUT,
CONNECT,
FALLOCATE,
OPENAT,
CLOSE,
FILES_UPDATE,
STATX,
READ,
WRITE,
FADVISE,
MADVISE,
SEND,
RECV,
OPENAT2,
EPOLL_CTL,
SPLICE,
PROVIDE_BUFFERS,
REMOVE_BUFFERS,
TEE,
SHUTDOWN,
RENAMEAT,
UNLINKAT,
MKDIRAT,
SYMLINKAT,
LINKAT,
/* this goes last, obviously */
LAST,
}
/*
* sys_io_uring_register() opcodes and arguments.
*/
IORING_REGISTER :: enum u32 {
REGISTER_BUFFERS = 0,
UNREGISTER_BUFFERS = 1,
REGISTER_FILES = 2,
UNREGISTER_FILES = 3,
REGISTER_EVENTFD = 4,
UNREGISTER_EVENTFD = 5,
REGISTER_FILES_UPDATE = 6,
REGISTER_EVENTFD_ASYNC = 7,
REGISTER_PROBE = 8,
REGISTER_PERSONALITY = 9,
UNREGISTER_PERSONALITY = 10,
REGISTER_RESTRICTIONS = 11,
REGISTER_ENABLE_RINGS = 12,
/* extended with tagging */
REGISTER_FILES2 = 13,
REGISTER_FILES_UPDATE2 = 14,
REGISTER_BUFFERS2 = 15,
REGISTER_BUFFERS_UPDATE = 16,
/* set/clear io-wq thread affinities */
REGISTER_IOWQ_AFF = 17,
UNREGISTER_IOWQ_AFF = 18,
/* set/get max number of io-wq workers */
REGISTER_IOWQ_MAX_WORKERS = 19,
/* register/unregister io_uring fd with the ring */
REGISTER_RING_FDS = 20,
UNREGISTER_RING_FDS = 21,
/* register ring based provide buffer group */
REGISTER_PBUF_RING = 22,
UNREGISTER_PBUF_RING = 23,
/* sync cancelation API */
REGISTER_SYNC_CANCEL = 24,
/* register a range of fixed file slots for automatic slot allocation */
REGISTER_FILE_ALLOC_RANGE = 25,
/* this goes last */
REGISTER_LAST,
/* flag added to the opcode to use a registered ring fd */
REGISTER_USE_REGISTERED_RING = 1 << 31,
}
IORING_FEAT_SINGLE_MMAP: u32 : (1 << 0)
IORING_FEAT_NODROP: u32 : (1 << 1)
IORING_FEAT_SUBMIT_STABLE: u32 : (1 << 2)
IORING_FEAT_RW_CUR_POS: u32 : (1 << 3)
IORING_FEAT_CUR_PERSONALITY: u32 : (1 << 4)
IORING_FEAT_FAST_POLL: u32 : (1 << 5)
IORING_FEAT_POLL_32BITS: u32 : (1 << 6)
IORING_FEAT_SQPOLL_NONFIXED: u32 : (1 << 7)
IORING_FEAT_EXT_ARG: u32 : (1 << 8)
IORING_FEAT_NATIVE_WORKERS: u32 : (1 << 9)
IORING_FEAT_RSRC_TAGS: u32 : (1 << 10)
/*
* cqe.flags
*
* IORING_CQE_F_BUFFER If set, the upper 16 bits are the buffer ID
* IORING_CQE_F_MORE If set, parent SQE will generate more CQE entries
* IORING_CQE_F_SOCK_NONEMPTY If set, more data to read after socket recv
* IORING_CQE_F_NOTIF Set for notification CQEs. Can be used to distinct
* them from sends.
*/
IORING_CQE_F_BUFFER: u32 : (1 << 0)
IORING_CQE_F_MORE: u32 : (1 << 1)
IORING_CQE_F_SOCK_NONEMPTY: u32 : (1 << 2)
IORING_CQE_F_NOTIF: u32 : (1 << 3)
IORING_CQE :: enum {
BUFFER_SHIFT = 16,
}
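// Illustrative sketch (not part of the original file): recover the buffer ID
// from a CQE whose IORING_CQE_F_BUFFER flag is set; the ID lives in the upper
// 16 bits of `flags`.
cqe_buffer_id :: proc "contextless" (cqe: ^io_uring_cqe) -> (id: u16, ok: bool) {
	if (cqe.flags & IORING_CQE_F_BUFFER) == 0 {
		return
	}
	return u16(cqe.flags >> uint(IORING_CQE.BUFFER_SHIFT)), true
}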
/*
* cq_ring->flags
*/
// disable eventfd notifications
IORING_CQ_EVENTFD_DISABLED: u32 : (1 << 0)
/*
* io_uring_enter(2) flags
*/
IORING_ENTER_GETEVENTS: u32 : (1 << 0)
IORING_ENTER_SQ_WAKEUP: u32 : (1 << 1)
IORING_ENTER_SQ_WAIT: u32 : (1 << 2)
IORING_ENTER_EXT_ARG: u32 : (1 << 3)
IORING_ENTER_REGISTERED_RING: u32 : (1 << 4)
/*
* Magic offsets for the application to mmap the data it needs
*/
IORING_OFF_SQ_RING: uintptr : 0
IORING_OFF_CQ_RING: u64 : 0x8000000
IORING_OFF_SQES: uintptr : 0x10000000
IORING_OFF_PBUF_RING: u64 : 0x80000000
IORING_OFF_PBUF_SHIFT :: 16
IORING_OFF_MMAP_MASK: u64 : 0xf8000000
sys_io_uring_setup :: proc "contextless" (entries: u32, params: ^io_uring_params) -> int {
return int(intrinsics.syscall(SYS_io_uring_setup, uintptr(entries), uintptr(params)))
}
sys_io_uring_enter :: proc "contextless" (
fd: u32,
to_submit: u32,
min_complete: u32,
flags: u32,
sig: ^sigset_t,
) -> int {
return int(
intrinsics.syscall(
SYS_io_uring_enter,
uintptr(fd),
uintptr(to_submit),
uintptr(min_complete),
uintptr(flags),
uintptr(sig),
NSIG / 8 if sig != nil else 0,
),
)
}
sys_io_uring_register :: proc "contextless" (fd: u32, opcode: IORING_REGISTER, arg: rawptr, nr_args: u32) -> int {
return int(intrinsics.syscall(SYS_io_uring_register, uintptr(fd), uintptr(opcode), uintptr(arg), uintptr(nr_args)))
}
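// Illustrative sketch (not part of the original file): create a ring with the
// raw setup syscall and check for IORING_FEAT_SINGLE_MMAP, which the ring
// setup code elsewhere in this package asserts on. A negative return value
// is -errno.
example_setup_ring :: proc "contextless" (entries: u32) -> (fd: int, params: io_uring_params, ok: bool) {
	fd = sys_io_uring_setup(entries, &params)
	if fd < 0 {
		return
	}
	ok = (params.features & IORING_FEAT_SINGLE_MMAP) != 0
	return
}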

View file

@ -0,0 +1,139 @@
#+build darwin
package kqueue
import "core:c"
import "core:os"
Queue_Error :: enum {
None,
Out_Of_Memory,
Descriptor_Table_Full,
File_Table_Full,
Unknown,
}
kqueue :: proc() -> (kq: os.Handle, err: Queue_Error) {
kq = os.Handle(_kqueue())
if kq == -1 {
switch os.Errno(os.get_last_error()) {
case os.ENOMEM:
err = .Out_Of_Memory
case os.EMFILE:
err = .Descriptor_Table_Full
case os.ENFILE:
err = .File_Table_Full
case:
err = .Unknown
}
}
return
}
Event_Error :: enum {
None,
Access_Denied,
Invalid_Event,
Invalid_Descriptor,
Signal,
Invalid_Timeout_Or_Filter,
Event_Not_Found,
Out_Of_Memory,
Process_Not_Found,
Unknown,
}
kevent :: proc(
kq: os.Handle,
change_list: []KEvent,
event_list: []KEvent,
timeout: ^Time_Spec,
) -> (
n_events: int,
err: Event_Error,
) {
n_events = int(
_kevent(
c.int(kq),
raw_data(change_list),
c.int(len(change_list)),
raw_data(event_list),
c.int(len(event_list)),
timeout,
),
)
if n_events == -1 {
switch os.Errno(os.get_last_error()) {
case os.EACCES:
err = .Access_Denied
case os.EFAULT:
err = .Invalid_Event
case os.EBADF:
err = .Invalid_Descriptor
case os.EINTR:
err = .Signal
case os.EINVAL:
err = .Invalid_Timeout_Or_Filter
case os.ENOENT:
err = .Event_Not_Found
case os.ENOMEM:
err = .Out_Of_Memory
case os.ESRCH:
err = .Process_Not_Found
case:
err = .Unknown
}
}
return
}
KEvent :: struct {
ident: c.uintptr_t,
filter: c.int16_t,
flags: c.uint16_t,
fflags: c.uint32_t,
data: c.intptr_t,
udata: rawptr,
}
Time_Spec :: struct {
sec: c.long,
nsec: c.long,
}
EV_ADD :: 0x0001 /* add event to kq (implies enable) */
EV_DELETE :: 0x0002 /* delete event from kq */
EV_ENABLE :: 0x0004 /* enable event */
EV_DISABLE :: 0x0008 /* disable event (not reported) */
EV_ONESHOT :: 0x0010 /* only report one occurrence */
EV_CLEAR :: 0x0020 /* clear event state after reporting */
EV_RECEIPT :: 0x0040 /* force immediate event output */
EV_DISPATCH :: 0x0080 /* disable event after reporting */
EV_UDATA_SPECIFIC :: 0x0100 /* unique kevent per udata value */
EV_FANISHED :: 0x0200 /* report that source has vanished */
EV_SYSFLAGS :: 0xF000 /* reserved by system */
EV_FLAG0 :: 0x1000 /* filter-specific flag */
EV_FLAG1 :: 0x2000 /* filter-specific flag */
EV_ERROR :: 0x4000 /* error, data contains errno */
EV_EOF :: 0x8000 /* EOF detected */
EV_DISPATCH2 :: (EV_DISPATCH | EV_UDATA_SPECIFIC)
EVFILT_READ :: -1
EVFILT_WRITE :: -2
EVFILT_AIO :: -3
EVFILT_VNODE :: -4
EVFILT_PROC :: -5
EVFILT_SIGNAL :: -6
EVFILT_TIMER :: -7
EVFILT_MACHPORT :: -8
EVFILT_FS :: -9
EVFILT_USER :: -10
EVFILT_VM :: -12
EVFILT_EXCEPT :: -15
@(default_calling_convention = "c")
foreign _ {
@(link_name = "kqueue")
_kqueue :: proc() -> c.int ---
@(link_name = "kevent")
_kevent :: proc(kq: c.int, change_list: [^]KEvent, n_changes: c.int, event_list: [^]KEvent, n_events: c.int, timeout: ^Time_Spec) -> c.int ---
}
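// Illustrative sketch (not part of the original file): register one-shot
// interest in readability of `fd` and block until it fires. `fd` is assumed
// to be a valid descriptor owned by the caller.
example_wait_readable :: proc(fd: os.Handle) -> (ok: bool) {
	kq, qerr := kqueue()
	if qerr != .None {
		return
	}
	defer os.close(kq)
	changes := [1]KEvent{{
		ident  = c.uintptr_t(fd),
		filter = EVFILT_READ,
		flags  = EV_ADD | EV_ENABLE | EV_ONESHOT,
	}}
	events: [1]KEvent
	// A nil timeout blocks until at least one event is available.
	n, eerr := kevent(kq, changes[:], events[:], nil)
	return eerr == .None && n == 1
}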

93
odin-http/nbio/doc.odin Normal file
View file

@ -0,0 +1,93 @@
/*
package nbio implements a non blocking IO abstraction layer over several platform specific APIs.
This package implements an event loop based abstraction.
APIs:
- Windows: [[IOCP IO Completion Ports;https://en.wikipedia.org/wiki/Input/output_completion_port]]
- Linux: [[io_uring;https://en.wikipedia.org/wiki/Io_uring]]
- Darwin: [[KQueue;https://en.wikipedia.org/wiki/Kqueue]]
How to read the code:
The file nbio.odin can be read a bit like a header file:
it has all the procedures heavily explained and commented, and dispatches them to platform specific code.
You can also have a look at the tests for more general usages.
Example:
/*
This example shows a simple TCP server that echoes back anything it receives.
Better error handling and closing/freeing connections are left for the reader.
*/
package main
import "core:fmt"
import "core:net"
import "core:os"
import nbio "nbio/poly"
Echo_Server :: struct {
io: nbio.IO,
sock: net.TCP_Socket,
connections: [dynamic]^Echo_Connection,
}
Echo_Connection :: struct {
server: ^Echo_Server,
sock: net.TCP_Socket,
buf: [50]byte,
}
main :: proc() {
server: Echo_Server
defer delete(server.connections)
nbio.init(&server.io)
defer nbio.destroy(&server.io)
sock, err := nbio.open_and_listen_tcp(&server.io, {net.IP4_Loopback, 8080})
fmt.assertf(err == nil, "Error opening and listening on localhost:8080: %v", err)
server.sock = sock
nbio.accept(&server.io, sock, &server, echo_on_accept)
// Start the event loop.
errno: os.Errno
for errno == os.ERROR_NONE {
errno = nbio.tick(&server.io)
}
fmt.assertf(errno == os.ERROR_NONE, "Server stopped with error code: %v", errno)
}
echo_on_accept :: proc(server: ^Echo_Server, client: net.TCP_Socket, source: net.Endpoint, err: net.Network_Error) {
fmt.assertf(err == nil, "Error accepting a connection: %v", err)
// Register a new accept for the next client.
nbio.accept(&server.io, server.sock, server, echo_on_accept)
c := new(Echo_Connection)
c.server = server
c.sock = client
append(&server.connections, c)
nbio.recv(&server.io, client, c.buf[:], c, echo_on_recv)
}
echo_on_recv :: proc(c: ^Echo_Connection, received: int, _: Maybe(net.Endpoint), err: net.Network_Error) {
fmt.assertf(err == nil, "Error receiving from client: %v", err)
nbio.send_all(&c.server.io, c.sock, c.buf[:received], c, echo_on_sent)
}
echo_on_sent :: proc(c: ^Echo_Connection, sent: int, err: net.Network_Error) {
fmt.assertf(err == nil, "Error sending to client: %v", err)
// Accept the next message, to then ultimately echo back again.
nbio.recv(&c.server.io, c.sock, c.buf[:], c, echo_on_recv)
}
*/
package nbio

736
odin-http/nbio/nbio.odin Normal file
View file

@ -0,0 +1,736 @@
package nbio
import "core:net"
import "core:os"
import "core:time"
/*
The main IO type that holds the platform-dependent implementation state, passed around most procedures in this package
*/
IO :: _IO
/*
Initializes the IO type, allocating whatever the platform's implementation needs
*Allocates Using Provided Allocator*
Inputs:
- io: The IO struct to initialize
- allocator: (default: context.allocator)
Returns:
- err: An error code when something went wrong with the setup of the platform's IO API, 0 otherwise
*/
init :: proc(io: ^IO, allocator := context.allocator) -> (err: os.Errno) {
return _init(io, allocator)
}
/*
The place where the magic happens: each time you call this, the IO implementation checks its state
and calls any callbacks that are ready. You would typically call this in a loop
Inputs:
- io: The IO instance to tick
Returns:
- err: An error code when something went wrong while retrieving events, 0 otherwise
*/
tick :: proc(io: ^IO) -> os.Errno {
return _tick(io)
}
/*
Returns the number of in-progress IO operations that have yet to complete.
*/
num_waiting :: #force_inline proc(io: ^IO) -> int {
return _num_waiting(io)
}
/*
Deallocates anything that was allocated when calling init()
Inputs:
- io: The IO instance to deallocate
*Deallocates with the allocator that was passed with the init() call*
*/
destroy :: proc(io: ^IO) {
_destroy(io)
}
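// Illustrative sketch (not part of the original file): the minimal lifecycle,
// ticking a bounded number of times; a real program would loop until shutdown
// as shown in the example in doc.odin.
example_lifecycle :: proc() {
	io: IO
	if init(&io) != os.ERROR_NONE {
		return
	}
	defer destroy(&io)
	for _ in 0 ..< 100 {
		if tick(&io) != os.ERROR_NONE {
			break
		}
	}
}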
/*
The callback for a "next tick" event
Inputs:
- user: A passed through pointer from initiation to its callback
*/
On_Next_Tick :: #type proc(user: rawptr)
/*
Schedules a callback to be called during the next tick of the event loop.
Inputs:
- io: The IO instance to use
- user: A pointer that will be passed through to the callback, free to use by you and untouched by us
*/
next_tick :: proc(io: ^IO, user: rawptr, callback: On_Next_Tick) -> ^Completion {
return _next_tick(io, user, callback)
}
/*
The callback for non blocking `timeout` calls
Inputs:
- user: A passed through pointer from initiation to its callback
*/
On_Timeout :: #type proc(user: rawptr)
/*
Schedules a callback to be called after the given duration elapses.
The accuracy depends on the time between calls to `tick`.
When you call it in a loop without blocking or very expensive calculations in other scheduled event callbacks,
it is reliable to about a millisecond of difference (so a timeout of 10ms would almost always be run between 10ms and 11ms).
Inputs:
- io: The IO instance to use
- dur: The minimum duration to wait before calling the given callback
- user: A pointer that will be passed through to the callback, free to use by you and untouched by us
- callback: The callback that is called when the operation completes, see docs for `On_Timeout` for its arguments
*/
timeout :: proc(io: ^IO, dur: time.Duration, user: rawptr, callback: On_Timeout) {
_timeout(io, dur, user, callback)
}
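// Illustrative sketch (not part of the original file): run a callback roughly
// one second from now, driven by the caller's `tick` loop.
example_schedule :: proc(io: ^IO) {
	timeout(io, time.Second, nil, proc(_: rawptr) {
		// Runs during a later `tick`, once at least a second has passed.
	})
}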
/*
Creates a socket, sets non blocking mode and relates it to the given IO
Inputs:
- io: The IO instance to initialize the socket on/with
- family: Should this be an IP4 or IP6 socket
- protocol: The type of socket (TCP or UDP)
Returns:
- socket: The opened socket
- err: A network error that happened while opening
*/
open_socket :: proc(
io: ^IO,
family: net.Address_Family,
protocol: net.Socket_Protocol,
) -> (
socket: net.Any_Socket,
err: net.Network_Error,
) {
return _open_socket(io, family, protocol)
}
/*
Creates a socket, sets non blocking mode, relates it to the given IO, binds the socket to the given endpoint and starts listening
Inputs:
- io: The IO instance to initialize the socket on/with
- endpoint: Where to bind the socket to
Returns:
- socket: The opened, bound and listening socket
- err: A network error that happened while opening
*/
open_and_listen_tcp :: proc(io: ^IO, ep: net.Endpoint) -> (socket: net.TCP_Socket, err: net.Network_Error) {
family := net.family_from_endpoint(ep)
sock := open_socket(io, family, .TCP) or_return
socket = sock.(net.TCP_Socket)
if err = net.bind(socket, ep); err != nil {
close(io, socket)
return
}
if err = listen(socket); err != nil {
close(io, socket)
}
return
}
/*
Starts listening on the given socket
Inputs:
- socket: The socket to start listening on
- backlog: The maximum number of pending connections to keep queued while they have not been accepted yet
Returns:
- err: A network error that happened when starting to listen
*/
listen :: proc(socket: net.TCP_Socket, backlog := 1000) -> (err: net.Network_Error) {
return _listen(socket, backlog)
}
/*
The callback for non blocking `close` requests
Inputs:
- user: A passed through pointer from initiation to its callback
- ok: Whether the operation succeeded
*/
On_Close :: #type proc(user: rawptr, ok: bool)
@private
empty_on_close :: proc(_: rawptr, _: bool) {}
/*
A union of types that are `close`'able by this package
*/
Closable :: union #no_nil {
net.TCP_Socket,
net.UDP_Socket,
net.Socket,
os.Handle,
}
/*
Closes the given `Closable` socket or file handle that was originally created by this package.
*Due to platform limitations, you must pass a `Closable` that was opened/returned using/by this package*
Inputs:
- io: The IO instance to use
- fd: The `Closable` socket or handle (created using/by this package) to close
- user: An optional pointer that will be passed through to the callback, free to use by you and untouched by us
- callback: An optional callback that is called when the operation completes, see docs for `On_Close` for its arguments
*/
close :: proc(io: ^IO, fd: Closable, user: rawptr = nil, callback: On_Close = empty_on_close) {
_close(io, fd, user, callback)
}
/*
The callback for non blocking `accept` requests
Inputs:
- user: A passed through pointer from initiation to its callback
- client: The socket to communicate through with the newly accepted client
- source: The origin of the client
- err: A network error that occurred during the accept process
*/
On_Accept :: #type proc(user: rawptr, client: net.TCP_Socket, source: net.Endpoint, err: net.Network_Error)
/*
Using the given socket, accepts the next incoming connection, calling the callback when that happens
*Due to platform limitations, you must pass a socket that was opened using the `open_socket` and related procedures from this package*
Inputs:
- io: The IO instance to use
- socket: A bound and listening socket *that was created using this package*
- user: A pointer that will be passed through to the callback, free to use by you and untouched by us
- callback: The callback that is called when the operation completes, see docs for `On_Accept` for its arguments
*/
accept :: proc(io: ^IO, socket: net.TCP_Socket, user: rawptr, callback: On_Accept) {
_accept(io, socket, user, callback)
}
/*
The callback for non blocking `connect` requests
Inputs:
- user: A passed through pointer from initiation to its callback
- socket: A socket that is connected to the given endpoint in the `connect` call
- err: A network error that occurred during the connect call
*/
On_Connect :: #type proc(user: rawptr, socket: net.TCP_Socket, err: net.Network_Error)
/*
Connects to the given endpoint, calling the given callback once it has been done
Inputs:
- io: The IO instance to use
- endpoint: An endpoint to connect a socket to
- user: A pointer that will be passed through to the callback, free to use by you and untouched by us
- callback: The callback that is called when the operation completes, see docs for `On_Connect` for its arguments
*/
connect :: proc(io: ^IO, endpoint: net.Endpoint, user: rawptr, callback: On_Connect) {
_, err := _connect(io, endpoint, user, callback)
if err != nil {
callback(user, {}, err)
}
}
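// Illustrative sketch (not part of the original file): connect to a
// hypothetical local service on port 9000 and send a greeting once connected.
// Proc literals cannot capture locals, so the IO instance travels through the
// `user` pointer.
example_connect :: proc(io: ^IO) {
	connect(io, {net.IP4_Loopback, 9000}, io, proc(user: rawptr, sock: net.TCP_Socket, err: net.Network_Error) {
		if err != nil {
			return
		}
		io := cast(^IO)user
		// The slice points at the static string literal, so it outlives the
		// asynchronous send.
		greeting := "hello"
		send(io, sock, transmute([]byte)greeting, nil, proc(_: rawptr, _: int, _: net.Network_Error) {})
	})
}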
/*
The callback for non blocking `recv` requests
Inputs:
- user: A passed through pointer from initiation to its callback
- received: The number of bytes that were read and added to the given buf
- udp_client: If the given socket was a `net.UDP_Socket`, this will be the client that was received from
- err: A network error if one occurred
*/
On_Recv :: #type proc(user: rawptr, received: int, udp_client: Maybe(net.Endpoint), err: net.Network_Error)
/*
Receives from the given socket, at most `len(buf)` bytes, and calls the given callback
*Due to platform limitations, you must pass a `net.TCP_Socket` or `net.UDP_Socket` that was opened/returned using/by this package*
Inputs:
- io: The IO instance to use
- socket: Either a `net.TCP_Socket` or a `net.UDP_Socket` (that was opened/returned by this package) to receive from
- buf: The buffer to put received bytes into
- user: A pointer that will be passed through to the callback, free to use by you and untouched by us
- callback: The callback that is called when the operation completes, see docs for `On_Recv` for its arguments
*/
recv :: proc(io: ^IO, socket: net.Any_Socket, buf: []byte, user: rawptr, callback: On_Recv) {
_recv(io, socket, buf, user, callback)
}
/*
Receives from the given socket until the given buf is full or an error occurs, and calls the given callback
*Due to platform limitations, you must pass a `net.TCP_Socket` or `net.UDP_Socket` that was opened/returned using/by this package*
Inputs:
- io: The IO instance to use
- socket: Either a `net.TCP_Socket` or a `net.UDP_Socket` (that was opened/returned by this package) to receive from
- buf: The buffer to put received bytes into
- user: A pointer that will be passed through to the callback, free to use by you and untouched by us
- callback: The callback that is called when the operation completes, see docs for `On_Recv` for its arguments
*/
recv_all :: proc(io: ^IO, socket: net.Any_Socket, buf: []byte, user: rawptr, callback: On_Recv) {
_recv(io, socket, buf, user, callback, all = true)
}
/*
The callback for non blocking `send` and `send_all` requests
Inputs:
- user: A passed through pointer from initiation to its callback
- sent: The number of bytes that were sent over the connection
- err: A network error if one occurred
*/
On_Sent :: #type proc(user: rawptr, sent: int, err: net.Network_Error)
/*
Sends at most `len(buf)` bytes from the given buffer over the socket connection, and calls the given callback
*Prefer using the `send` proc group*
*Due to platform limitations, you must pass a `net.TCP_Socket` that was opened/returned using/by this package*
Inputs:
- io: The IO instance to use
- socket: a `net.TCP_Socket` (that was opened/returned by this package) to send to
- buf: The buffer to send
- user: A pointer that will be passed through to the callback, free to use by you and untouched by us
- callback: The callback that is called when the operation completes, see docs for `On_Sent` for its arguments
*/
send_tcp :: proc(io: ^IO, socket: net.TCP_Socket, buf: []byte, user: rawptr, callback: On_Sent) {
_send(io, socket, buf, user, callback)
}
/*
Sends at most `len(buf)` bytes from the given buffer over the socket connection to the given endpoint, and calls the given callback
*Prefer using the `send` proc group*
*Due to platform limitations, you must pass a `net.UDP_Socket` that was opened/returned using/by this package*
Inputs:
- io: The IO instance to use
- endpoint: The endpoint to send bytes to over the socket
- socket: a `net.UDP_Socket` (that was opened/returned by this package) to send to
- buf: The buffer to send
- user: A pointer that will be passed through to the callback, free to use by you and untouched by us
- callback: The callback that is called when the operation completes, see docs for `On_Sent` for its arguments
*/
send_udp :: proc(
io: ^IO,
endpoint: net.Endpoint,
socket: net.UDP_Socket,
buf: []byte,
user: rawptr,
callback: On_Sent,
) {
_send(io, socket, buf, user, callback, endpoint)
}
/*
Sends at most `len(buf)` bytes from the given buffer over the socket connection, and calls the given callback
*Due to platform limitations, you must pass a `net.TCP_Socket` or `net.UDP_Socket` that was opened/returned using/by this package*
*/
send :: proc {
send_udp,
send_tcp,
}
/*
Sends the bytes from the given buffer over the socket connection, and calls the given callback
This will keep sending until either an error occurs or the full buffer has been sent
*Prefer using the `send` proc group*
*Due to platform limitations, you must pass a `net.TCP_Socket` that was opened/returned using/by this package*
Inputs:
- io: The IO instance to use
- socket: a `net.TCP_Socket` (that was opened/returned by this package) to send to
- buf: The buffer to send
- user: A pointer that will be passed through to the callback, free to use by you and untouched by us
- callback: The callback that is called when the operation completes, see docs for `On_Sent` for its arguments
*/
send_all_tcp :: proc(io: ^IO, socket: net.TCP_Socket, buf: []byte, user: rawptr, callback: On_Sent) {
_send(io, socket, buf, user, callback, all = true)
}
/*
Sends the bytes from the given buffer over the socket connection to the given endpoint, and calls the given callback
This will keep sending until either an error occurs or the full buffer has been sent
*Prefer using the `send` proc group*
*Due to platform limitations, you must pass a `net.UDP_Socket` that was opened/returned using/by this package*
Inputs:
- io: The IO instance to use
- endpoint: The endpoint to send bytes to over the socket
- socket: a `net.UDP_Socket` (that was opened/returned by this package) to send to
- buf: The buffer to send
- user: A pointer that will be passed through to the callback, free to use by you and untouched by us
- callback: The callback that is called when the operation completes, see docs for `On_Sent` for its arguments
*/
send_all_udp :: proc(
io: ^IO,
endpoint: net.Endpoint,
socket: net.UDP_Socket,
buf: []byte,
user: rawptr,
callback: On_Sent,
) {
_send(io, socket, buf, user, callback, endpoint, all = true)
}
/*
Sends the bytes from the given buffer over the socket connection, and calls the given callback
This will keep sending until either an error or the full buffer is sent
*Due to platform limitations, you must pass a `net.TCP_Socket` or `net.UDP_Socket` that was opened/returned using/by this package*
*/
send_all :: proc {
send_all_udp,
send_all_tcp,
}
/*
Opens a file handle, sets non blocking mode and relates it to the given IO
*The perm argument is only used on the darwin and linux platforms; on Windows you can't use the os.S_\* constants because they aren't declared*
*To prevent compilation errors on Windows, you should use a `when` statement around using those constants and just pass 0*
Inputs:
- io: The IO instance to connect the opened file to
- path: The path of the file to open
- mode: The file mode (default: os.O_RDONLY)
- perm: The permissions to use when creating a file (default: 0)
Returns:
- handle: The file handle
- err: The error code when an error occurred, 0 otherwise
*/
open :: proc(io: ^IO, path: string, mode: int = os.O_RDONLY, perm: int = 0) -> (handle: os.Handle, err: os.Errno) {
return _open(io, path, mode, perm)
}
/*
Where to seek from
Options:
- Set: sets the offset to the given value
- Curr: adds the given offset to the current offset
- End: adds the given offset to the end of the file
*/
Whence :: enum {
Set,
Curr,
End,
}
/*
Seeks the given handle according to the given offset and whence, so that subsequent reads and writes *USING THIS PACKAGE* will do so at that offset
*Some platforms require this package to handle offsets while others keep that state in the kernel; for this reason you should assume that seeking only affects this package*
Inputs:
- io: The IO instance to seek on
- fd: The file handle to seek
- whence: The seek mode/where to seek from (default: Whence.Set)
Returns:
- new_offset: The offset that the file is at when the operation completed
- err: The error when an error occurred, 0 otherwise
*/
seek :: proc(io: ^IO, fd: os.Handle, offset: int, whence: Whence = .Set) -> (new_offset: int, err: os.Errno) {
return _seek(io, fd, offset, whence)
}
/*
The callback for non blocking `read` or `read_at` requests
Inputs:
- user: A passed through pointer from initiation to its callback
- read: The number of bytes that were read and added to the given buf
- err: An error number if an error occurred, 0 otherwise
*/
On_Read :: #type proc(user: rawptr, read: int, err: os.Errno)
/*
Reads from the given handle, at the handle's internal offset, at most `len(buf)` bytes, increases the file offset, and calls the given callback
*Due to platform limitations, you must pass a `os.Handle` that was opened/returned using/by this package*
Inputs:
- io: The IO instance to use
- fd: The file handle (created using/by this package) to read from
- buf: The buffer to put read bytes into
- user: A pointer that will be passed through to the callback, free to use by you and untouched by us
- callback: The callback that is called when the operation completes, see docs for `On_Read` for its arguments
*/
read :: proc(io: ^IO, fd: os.Handle, buf: []byte, user: rawptr, callback: On_Read) {
_read(io, fd, nil, buf, user, callback)
}
/*
Reads from the given handle, at the handle's internal offset, until the given buf is full or an error occurs, increases the file offset, and calls the given callback
*Due to platform limitations, you must pass a `os.Handle` that was opened/returned using/by this package*
Inputs:
- io: The IO instance to use
- fd: The file handle (created using/by this package) to read from
- buf: The buffer to put read bytes into
- user: A pointer that will be passed through to the callback, free to use by you and untouched by us
- callback: The callback that is called when the operation completes, see docs for `On_Read` for its arguments
*/
read_all :: proc(io: ^IO, fd: os.Handle, buf: []byte, user: rawptr, callback: On_Read) {
_read(io, fd, nil, buf, user, callback, all = true)
}
/*
Reads from the given handle, at the given offset, at most `len(buf)` bytes, and calls the given callback
*Due to platform limitations, you must pass a `os.Handle` that was opened/returned using/by this package*
Inputs:
- io: The IO instance to use
- fd: The file handle (created using/by this package) to read from
- offset: The offset to begin the read from
- buf: The buffer to put read bytes into
- user: A pointer that will be passed through to the callback, free to use by you and untouched by us
- callback: The callback that is called when the operation completes, see docs for `On_Read` for its arguments
*/
read_at :: proc(io: ^IO, fd: os.Handle, offset: int, buf: []byte, user: rawptr, callback: On_Read) {
_read(io, fd, offset, buf, user, callback)
}
/*
Reads from the given handle, at the given offset, until the given buf is full or an error occurs, and calls the given callback
*Due to platform limitations, you must pass a `os.Handle` that was opened/returned using/by this package*
Inputs:
- io: The IO instance to use
- fd: The file handle (created using/by this package) to read from
- offset: The offset to begin the read from
- buf: The buffer to put read bytes into
- user: A pointer that will be passed through to the callback, free to use by you and untouched by us
- callback: The callback that is called when the operation completes, see docs for `On_Read` for its arguments
*/
read_at_all :: proc(io: ^IO, fd: os.Handle, offset: int, buf: []byte, user: rawptr, callback: On_Read) {
_read(io, fd, offset, buf, user, callback, all = true)
}
read_entire_file :: read_full
/*
Reads the entire file (size found by seeking to the end) into a singly allocated buffer that is returned.
The callback is called once the file is read into the returned buf.
*Due to platform limitations, you must pass a `os.Handle` that was opened/returned using/by this package*
Inputs:
- io: The IO instance to use
- fd: The file handle (created using/by this package) to read from
- user: A pointer that will be passed through to the callback, free to use by you and untouched by us
- callback: The callback that is called when the operation completes, see docs for `On_Read` for its arguments
Returns:
- buf: The buffer, allocated to the size found by seeking to the end of the file, that is filled before the callback is called
*/
read_full :: proc(io: ^IO, fd: os.Handle, user: rawptr, callback: On_Read, allocator := context.allocator) -> []byte {
size, err := seek(io, fd, 0, .End)
if err != os.ERROR_NONE {
callback(user, 0, err)
return nil
}
if size <= 0 {
callback(user, 0, os.ERROR_NONE)
return nil
}
buf := make([]byte, size, allocator)
read_at_all(io, fd, 0, buf, user, callback)
return buf
}
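// Illustrative sketch (not part of the original file): open a placeholder
// path through this package and read it fully; the returned buffer is filled
// by the time the callback runs and must eventually be freed by the caller.
example_read_file :: proc(io: ^IO) -> []byte {
	fd, errno := open(io, "example.txt")
	if errno != os.ERROR_NONE {
		return nil
	}
	return read_full(io, fd, nil, proc(_: rawptr, read: int, err: os.Errno) {
		// `read` is the number of bytes placed into the returned buffer.
	})
}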
/*
The callback for non blocking `write`, `write_all`, `write_at` and `write_at_all` requests
Inputs:
- user: A passed through pointer from initiation to its callback
- written: The number of bytes that were written to the file
- err: An error number if an error occurred, 0 otherwise
*/
On_Write :: #type proc(user: rawptr, written: int, err: os.Errno)
/*
Writes to the given handle, at the handle's internal offset, at most `len(buf)` bytes, increases the file offset, and calls the given callback
*Due to platform limitations, you must pass a `os.Handle` that was opened/returned using/by this package*
Inputs:
- io: The IO instance to use
- fd: The file handle (created using/by this package) to write to
- buf: The buffer to write to the file
- user: A pointer that will be passed through to the callback, free to use by you and untouched by us
- callback: The callback that is called when the operation completes, see docs for `On_Write` for its arguments
*/
write :: proc(io: ^IO, fd: os.Handle, buf: []byte, user: rawptr, callback: On_Write) {
_write(io, fd, nil, buf, user, callback)
}
/*
Writes the given buffer to the given handle, at the handle's internal offset, increases the file offset, and calls the given callback
This keeps writing until either an error occurs or the full buffer has been written
*Due to platform limitations, you must pass a `os.Handle` that was opened/returned using/by this package*
Inputs:
- io: The IO instance to use
- fd: The file handle (created using/by this package) to write to
- buf: The buffer to write to the file
- user: A pointer that will be passed through to the callback, free to use by you and untouched by us
- callback: The callback that is called when the operation completes, see docs for `On_Write` for its arguments
*/
write_all :: proc(io: ^IO, fd: os.Handle, buf: []byte, user: rawptr, callback: On_Write) {
_write(io, fd, nil, buf, user, callback, true)
}
/*
Writes to the given handle, at the given offset, at most `len(buf)` bytes, and calls the given callback
*Due to platform limitations, you must pass a `os.Handle` that was opened/returned using/by this package*
Inputs:
- io: The IO instance to use
- fd: The file handle (created using/by this package) to write to
- offset: The offset to begin the write from
- buf: The buffer to write to the file
- user: A pointer that will be passed through to the callback, free to use by you and untouched by us
- callback: The callback that is called when the operation completes, see docs for `On_Write` for its arguments
*/
write_at :: proc(io: ^IO, fd: os.Handle, offset: int, buf: []byte, user: rawptr, callback: On_Write) {
_write(io, fd, offset, buf, user, callback)
}
/*
Writes the given buffer to the given handle, at the given offset, and calls the given callback
This keeps writing until either an error occurs or the full buffer has been written
*Due to platform limitations, you must pass a `os.Handle` that was opened/returned using/by this package*
Inputs:
- io: The IO instance to use
- fd: The file handle (created using/by this package) to write to
- offset: The offset to begin the write from
- buf: The buffer to write to the file
- user: A pointer that will be passed through to the callback, free to use by you and untouched by us
- callback: The callback that is called when the operation completes, see docs for `On_Write` for its arguments
*/
write_at_all :: proc(io: ^IO, fd: os.Handle, offset: int, buf: []byte, user: rawptr, callback: On_Write) {
_write(io, fd, offset, buf, user, callback, true)
}
Poll_Event :: enum {
// The subject is ready to be read from.
Read,
// The subject is ready to be written to.
Write,
}
/*
The callback for poll requests
Inputs:
- user: A passed through pointer from initiation to its callback
- event: The event that is ready to go
*/
On_Poll :: #type proc(user: rawptr, event: Poll_Event)
/*
Polls for the given event on the subject handle
Inputs:
- io: The IO instance to use
- fd: The file descriptor to poll
- event: Whether to call the callback when `fd` is ready to be read from, or be written to
- multi: Keeps the poll registered after an event happens, calling the callback again for further events; remove the poll with `poll_remove`
- user: An optional pointer that will be passed through to the callback, free to use by you and untouched by us
- callback: The callback that is called when the operation completes, see docs for `On_Poll` for its arguments
*/
poll :: proc(io: ^IO, fd: os.Handle, event: Poll_Event, multi: bool, user: rawptr, callback: On_Poll) {
_poll(io, fd, event, multi, user, callback)
}
/*
Removes the polling for this `subject`+`event` pairing
This is only needed when `poll` was called with `multi` set to `true`
Inputs:
- io: The IO instance to use
- fd: The file descriptor to remove the poll of
- event: The event to remove the poll of
*/
poll_remove :: proc(io: ^IO, fd: os.Handle, event: Poll_Event) {
_poll_remove(io, fd, event)
}
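// Illustrative sketch (not part of the original file): watch a descriptor for
// readability on every tick until it is explicitly stopped again.
example_watch :: proc(io: ^IO, fd: os.Handle) {
	poll(io, fd, .Read, true, nil, proc(_: rawptr, _: Poll_Event) {
		// Fires every time `fd` becomes readable; `multi = true` keeps the
		// poll registered until `poll_remove` is called.
	})
}
example_unwatch :: proc(io: ^IO, fd: os.Handle) {
	poll_remove(io, fd, .Read)
}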
MAX_USER_ARGUMENTS :: size_of(rawptr) * 5
Completion :: struct {
// Implementation specifics, don't use outside of implementation/os.
using _: _Completion,
user_data: rawptr,
// Callback pointer and user args passed in poly variants.
user_args: [MAX_USER_ARGUMENTS + size_of(rawptr)]byte,
}
@(private)
Operation :: union #no_nil {
Op_Accept,
Op_Close,
Op_Connect,
Op_Read,
Op_Recv,
Op_Send,
Op_Write,
Op_Timeout,
Op_Next_Tick,
Op_Poll,
Op_Poll_Remove,
}

View file

@ -0,0 +1,270 @@
package nbio
import "core:container/queue"
import "core:net"
import "core:os"
import "core:time"
import kqueue "_kqueue"
_init :: proc(io: ^IO, allocator := context.allocator) -> (err: os.Errno) {
qerr: kqueue.Queue_Error
io.kq, qerr = kqueue.kqueue()
if qerr != .None do return kq_err_to_os_err(qerr)
pool_init(&io.completion_pool, allocator = allocator)
io.timeouts = make([dynamic]^Completion, allocator)
io.io_pending = make([dynamic]^Completion, allocator)
queue.init(&io.completed, allocator = allocator)
io.allocator = allocator
return
}
_num_waiting :: #force_inline proc(io: ^IO) -> int {
return io.completion_pool.num_waiting
}
_destroy :: proc(io: ^IO) {
context.allocator = io.allocator
delete(io.timeouts)
delete(io.io_pending)
queue.destroy(&io.completed)
os.close(io.kq)
pool_destroy(&io.completion_pool)
}
_tick :: proc(io: ^IO) -> os.Errno {
return flush(io)
}
_listen :: proc(socket: net.TCP_Socket, backlog := 1000) -> net.Network_Error {
errno := os.listen(os.Socket(socket), backlog)
if errno != nil {
return net.Listen_Error(errno.(os.Platform_Error))
}
return nil
}
_accept :: proc(io: ^IO, socket: net.TCP_Socket, user: rawptr, callback: On_Accept) -> ^Completion {
completion := pool_get(&io.completion_pool)
completion.ctx = context
completion.user_data = user
completion.operation = Op_Accept{
callback = callback,
sock = socket,
}
queue.push_back(&io.completed, completion)
return completion
}
// Queues a completion that closes the given handle with os.close.
_close :: proc(io: ^IO, fd: Closable, user: rawptr, callback: On_Close) -> ^Completion {
completion := pool_get(&io.completion_pool)
completion.ctx = context
completion.user_data = user
completion.operation = Op_Close{
callback = callback,
}
op := &completion.operation.(Op_Close)
switch h in fd {
case net.TCP_Socket: op.handle = os.Handle(h)
case net.UDP_Socket: op.handle = os.Handle(h)
case net.Socket: op.handle = os.Handle(h)
case os.Handle: op.handle = h
}
queue.push_back(&io.completed, completion)
return completion
}
// TODO: maybe call this dial?
_connect :: proc(io: ^IO, endpoint: net.Endpoint, user: rawptr, callback: On_Connect) -> (^Completion, net.Network_Error) {
if endpoint.port == 0 {
return nil, net.Dial_Error.Port_Required
}
family := net.family_from_endpoint(endpoint)
sock, err := net.create_socket(family, .TCP)
if err != nil {
return nil, err
}
if err = _prepare_socket(sock); err != nil {
close(io, net.any_socket_to_socket(sock))
return nil, err
}
completion := pool_get(&io.completion_pool)
completion.ctx = context
completion.user_data = user
completion.operation = Op_Connect {
callback = callback,
socket = sock.(net.TCP_Socket),
sockaddr = _endpoint_to_sockaddr(endpoint),
}
queue.push_back(&io.completed, completion)
return completion, nil
}
_read :: proc(
io: ^IO,
fd: os.Handle,
offset: Maybe(int),
buf: []byte,
user: rawptr,
callback: On_Read,
all := false,
) -> ^Completion {
completion := pool_get(&io.completion_pool)
completion.ctx = context
completion.user_data = user
completion.operation = Op_Read {
callback = callback,
fd = fd,
buf = buf,
offset = offset.? or_else -1,
all = all,
len = len(buf),
}
queue.push_back(&io.completed, completion)
return completion
}
_recv :: proc(io: ^IO, socket: net.Any_Socket, buf: []byte, user: rawptr, callback: On_Recv, all := false) -> ^Completion {
completion := pool_get(&io.completion_pool)
completion.ctx = context
completion.user_data = user
completion.operation = Op_Recv {
callback = callback,
socket = socket,
buf = buf,
all = all,
len = len(buf),
}
queue.push_back(&io.completed, completion)
return completion
}
_send :: proc(
io: ^IO,
socket: net.Any_Socket,
buf: []byte,
user: rawptr,
callback: On_Sent,
endpoint: Maybe(net.Endpoint) = nil,
all := false,
) -> ^Completion {
if _, ok := socket.(net.UDP_Socket); ok {
assert(endpoint != nil)
}
completion := pool_get(&io.completion_pool)
completion.ctx = context
completion.user_data = user
completion.operation = Op_Send {
callback = callback,
socket = socket,
buf = buf,
endpoint = endpoint,
all = all,
len = len(buf),
}
queue.push_back(&io.completed, completion)
return completion
}
_write :: proc(
io: ^IO,
fd: os.Handle,
offset: Maybe(int),
buf: []byte,
user: rawptr,
callback: On_Write,
all := false,
) -> ^Completion {
completion := pool_get(&io.completion_pool)
completion.ctx = context
completion.user_data = user
completion.operation = Op_Write {
callback = callback,
fd = fd,
buf = buf,
offset = offset.? or_else -1,
all = all,
len = len(buf),
}
queue.push_back(&io.completed, completion)
return completion
}
// Runs the callback after the timeout, using the kqueue.
_timeout :: proc(io: ^IO, dur: time.Duration, user: rawptr, callback: On_Timeout) -> ^Completion {
completion := pool_get(&io.completion_pool)
completion.ctx = context
completion.user_data = user
completion.operation = Op_Timeout {
callback = callback,
expires = time.time_add(time.now(), dur),
}
append(&io.timeouts, completion)
return completion
}
_next_tick :: proc(io: ^IO, user: rawptr, callback: On_Next_Tick) -> ^Completion {
completion := pool_get(&io.completion_pool)
completion.ctx = context
completion.user_data = user
completion.operation = Op_Next_Tick {
callback = callback,
}
queue.push_back(&io.completed, completion)
return completion
}
_poll :: proc(io: ^IO, fd: os.Handle, event: Poll_Event, multi: bool, user: rawptr, callback: On_Poll) -> ^Completion {
completion := pool_get(&io.completion_pool)
completion.ctx = context
completion.user_data = user
completion.operation = Op_Poll{
callback = callback,
fd = fd,
event = event,
multi = multi,
}
append(&io.io_pending, completion)
return completion
}
_poll_remove :: proc(io: ^IO, fd: os.Handle, event: Poll_Event) -> ^Completion {
completion := pool_get(&io.completion_pool)
completion.ctx = context
completion.operation = Op_Poll_Remove{
fd = fd,
event = event,
}
append(&io.io_pending, completion)
return completion
}

View file

@ -0,0 +1,564 @@
#+private
package nbio
import "base:runtime"
import "core:container/queue"
import "core:mem"
import "core:net"
import "core:os"
import "core:time"
import kqueue "_kqueue"
MAX_EVENTS :: 256
_IO :: struct {
kq: os.Handle,
io_inflight: int,
completion_pool: Pool(Completion),
timeouts: [dynamic]^Completion,
completed: queue.Queue(^Completion),
io_pending: [dynamic]^Completion,
allocator: mem.Allocator,
}
_Completion :: struct {
operation: Operation,
ctx: runtime.Context,
}
Op_Accept :: struct {
callback: On_Accept,
sock: net.TCP_Socket,
}
Op_Close :: struct {
callback: On_Close,
handle: os.Handle,
}
Op_Connect :: struct {
callback: On_Connect,
socket: net.TCP_Socket,
sockaddr: os.SOCKADDR_STORAGE_LH,
initiated: bool,
}
Op_Recv :: struct {
callback: On_Recv,
socket: net.Any_Socket,
buf: []byte,
all: bool,
received: int,
len: int,
}
Op_Send :: struct {
callback: On_Sent,
socket: net.Any_Socket,
buf: []byte,
endpoint: Maybe(net.Endpoint),
all: bool,
len: int,
sent: int,
}
Op_Read :: struct {
callback: On_Read,
fd: os.Handle,
buf: []byte,
offset: int,
all: bool,
read: int,
len: int,
}
Op_Write :: struct {
callback: On_Write,
fd: os.Handle,
buf: []byte,
offset: int,
all: bool,
written: int,
len: int,
}
Op_Timeout :: struct {
callback: On_Timeout,
expires: time.Time,
}
Op_Next_Tick :: struct {
callback: On_Next_Tick,
}
Op_Poll :: struct {
callback: On_Poll,
fd: os.Handle,
event: Poll_Event,
multi: bool,
}
Op_Poll_Remove :: struct {
fd: os.Handle,
event: Poll_Event,
}
flush :: proc(io: ^IO) -> os.Errno {
events: [MAX_EVENTS]kqueue.KEvent
min_timeout := flush_timeouts(io)
change_events := flush_io(io, events[:])
if (change_events > 0 || queue.len(io.completed) == 0) {
if (change_events == 0 && queue.len(io.completed) == 0 && io.io_inflight == 0) {
return os.ERROR_NONE
}
max_timeout := time.Millisecond * 10
ts: kqueue.Time_Spec
ts.nsec = min(min_timeout.? or_else i64(max_timeout), i64(max_timeout))
new_events, err := kqueue.kevent(io.kq, events[:change_events], events[:], &ts)
if err != .None do return ev_err_to_os_err(err)
// PERF: this is ordered and O(N), can this be made unordered?
remove_range(&io.io_pending, 0, change_events)
io.io_inflight += change_events
io.io_inflight -= new_events
if new_events > 0 {
queue.reserve(&io.completed, new_events)
for event in events[:new_events] {
completion := cast(^Completion)event.udata
queue.push_back(&io.completed, completion)
}
}
}
// Save the length so we avoid an infinite loop when something is added to the queue from within a callback.
n := queue.len(io.completed)
for _ in 0 ..< n {
completed := queue.pop_front(&io.completed)
context = completed.ctx
switch &op in completed.operation {
case Op_Accept: do_accept (io, completed, &op)
case Op_Close: do_close (io, completed, &op)
case Op_Connect: do_connect (io, completed, &op)
case Op_Read: do_read (io, completed, &op)
case Op_Recv: do_recv (io, completed, &op)
case Op_Send: do_send (io, completed, &op)
case Op_Write: do_write (io, completed, &op)
case Op_Timeout: do_timeout (io, completed, &op)
case Op_Next_Tick: do_next_tick (io, completed, &op)
case Op_Poll: do_poll (io, completed, &op)
case Op_Poll_Remove: do_poll_remove(io, completed, &op)
case: unreachable()
}
}
return os.ERROR_NONE
}
flush_io :: proc(io: ^IO, events: []kqueue.KEvent) -> int {
events := events
events_loop: for &event, i in events {
if len(io.io_pending) <= i do return i
completion := io.io_pending[i]
switch op in completion.operation {
case Op_Accept:
event.ident = uintptr(op.sock)
event.filter = kqueue.EVFILT_READ
case Op_Connect:
event.ident = uintptr(op.socket)
event.filter = kqueue.EVFILT_WRITE
case Op_Read:
event.ident = uintptr(op.fd)
event.filter = kqueue.EVFILT_READ
case Op_Write:
event.ident = uintptr(op.fd)
event.filter = kqueue.EVFILT_WRITE
case Op_Recv:
event.ident = uintptr(os.Socket(net.any_socket_to_socket(op.socket)))
event.filter = kqueue.EVFILT_READ
case Op_Send:
event.ident = uintptr(os.Socket(net.any_socket_to_socket(op.socket)))
event.filter = kqueue.EVFILT_WRITE
case Op_Poll:
event.ident = uintptr(op.fd)
switch op.event {
case .Read: event.filter = kqueue.EVFILT_READ
case .Write: event.filter = kqueue.EVFILT_WRITE
case: unreachable()
}
event.flags = kqueue.EV_ADD | kqueue.EV_ENABLE
if !op.multi {
event.flags |= kqueue.EV_ONESHOT
}
event.udata = completion
continue events_loop
case Op_Poll_Remove:
event.ident = uintptr(op.fd)
switch op.event {
case .Read: event.filter = kqueue.EVFILT_READ
case .Write: event.filter = kqueue.EVFILT_WRITE
case: unreachable()
}
event.flags = kqueue.EV_DELETE | kqueue.EV_DISABLE | kqueue.EV_ONESHOT
event.udata = completion
continue events_loop
case Op_Timeout, Op_Close, Op_Next_Tick:
panic("invalid completion operation queued")
}
event.flags = kqueue.EV_ADD | kqueue.EV_ENABLE | kqueue.EV_ONESHOT
event.udata = completion
}
return len(events)
}
flush_timeouts :: proc(io: ^IO) -> (min_timeout: Maybe(i64)) {
now: time.Time
// PERF: is there a faster way to compare time? Or track time since program start and compare that?
if len(io.timeouts) > 0 do now = time.now()
for i := len(io.timeouts) - 1; i >= 0; i -= 1 {
completion := io.timeouts[i]
timeout, ok := &completion.operation.(Op_Timeout)
if !ok do panic("non-timeout operation found in the timeouts queue")
unow := time.to_unix_nanoseconds(now)
expires := time.to_unix_nanoseconds(timeout.expires)
if unow >= expires {
ordered_remove(&io.timeouts, i)
queue.push_back(&io.completed, completion)
continue
}
timeout_ns := expires - unow
if min, has_min_timeout := min_timeout.(i64); has_min_timeout {
if timeout_ns < min {
min_timeout = timeout_ns
}
} else {
min_timeout = timeout_ns
}
}
return
}
do_accept :: proc(io: ^IO, completion: ^Completion, op: ^Op_Accept) {
client, source, err := net.accept_tcp(op.sock)
if err == net.Accept_Error.Would_Block {
append(&io.io_pending, completion)
return
}
if err == nil {
err = _prepare_socket(client)
}
if err != nil {
net.close(client)
op.callback(completion.user_data, {}, {}, err)
} else {
op.callback(completion.user_data, client, source, nil)
}
pool_put(&io.completion_pool, completion)
}
do_close :: proc(io: ^IO, completion: ^Completion, op: ^Op_Close) {
ok := os.close(op.handle)
op.callback(completion.user_data, ok == os.ERROR_NONE)
pool_put(&io.completion_pool, completion)
}
do_connect :: proc(io: ^IO, completion: ^Completion, op: ^Op_Connect) {
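// Non-blocking connect is two-phased: the first call issues os.connect and typically gets
// EINPROGRESS, after which we wait for writability; once kqueue wakes us up we only have to
// read SO_ERROR to learn the result.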
defer op.initiated = true
err: os.Errno
if op.initiated {
// We have already called os.connect, retrieve error number only.
os.getsockopt(os.Socket(op.socket), os.SOL_SOCKET, os.SO_ERROR, &err, size_of(os.Errno))
} else {
err = os.connect(os.Socket(op.socket), (^os.SOCKADDR)(&op.sockaddr), i32(op.sockaddr.len))
if err == os.EINPROGRESS {
append(&io.io_pending, completion)
return
}
}
if err != os.ERROR_NONE {
net.close(op.socket)
op.callback(completion.user_data, {}, net.Dial_Error(err.(os.Platform_Error)))
} else {
op.callback(completion.user_data, op.socket, nil)
}
pool_put(&io.completion_pool, completion)
}
do_read :: proc(io: ^IO, completion: ^Completion, op: ^Op_Read) {
read: int
err: os.Errno
//odinfmt:disable
switch {
case op.offset >= 0: read, err = os.read_at(op.fd, op.buf, i64(op.offset))
case: read, err = os.read(op.fd, op.buf)
}
//odinfmt:enable
op.read += read
if err != os.ERROR_NONE {
if err == os.EWOULDBLOCK {
append(&io.io_pending, completion)
return
}
op.callback(completion.user_data, op.read, err)
pool_put(&io.completion_pool, completion)
return
}
if op.all && op.read < op.len {
op.buf = op.buf[read:]
if op.offset >= 0 {
op.offset += read
}
do_read(io, completion, op)
return
}
op.callback(completion.user_data, op.read, os.ERROR_NONE)
pool_put(&io.completion_pool, completion)
}
do_recv :: proc(io: ^IO, completion: ^Completion, op: ^Op_Recv) {
received: int
err: net.Network_Error
remote_endpoint: Maybe(net.Endpoint)
switch sock in op.socket {
case net.TCP_Socket:
received, err = net.recv_tcp(sock, op.buf)
// NOTE: Timeout is the name for EWOULDBLOCK in net package.
if err == net.TCP_Recv_Error.Timeout {
append(&io.io_pending, completion)
return
}
case net.UDP_Socket:
received, remote_endpoint, err = net.recv_udp(sock, op.buf)
// NOTE: Timeout is the name for EWOULDBLOCK in net package.
if err == net.UDP_Recv_Error.Timeout {
append(&io.io_pending, completion)
return
}
}
op.received += received
if err != nil {
op.callback(completion.user_data, op.received, remote_endpoint, err)
pool_put(&io.completion_pool, completion)
return
}
if op.all && op.received < op.len {
op.buf = op.buf[received:]
do_recv(io, completion, op)
return
}
op.callback(completion.user_data, op.received, remote_endpoint, err)
pool_put(&io.completion_pool, completion)
}
do_send :: proc(io: ^IO, completion: ^Completion, op: ^Op_Send) {
sent: u32
errno: os.Errno
err: net.Network_Error
switch sock in op.socket {
case net.TCP_Socket:
sent, errno = os.send(os.Socket(sock), op.buf, 0)
if errno != nil {
err = net.TCP_Send_Error(errno.(os.Platform_Error))
}
case net.UDP_Socket:
toaddr := _endpoint_to_sockaddr(op.endpoint.(net.Endpoint))
sent, errno = os.sendto(os.Socket(sock), op.buf, 0, cast(^os.SOCKADDR)&toaddr, i32(toaddr.len))
if errno != nil {
err = net.UDP_Send_Error(errno.(os.Platform_Error))
}
}
op.sent += int(sent)
if errno != os.ERROR_NONE {
if errno == os.EWOULDBLOCK {
append(&io.io_pending, completion)
return
}
op.callback(completion.user_data, op.sent, err)
pool_put(&io.completion_pool, completion)
return
}
if op.all && op.sent < op.len {
op.buf = op.buf[sent:]
do_send(io, completion, op)
return
}
op.callback(completion.user_data, op.sent, nil)
pool_put(&io.completion_pool, completion)
}
do_write :: proc(io: ^IO, completion: ^Completion, op: ^Op_Write) {
written: int
err: os.Errno
//odinfmt:disable
switch {
case op.offset >= 0: written, err = os.write_at(op.fd, op.buf, i64(op.offset))
case: written, err = os.write(op.fd, op.buf)
}
//odinfmt:enable
op.written += written
if err != os.ERROR_NONE {
if err == os.EWOULDBLOCK {
append(&io.io_pending, completion)
return
}
op.callback(completion.user_data, op.written, err)
pool_put(&io.completion_pool, completion)
return
}
// The write did not write the whole buffer, need to write more.
if op.all && op.written < op.len {
op.buf = op.buf[written:]
// Increase offset so we don't overwrite what we just wrote.
if op.offset >= 0 {
op.offset += written
}
do_write(io, completion, op)
return
}
op.callback(completion.user_data, op.written, os.ERROR_NONE)
pool_put(&io.completion_pool, completion)
}
do_timeout :: proc(io: ^IO, completion: ^Completion, op: ^Op_Timeout) {
op.callback(completion.user_data)
pool_put(&io.completion_pool, completion)
}
do_poll :: proc(io: ^IO, completion: ^Completion, op: ^Op_Poll) {
op.callback(completion.user_data, op.event)
if !op.multi {
pool_put(&io.completion_pool, completion)
}
}
do_poll_remove :: proc(io: ^IO, completion: ^Completion, op: ^Op_Poll_Remove) {
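// Nothing to report back to the user here: the EV_DELETE change was already submitted by
// flush_io, so the only job left is releasing the completion.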
pool_put(&io.completion_pool, completion)
}
do_next_tick :: proc(io: ^IO, completion: ^Completion, op: ^Op_Next_Tick) {
op.callback(completion.user_data)
pool_put(&io.completion_pool, completion)
}
kq_err_to_os_err :: proc(err: kqueue.Queue_Error) -> os.Errno {
switch err {
case .Out_Of_Memory:
return os.ENOMEM
case .Descriptor_Table_Full:
return os.EMFILE
case .File_Table_Full:
return os.ENFILE
case .Unknown:
return os.EFAULT
case .None:
fallthrough
case:
return os.ERROR_NONE
}
}
ev_err_to_os_err :: proc(err: kqueue.Event_Error) -> os.Errno {
switch err {
case .Access_Denied:
return os.EACCES
case .Invalid_Event:
return os.EFAULT
case .Invalid_Descriptor:
return os.EBADF
case .Signal:
return os.EINTR
case .Invalid_Timeout_Or_Filter:
return os.EINVAL
case .Event_Not_Found:
return os.ENOENT
case .Out_Of_Memory:
return os.ENOMEM
case .Process_Not_Found:
return os.ESRCH
case .Unknown:
return os.EFAULT
case .None:
fallthrough
case:
return os.ERROR_NONE
}
}
// Private proc in net package, verbatim copy.
_endpoint_to_sockaddr :: proc(ep: net.Endpoint) -> (sockaddr: os.SOCKADDR_STORAGE_LH) {
switch a in ep.address {
case net.IP4_Address:
(^os.sockaddr_in)(&sockaddr)^ = os.sockaddr_in {
sin_port = u16be(ep.port),
sin_addr = transmute(os.in_addr)a,
sin_family = u8(os.AF_INET),
sin_len = size_of(os.sockaddr_in),
}
return
case net.IP6_Address:
(^os.sockaddr_in6)(&sockaddr)^ = os.sockaddr_in6 {
sin6_port = u16be(ep.port),
sin6_addr = transmute(os.in6_addr)a,
sin6_family = u8(os.AF_INET6),
sin6_len = size_of(os.sockaddr_in6),
}
return
}
unreachable()
}
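// Example: a minimal sketch of the one-shot readiness pattern that flush_io and flush implement,
// assuming io.kq is the kqueue handle set up in _init and sock is a non-blocking socket.
// Interest is registered with EV_ONESHOT, so the registration is consumed once it fires and the
// syscall that previously returned EWOULDBLOCK is simply retried.
example_oneshot_readiness :: proc(io: ^IO, sock: os.Socket) -> (ready: bool, err: os.Errno) {
	change: kqueue.KEvent
	change.ident = uintptr(sock)
	change.filter = kqueue.EVFILT_READ
	change.flags = kqueue.EV_ADD | kqueue.EV_ENABLE | kqueue.EV_ONESHOT

	changes := [1]kqueue.KEvent{change}
	events: [1]kqueue.KEvent

	// Wait at most 10ms, mirroring the max_timeout used in flush.
	ts: kqueue.Time_Spec
	ts.nsec = i64(time.Millisecond * 10)

	n, kerr := kqueue.kevent(io.kq, changes[:], events[:], &ts)
	if kerr != .None do return false, ev_err_to_os_err(kerr)
	return n > 0, os.ERROR_NONE
}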

View file

@ -0,0 +1,629 @@
#+private
package nbio
import "base:runtime"
import "core:c"
import "core:container/queue"
import "core:fmt"
import "core:mem"
import "core:net"
import "core:os"
import "core:sys/linux"
import io_uring "_io_uring"
NANOSECONDS_PER_SECOND :: 1e+9
_IO :: struct {
ring: io_uring.IO_Uring,
completion_pool: Pool(Completion),
// Ready to be submitted to kernel.
unqueued: queue.Queue(^Completion),
// Ready to run callbacks.
completed: queue.Queue(^Completion),
ios_queued: u64,
ios_in_kernel: u64,
allocator: mem.Allocator,
}
_Completion :: struct {
result: i32,
operation: Operation,
ctx: runtime.Context,
}
Op_Accept :: struct {
callback: On_Accept,
socket: net.TCP_Socket,
sockaddr: os.SOCKADDR_STORAGE_LH,
sockaddrlen: c.int,
}
Op_Close :: struct {
callback: On_Close,
fd: os.Handle,
}
Op_Connect :: struct {
callback: On_Connect,
socket: net.TCP_Socket,
sockaddr: os.SOCKADDR_STORAGE_LH,
}
Op_Read :: struct {
callback: On_Read,
fd: os.Handle,
buf: []byte,
offset: int,
all: bool,
read: int,
len: int,
}
Op_Write :: struct {
callback: On_Write,
fd: os.Handle,
buf: []byte,
offset: int,
all: bool,
written: int,
len: int,
}
Op_Send :: struct {
callback: On_Sent,
socket: net.Any_Socket,
buf: []byte,
len: int,
sent: int,
all: bool,
}
Op_Recv :: struct {
callback: On_Recv,
socket: net.Any_Socket,
buf: []byte,
all: bool,
received: int,
len: int,
}
Op_Timeout :: struct {
callback: On_Timeout,
expires: linux.Time_Spec,
}
Op_Next_Tick :: struct {
callback: On_Next_Tick,
}
Op_Poll :: struct {
callback: On_Poll,
fd: os.Handle,
event: Poll_Event,
multi: bool,
}
Op_Poll_Remove :: struct {
fd: os.Handle,
event: Poll_Event,
}
flush :: proc(io: ^IO, wait_nr: u32, timeouts: ^uint, etime: ^bool) -> os.Errno {
err := flush_submissions(io, wait_nr, timeouts, etime)
if err != os.ERROR_NONE do return err
err = flush_completions(io, 0, timeouts, etime)
if err != os.ERROR_NONE do return err
// Store the length at this point, so we don't loop forever if any of the enqueue
// procs below add to the queue again.
n := queue.len(io.unqueued)
// odinfmt: disable
for _ in 0..<n {
unqueued := queue.pop_front(&io.unqueued)
switch &op in unqueued.operation {
case Op_Accept: accept_enqueue (io, unqueued, &op)
case Op_Close: close_enqueue (io, unqueued, &op)
case Op_Connect: connect_enqueue (io, unqueued, &op)
case Op_Read: read_enqueue (io, unqueued, &op)
case Op_Recv: recv_enqueue (io, unqueued, &op)
case Op_Send: send_enqueue (io, unqueued, &op)
case Op_Write: write_enqueue (io, unqueued, &op)
case Op_Timeout: timeout_enqueue (io, unqueued, &op)
case Op_Poll: poll_enqueue (io, unqueued, &op)
case Op_Poll_Remove: poll_remove_enqueue(io, unqueued, &op)
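// Next_Tick completions are pushed straight onto io.completed (see _next_tick) and never end up unqueued.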
case Op_Next_Tick: unreachable()
}
}
n = queue.len(io.completed)
for _ in 0 ..< n {
completed := queue.pop_front(&io.completed)
context = completed.ctx
switch &op in completed.operation {
case Op_Accept: accept_callback (io, completed, &op)
case Op_Close: close_callback (io, completed, &op)
case Op_Connect: connect_callback (io, completed, &op)
case Op_Read: read_callback (io, completed, &op)
case Op_Recv: recv_callback (io, completed, &op)
case Op_Send: send_callback (io, completed, &op)
case Op_Write: write_callback (io, completed, &op)
case Op_Timeout: timeout_callback (io, completed, &op)
case Op_Poll: poll_callback (io, completed, &op)
case Op_Poll_Remove: poll_remove_callback(io, completed, &op)
case Op_Next_Tick: next_tick_callback (io, completed, &op)
case: unreachable()
}
}
// odinfmt: enable
return os.ERROR_NONE
}
flush_completions :: proc(io: ^IO, wait_nr: u32, timeouts: ^uint, etime: ^bool) -> os.Errno {
cqes: [256]io_uring.io_uring_cqe
wait_remaining := wait_nr
for {
completed, err := io_uring.copy_cqes(&io.ring, cqes[:], wait_remaining)
if err != .None do return ring_err_to_os_err(err)
wait_remaining = max(0, wait_remaining - completed)
if completed > 0 {
queue.reserve(&io.completed, int(completed))
for cqe in cqes[:completed] {
io.ios_in_kernel -= 1
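// A user_data of 0 marks the internal timeout submitted by _tick, not a user completion.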
if cqe.user_data == 0 {
timeouts^ -= 1
if (-cqe.res == i32(os.ETIME)) {
etime^ = true
}
continue
}
completion := cast(^Completion)uintptr(cqe.user_data)
completion.result = cqe.res
queue.push_back(&io.completed, completion)
}
}
if completed < len(cqes) do break
}
return os.ERROR_NONE
}
flush_submissions :: proc(io: ^IO, wait_nr: u32, timeouts: ^uint, etime: ^bool) -> os.Errno {
for {
submitted, err := io_uring.submit(&io.ring, wait_nr)
#partial switch err {
case .None:
break
case .Signal_Interrupt:
continue
case .Completion_Queue_Overcommitted, .System_Resources:
ferr := flush_completions(io, 1, timeouts, etime)
if ferr != os.ERROR_NONE do return ferr
continue
case:
return ring_err_to_os_err(err)
}
io.ios_queued -= u64(submitted)
io.ios_in_kernel += u64(submitted)
break
}
return os.ERROR_NONE
}
accept_enqueue :: proc(io: ^IO, completion: ^Completion, op: ^Op_Accept) {
_, err := io_uring.accept(
&io.ring,
u64(uintptr(completion)),
os.Socket(op.socket),
cast(^os.SOCKADDR)&op.sockaddr,
&op.sockaddrlen,
)
if err == .Submission_Queue_Full {
queue.push_back(&io.unqueued, completion)
return
}
io.ios_queued += 1
}
accept_callback :: proc(io: ^IO, completion: ^Completion, op: ^Op_Accept) {
if completion.result < 0 {
errno := os.Platform_Error(-completion.result)
#partial switch errno {
case .EINTR, .EWOULDBLOCK:
accept_enqueue(io, completion, op)
case:
op.callback(completion.user_data, 0, {}, net.Accept_Error(errno))
pool_put(&io.completion_pool, completion)
}
return
}
client := net.TCP_Socket(completion.result)
err := _prepare_socket(client)
source := sockaddr_storage_to_endpoint(&op.sockaddr)
op.callback(completion.user_data, client, source, err)
pool_put(&io.completion_pool, completion)
}
close_enqueue :: proc(io: ^IO, completion: ^Completion, op: ^Op_Close) {
_, err := io_uring.close(&io.ring, u64(uintptr(completion)), op.fd)
if err == .Submission_Queue_Full {
queue.push_back(&io.unqueued, completion)
return
}
io.ios_queued += 1
}
close_callback :: proc(io: ^IO, completion: ^Completion, op: ^Op_Close) {
errno := os.Platform_Error(-completion.result)
// In particular close() should not be retried after an EINTR
// since this may cause a reused descriptor from another thread to be closed.
op.callback(completion.user_data, errno == .NONE || errno == .EINTR)
pool_put(&io.completion_pool, completion)
}
connect_enqueue :: proc(io: ^IO, completion: ^Completion, op: ^Op_Connect) {
_, err := io_uring.connect(
&io.ring,
u64(uintptr(completion)),
os.Socket(op.socket),
cast(^os.SOCKADDR)&op.sockaddr,
size_of(op.sockaddr),
)
if err == .Submission_Queue_Full {
queue.push_back(&io.unqueued, completion)
return
}
io.ios_queued += 1
}
connect_callback :: proc(io: ^IO, completion: ^Completion, op: ^Op_Connect) {
errno := os.Platform_Error(-completion.result)
#partial switch errno {
case .EINTR, .EWOULDBLOCK:
connect_enqueue(io, completion, op)
return
case .NONE:
op.callback(completion.user_data, op.socket, nil)
case:
net.close(op.socket)
op.callback(completion.user_data, {}, net.Dial_Error(errno))
}
pool_put(&io.completion_pool, completion)
}
read_enqueue :: proc(io: ^IO, completion: ^Completion, op: ^Op_Read) {
// Passing max(u64) tells Linux to use the file cursor as the offset.
offset := max(u64) if op.offset < 0 else u64(op.offset)
_, err := io_uring.read(&io.ring, u64(uintptr(completion)), op.fd, op.buf, offset)
if err == .Submission_Queue_Full {
queue.push_back(&io.unqueued, completion)
return
}
io.ios_queued += 1
}
read_callback :: proc(io: ^IO, completion: ^Completion, op: ^Op_Read) {
if completion.result < 0 {
errno := os.Platform_Error(-completion.result)
#partial switch errno {
case .EINTR, .EWOULDBLOCK:
read_enqueue(io, completion, op)
case:
op.callback(completion.user_data, op.read, errno)
pool_put(&io.completion_pool, completion)
}
return
}
op.read += int(completion.result)
if op.all && op.read < op.len {
op.buf = op.buf[completion.result:]
read_enqueue(io, completion, op)
return
}
op.callback(completion.user_data, op.read, os.ERROR_NONE)
pool_put(&io.completion_pool, completion)
}
recv_enqueue :: proc(io: ^IO, completion: ^Completion, op: ^Op_Recv) {
tcpsock, ok := op.socket.(net.TCP_Socket)
if !ok {
// TODO: figure out and implement.
unimplemented("UDP recv is unimplemented for linux nbio")
}
_, err := io_uring.recv(&io.ring, u64(uintptr(completion)), os.Socket(tcpsock), op.buf, 0)
if err == .Submission_Queue_Full {
queue.push_back(&io.unqueued, completion)
return
}
// TODO: handle other errors, also in other enqueue procs.
io.ios_queued += 1
}
recv_callback :: proc(io: ^IO, completion: ^Completion, op: ^Op_Recv) {
if completion.result < 0 {
errno := os.Platform_Error(-completion.result)
#partial switch errno {
case .EINTR, .EWOULDBLOCK:
recv_enqueue(io, completion, op)
case:
op.callback(completion.user_data, op.received, {}, net.TCP_Recv_Error(errno))
pool_put(&io.completion_pool, completion)
}
return
}
op.received += int(completion.result)
if op.all && op.received < op.len {
op.buf = op.buf[completion.result:]
recv_enqueue(io, completion, op)
return
}
op.callback(completion.user_data, op.received, {}, nil)
pool_put(&io.completion_pool, completion)
}
send_enqueue :: proc(io: ^IO, completion: ^Completion, op: ^Op_Send) {
tcpsock, ok := op.socket.(net.TCP_Socket)
if !ok {
// TODO: figure out and implement.
unimplemented("UDP send is unimplemented for linux nbio")
}
_, err := io_uring.send(&io.ring, u64(uintptr(completion)), os.Socket(tcpsock), op.buf, 0)
if err == .Submission_Queue_Full {
queue.push_back(&io.unqueued, completion)
return
}
io.ios_queued += 1
}
send_callback :: proc(io: ^IO, completion: ^Completion, op: ^Op_Send) {
if completion.result < 0 {
errno := os.Platform_Error(-completion.result)
#partial switch errno {
case .EINTR, .EWOULDBLOCK:
send_enqueue(io, completion, op)
case:
op.callback(completion.user_data, op.sent, net.TCP_Send_Error(errno))
pool_put(&io.completion_pool, completion)
}
return
}
op.sent += int(completion.result)
if op.all && op.sent < op.len {
op.buf = op.buf[completion.result:]
send_enqueue(io, completion, op)
return
}
op.callback(completion.user_data, op.sent, nil)
pool_put(&io.completion_pool, completion)
}
write_enqueue :: proc(io: ^IO, completion: ^Completion, op: ^Op_Write) {
// Passing max(u64) tells Linux to use the file cursor as the offset.
offset := max(u64) if op.offset < 0 else u64(op.offset)
_, err := io_uring.write(&io.ring, u64(uintptr(completion)), op.fd, op.buf, offset)
if err == .Submission_Queue_Full {
queue.push_back(&io.unqueued, completion)
return
}
io.ios_queued += 1
}
write_callback :: proc(io: ^IO, completion: ^Completion, op: ^Op_Write) {
if completion.result < 0 {
errno := os.Platform_Error(-completion.result)
#partial switch errno {
case .EINTR, .EWOULDBLOCK:
write_enqueue(io, completion, op)
case:
op.callback(completion.user_data, op.written, errno)
pool_put(&io.completion_pool, completion)
}
return
}
op.written += int(completion.result)
if op.all && op.written < op.len {
op.buf = op.buf[completion.result:]
if op.offset >= 0 {
op.offset += int(completion.result)
}
write_enqueue(io, completion, op)
return
}
op.callback(completion.user_data, op.written, os.ERROR_NONE)
pool_put(&io.completion_pool, completion)
}
timeout_enqueue :: proc(io: ^IO, completion: ^Completion, op: ^Op_Timeout) {
_, err := io_uring.timeout(&io.ring, u64(uintptr(completion)), &op.expires, 0, 0)
if err == .Submission_Queue_Full {
queue.push_back(&io.unqueued, completion)
return
}
io.ios_queued += 1
}
timeout_callback :: proc(io: ^IO, completion: ^Completion, op: ^Op_Timeout) {
if completion.result < 0 {
errno := os.Platform_Error(-completion.result)
#partial switch errno {
case .ETIME: // OK.
case .EINTR, .EWOULDBLOCK:
timeout_enqueue(io, completion, op)
return
case:
fmt.panicf("timeout error: %v", errno)
}
}
op.callback(completion.user_data)
pool_put(&io.completion_pool, completion)
}
next_tick_callback :: proc(io: ^IO, completion: ^Completion, op: ^Op_Next_Tick) {
op.callback(completion.user_data)
pool_put(&io.completion_pool, completion)
}
poll_enqueue :: proc(io: ^IO, completion: ^Completion, op: ^Op_Poll) {
events: linux.Fd_Poll_Events
switch op.event {
case .Read: events = linux.Fd_Poll_Events{.IN}
case .Write: events = linux.Fd_Poll_Events{.OUT}
}
flags: io_uring.IORing_Poll_Flags
if op.multi {
flags = io_uring.IORing_Poll_Flags{.ADD_MULTI}
}
_, err := io_uring.poll_add(&io.ring, u64(uintptr(completion)), op.fd, events, flags)
if err == .Submission_Queue_Full {
queue.push_back(&io.unqueued, completion)
return
}
io.ios_queued += 1
}
poll_callback :: proc(io: ^IO, completion: ^Completion, op: ^Op_Poll) {
op.callback(completion.user_data, op.event)
if !op.multi {
pool_put(&io.completion_pool, completion)
}
}
poll_remove_enqueue :: proc(io: ^IO, completion: ^Completion, op: ^Op_Poll_Remove) {
events: linux.Fd_Poll_Events
switch op.event {
case .Read: events = linux.Fd_Poll_Events{.IN}
case .Write: events = linux.Fd_Poll_Events{.OUT}
}
_, err := io_uring.poll_remove(&io.ring, u64(uintptr(completion)), op.fd, events)
if err == .Submission_Queue_Full {
queue.push_back(&io.unqueued, completion)
return
}
io.ios_queued += 1
}
poll_remove_callback :: proc(io: ^IO, completion: ^Completion, op: ^Op_Poll_Remove) {
pool_put(&io.completion_pool, completion)
}
ring_err_to_os_err :: proc(err: io_uring.IO_Uring_Error) -> os.Errno {
switch err {
case .None:
return os.ERROR_NONE
case .Params_Outside_Accessible_Address_Space, .Buffer_Invalid, .File_Descriptor_Invalid, .Submission_Queue_Entry_Invalid, .Ring_Shutting_Down:
return os.EFAULT
case .Arguments_Invalid, .Entries_Zero, .Entries_Too_Large, .Entries_Not_Power_Of_Two, .Opcode_Not_Supported:
return os.EINVAL
case .Process_Fd_Quota_Exceeded:
return os.EMFILE
case .System_Fd_Quota_Exceeded:
return os.ENFILE
case .System_Resources, .Completion_Queue_Overcommitted:
return os.ENOMEM
case .Permission_Denied:
return os.EPERM
case .System_Outdated:
return os.ENOSYS
case .Submission_Queue_Full:
return os.EOVERFLOW
case .Signal_Interrupt:
return os.EINTR
case .Unexpected:
fallthrough
case:
return os.Platform_Error(-1)
}
}
// verbatim copy of net._sockaddr_storage_to_endpoint.
sockaddr_storage_to_endpoint :: proc(native_addr: ^os.SOCKADDR_STORAGE_LH) -> (ep: net.Endpoint) {
switch native_addr.ss_family {
case u16(os.AF_INET):
addr := cast(^os.sockaddr_in)native_addr
port := int(addr.sin_port)
ep = net.Endpoint {
address = net.IP4_Address(transmute([4]byte)addr.sin_addr),
port = port,
}
case u16(os.AF_INET6):
addr := cast(^os.sockaddr_in6)native_addr
port := int(addr.sin6_port)
ep = net.Endpoint {
address = net.IP6_Address(transmute([8]u16be)addr.sin6_addr),
port = port,
}
case:
panic("native_addr is neither IP4 or IP6 address")
}
return
}
// verbatim copy of net._endpoint_to_sockaddr.
endpoint_to_sockaddr :: proc(ep: net.Endpoint) -> (sockaddr: os.SOCKADDR_STORAGE_LH) {
switch a in ep.address {
case net.IP4_Address:
(^os.sockaddr_in)(&sockaddr)^ = os.sockaddr_in {
sin_family = u16(os.AF_INET),
sin_port = u16be(ep.port),
sin_addr = transmute(os.in_addr)a,
}
return
case net.IP6_Address:
(^os.sockaddr_in6)(&sockaddr)^ = os.sockaddr_in6 {
sin6_family = u16(os.AF_INET6),
sin6_port = u16be(ep.port),
sin6_addr = transmute(os.in6_addr)a,
}
return
}
unreachable()
}

View file

@ -0,0 +1,639 @@
#+private
package nbio
import "base:runtime"
import "core:container/queue"
import "core:log"
import "core:mem"
import "core:net"
import "core:os"
import "core:time"
import win "core:sys/windows"
_IO :: struct {
iocp: win.HANDLE,
allocator: mem.Allocator,
timeouts: [dynamic]^Completion,
completed: queue.Queue(^Completion),
completion_pool: Pool(Completion),
io_pending: int,
// The asynchronous Windows APIs don't support reading at the current offset of a file, so we keep track ourselves.
offsets: map[os.Handle]u32,
}
_Completion :: struct {
over: win.OVERLAPPED,
ctx: runtime.Context,
op: Operation,
}
#assert(offset_of(Completion, over) == 0, "needs to be the first field to work")
Op_Accept :: struct {
callback: On_Accept,
socket: win.SOCKET,
client: win.SOCKET,
addr: win.SOCKADDR_STORAGE_LH,
pending: bool,
}
Op_Connect :: struct {
callback: On_Connect,
socket: win.SOCKET,
addr: win.SOCKADDR_STORAGE_LH,
pending: bool,
}
Op_Close :: struct {
callback: On_Close,
fd: Closable,
}
Op_Read :: struct {
callback: On_Read,
fd: os.Handle,
offset: int,
buf: []byte,
pending: bool,
all: bool,
read: int,
len: int,
}
Op_Write :: struct {
callback: On_Write,
fd: os.Handle,
offset: int,
buf: []byte,
pending: bool,
written: int,
len: int,
all: bool,
}
Op_Recv :: struct {
callback: On_Recv,
socket: net.Any_Socket,
buf: win.WSABUF,
pending: bool,
all: bool,
received: int,
len: int,
}
Op_Send :: struct {
callback: On_Sent,
socket: net.Any_Socket,
buf: win.WSABUF,
pending: bool,
len: int,
sent: int,
all: bool,
}
Op_Timeout :: struct {
callback: On_Timeout,
expires: time.Time,
}
Op_Next_Tick :: struct {}
Op_Poll :: struct {}
Op_Poll_Remove :: struct {}
flush_timeouts :: proc(io: ^IO) -> (expires: Maybe(time.Duration)) {
curr: time.Time
timeout_len := len(io.timeouts)
// PERF: could use a faster clock, is getting time since program start fast?
if timeout_len > 0 do curr = time.now()
for i := 0; i < timeout_len; {
completion := io.timeouts[i]
op := &completion.op.(Op_Timeout)
cexpires := time.diff(curr, op.expires)
// Timeout done.
if (cexpires <= 0) {
ordered_remove(&io.timeouts, i)
queue.push_back(&io.completed, completion)
timeout_len -= 1
continue
}
// Update minimum timeout.
exp, ok := expires.?
expires = min(exp, cexpires) if ok else cexpires
i += 1
}
return
}
prepare_socket :: proc(io: ^IO, socket: net.Any_Socket) -> net.Network_Error {
net.set_option(socket, .Reuse_Address, true) or_return
net.set_option(socket, .TCP_Nodelay, true) or_return
handle := win.HANDLE(uintptr(net.any_socket_to_socket(socket)))
handle_iocp := win.CreateIoCompletionPort(handle, io.iocp, 0, 0)
assert(handle_iocp == io.iocp)
mode: byte
mode |= FILE_SKIP_COMPLETION_PORT_ON_SUCCESS
mode |= FILE_SKIP_SET_EVENT_ON_HANDLE
if !win.SetFileCompletionNotificationModes(handle, mode) {
return net.Socket_Option_Error(win.GetLastError())
}
return nil
}
submit :: proc(io: ^IO, user: rawptr, op: Operation) -> ^Completion {
completion := pool_get(&io.completion_pool)
completion.ctx = context
completion.user_data = user
completion.op = op
queue.push_back(&io.completed, completion)
return completion
}
handle_completion :: proc(io: ^IO, completion: ^Completion) {
switch &op in completion.op {
case Op_Accept:
// TODO: we should directly call the accept callback here, no need for it to be on the Op_Accept struct.
source, err := accept_callback(io, completion, &op)
if wsa_err_incomplete(err) {
io.io_pending += 1
return
}
rerr := net.Accept_Error(err)
if rerr != nil do win.closesocket(op.client)
op.callback(completion.user_data, net.TCP_Socket(op.client), source, rerr)
case Op_Connect:
err := connect_callback(io, completion, &op)
if wsa_err_incomplete(err) {
io.io_pending += 1
return
}
rerr := net.Dial_Error(err)
if rerr != nil do win.closesocket(op.socket)
op.callback(completion.user_data, net.TCP_Socket(op.socket), rerr)
case Op_Close:
op.callback(completion.user_data, close_callback(io, op))
case Op_Read:
read, err := read_callback(io, completion, &op)
if err_incomplete(err) {
io.io_pending += 1
return
}
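// Reaching EOF is not an error for our purposes; report the bytes read so far without an error.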
if err == win.ERROR_HANDLE_EOF {
err = win.NO_ERROR
}
op.read += int(read)
if err != win.NO_ERROR {
op.callback(completion.user_data, op.read, os.Platform_Error(err))
} else if op.all && op.read < op.len {
op.buf = op.buf[read:]
if op.offset >= 0 {
op.offset += int(read)
}
op.pending = false
handle_completion(io, completion)
return
} else {
op.callback(completion.user_data, op.read, os.ERROR_NONE)
}
case Op_Write:
written, err := write_callback(io, completion, &op)
if err_incomplete(err) {
io.io_pending += 1
return
}
op.written += int(written)
oerr := os.Platform_Error(err)
if oerr != os.ERROR_NONE {
op.callback(completion.user_data, op.written, oerr)
} else if op.all && op.written < op.len {
op.buf = op.buf[written:]
if op.offset >= 0 {
op.offset += int(written)
}
op.pending = false
handle_completion(io, completion)
return
} else {
op.callback(completion.user_data, op.written, os.ERROR_NONE)
}
case Op_Recv:
received, err := recv_callback(io, completion, &op)
if wsa_err_incomplete(err) {
io.io_pending += 1
return
}
op.received += int(received)
nerr := net.TCP_Recv_Error(err)
if nerr != nil {
op.callback(completion.user_data, op.received, {}, nerr)
} else if op.all && op.received < op.len {
op.buf = win.WSABUF{
len = op.buf.len - win.ULONG(received),
buf = (cast([^]byte)op.buf.buf)[received:],
}
op.pending = false
handle_completion(io, completion)
return
} else {
op.callback(completion.user_data, op.received, {}, nil)
}
case Op_Send:
sent, err := send_callback(io, completion, &op)
if wsa_err_incomplete(err) {
io.io_pending += 1
return
}
op.sent += int(sent)
nerr := net.TCP_Send_Error(err)
if nerr != nil {
op.callback(completion.user_data, op.sent, nerr)
} else if op.all && op.sent < op.len {
op.buf = win.WSABUF{
len = op.buf.len - win.ULONG(sent),
buf = (cast([^]byte)op.buf.buf)[sent:],
}
op.pending = false
handle_completion(io, completion)
return
} else {
op.callback(completion.user_data, op.sent, nil)
}
case Op_Timeout:
op.callback(completion.user_data)
case Op_Next_Tick, Op_Poll, Op_Poll_Remove:
unreachable()
}
pool_put(&io.completion_pool, completion)
}
accept_callback :: proc(io: ^IO, comp: ^Completion, op: ^Op_Accept) -> (source: net.Endpoint, err: win.c_int) {
ok: win.BOOL
if op.pending {
// Get status update, we've already initiated the accept.
flags: win.DWORD
transferred: win.DWORD
ok = win.WSAGetOverlappedResult(op.socket, &comp.over, &transferred, win.FALSE, &flags)
} else {
op.pending = true
oclient, oerr := open_socket(io, .IP4, .TCP)
err = win.c_int(net_err_to_code(oerr))
if err != win.NO_ERROR do return
op.client = win.SOCKET(net.any_socket_to_socket(oclient))
accept_ex: LPFN_ACCEPTEX
load_socket_fn(op.socket, win.WSAID_ACCEPTEX, &accept_ex)
#assert(size_of(win.SOCKADDR_STORAGE_LH) >= size_of(win.sockaddr_in) + 16)
bytes_read: win.DWORD
ok = accept_ex(
op.socket,
op.client,
&op.addr,
0,
size_of(win.sockaddr_in) + 16,
size_of(win.sockaddr_in) + 16,
&bytes_read,
&comp.over,
)
}
if !ok {
err = win.WSAGetLastError()
return
}
// enables getsockopt, setsockopt, getsockname, getpeername.
win.setsockopt(op.client, win.SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT, nil, 0)
source = sockaddr_to_endpoint(&op.addr)
return
}
connect_callback :: proc(io: ^IO, comp: ^Completion, op: ^Op_Connect) -> (err: win.c_int) {
transferred: win.DWORD
ok: win.BOOL
if op.pending {
flags: win.DWORD
ok = win.WSAGetOverlappedResult(op.socket, &comp.over, &transferred, win.FALSE, &flags)
} else {
op.pending = true
osocket, oerr := open_socket(io, .IP4, .TCP)
err = win.c_int(net_err_to_code(oerr))
if err != win.NO_ERROR do return
op.socket = win.SOCKET(net.any_socket_to_socket(osocket))
sockaddr := endpoint_to_sockaddr({net.IP4_Any, 0})
res := win.bind(op.socket, &sockaddr, size_of(sockaddr))
if res < 0 do return win.WSAGetLastError()
connect_ex: LPFN_CONNECTEX
load_socket_fn(op.socket, WSAID_CONNECTEX, &connect_ex)
// TODO: size_of(win.sockaddr_in6) when ip6.
ok = connect_ex(op.socket, &op.addr, size_of(win.sockaddr_in) + 16, nil, 0, &transferred, &comp.over)
}
if !ok do return win.WSAGetLastError()
// enables getsockopt, setsockopt, getsockname, getpeername.
win.setsockopt(op.socket, win.SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT, nil, 0)
return
}
close_callback :: proc(io: ^IO, op: Op_Close) -> bool {
// NOTE: This might cause problems if there is still IO queued/pending.
// Is that our responsibility to check/keep track of?
// Might want to call win.CancelIoEx to cancel all pending operations first.
switch h in op.fd {
case os.Handle:
delete_key(&io.offsets, h)
return win.CloseHandle(win.HANDLE(h)) == true
case net.TCP_Socket:
return win.closesocket(win.SOCKET(h)) == win.NO_ERROR
case net.UDP_Socket:
return win.closesocket(win.SOCKET(h)) == win.NO_ERROR
case net.Socket:
return win.closesocket(win.SOCKET(h)) == win.NO_ERROR
case:
unreachable()
}
}
read_callback :: proc(io: ^IO, comp: ^Completion, op: ^Op_Read) -> (read: win.DWORD, err: win.DWORD) {
ok: win.BOOL
if op.pending {
ok = win.GetOverlappedResult(win.HANDLE(op.fd), &comp.over, &read, win.FALSE)
} else {
comp.over.Offset = u32(op.offset) if op.offset >= 0 else io.offsets[op.fd]
comp.over.OffsetHigh = comp.over.Offset >> 32
ok = win.ReadFile(win.HANDLE(op.fd), raw_data(op.buf), win.DWORD(len(op.buf)), &read, &comp.over)
// Not sure if this also happens sometimes with correctly set up handles.
if ok do log.info("non-blocking read returned immediately, is the handle set up correctly?")
op.pending = true
}
if !ok do err = win.GetLastError()
// Advance the tracked file cursor only if this was not a call with an explicit offset.
if op.offset < 0 {
io.offsets[op.fd] += read
}
return
}
write_callback :: proc(io: ^IO, comp: ^Completion, op: ^Op_Write) -> (written: win.DWORD, err: win.DWORD) {
ok: win.BOOL
if op.pending {
ok = win.GetOverlappedResult(win.HANDLE(op.fd), &comp.over, &written, win.FALSE)
} else {
comp.over.Offset = u32(op.offset) if op.offset >= 0 else io.offsets[op.fd]
comp.over.OffsetHigh = comp.over.Offset >> 32
ok = win.WriteFile(win.HANDLE(op.fd), raw_data(op.buf), win.DWORD(len(op.buf)), &written, &comp.over)
// Not sure if this also happens sometimes with correctly set up handles.
if ok do log.debug("non-blocking write returned immediately, is the handle set up correctly?")
op.pending = true
}
if !ok do err = win.GetLastError()
// Advance the tracked file cursor only if this was not a call with an explicit offset.
if op.offset < 0 {
io.offsets[op.fd] += written
}
return
}
recv_callback :: proc(io: ^IO, comp: ^Completion, op: ^Op_Recv) -> (received: win.DWORD, err: win.c_int) {
sock := win.SOCKET(net.any_socket_to_socket(op.socket))
ok: win.BOOL
if op.pending {
flags: win.DWORD
ok = win.WSAGetOverlappedResult(sock, &comp.over, &received, win.FALSE, &flags)
} else {
flags: win.DWORD
err_code := win.WSARecv(sock, &op.buf, 1, &received, &flags, win.LPWSAOVERLAPPED(&comp.over), nil)
ok = err_code != win.SOCKET_ERROR
op.pending = true
}
if !ok do err = win.WSAGetLastError()
return
}
send_callback :: proc(io: ^IO, comp: ^Completion, op: ^Op_Send) -> (sent: win.DWORD, err: win.c_int) {
sock := win.SOCKET(net.any_socket_to_socket(op.socket))
ok: win.BOOL
if op.pending {
flags: win.DWORD
ok = win.WSAGetOverlappedResult(sock, &comp.over, &sent, win.FALSE, &flags)
} else {
err_code := win.WSASend(sock, &op.buf, 1, &sent, 0, win.LPWSAOVERLAPPED(&comp.over), nil)
ok = err_code != win.SOCKET_ERROR
op.pending = true
}
if !ok do err = win.WSAGetLastError()
return
}
FILE_SKIP_COMPLETION_PORT_ON_SUCCESS :: 0x1
FILE_SKIP_SET_EVENT_ON_HANDLE :: 0x2
SO_UPDATE_ACCEPT_CONTEXT :: 28683
WSAID_CONNECTEX :: win.GUID{0x25a207b9, 0xddf3, 0x4660, [8]win.BYTE{0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e}}
LPFN_CONNECTEX :: #type proc "stdcall" (
socket: win.SOCKET,
addr: ^win.SOCKADDR_STORAGE_LH,
namelen: win.c_int,
send_buf: win.PVOID,
send_data_len: win.DWORD,
bytes_sent: win.LPDWORD,
overlapped: win.LPOVERLAPPED,
) -> win.BOOL
LPFN_ACCEPTEX :: #type proc "stdcall" (
listen_sock: win.SOCKET,
accept_sock: win.SOCKET,
addr_buf: win.PVOID,
addr_len: win.DWORD,
local_addr_len: win.DWORD,
remote_addr_len: win.DWORD,
bytes_received: win.LPDWORD,
overlapped: win.LPOVERLAPPED,
) -> win.BOOL
wsa_err_incomplete :: proc(err: win.c_int) -> bool {
#partial switch win.System_Error(err) {
case .WSAEWOULDBLOCK, .IO_PENDING, .IO_INCOMPLETE, .WSAEALREADY:
return true
case:
return false
}
}
err_incomplete :: proc(err: win.DWORD) -> bool {
return err == win.ERROR_IO_PENDING
}
// Verbatim copy of private proc in core:net.
sockaddr_to_endpoint :: proc(native_addr: ^win.SOCKADDR_STORAGE_LH) -> (ep: net.Endpoint) {
switch native_addr.ss_family {
case u16(win.AF_INET):
addr := cast(^win.sockaddr_in)native_addr
port := int(addr.sin_port)
ep = net.Endpoint {
address = net.IP4_Address(transmute([4]byte)addr.sin_addr),
port = port,
}
case u16(win.AF_INET6):
addr := cast(^win.sockaddr_in6)native_addr
port := int(addr.sin6_port)
ep = net.Endpoint {
address = net.IP6_Address(transmute([8]u16be)addr.sin6_addr),
port = port,
}
case:
panic("native_addr is neither IP4 or IP6 address")
}
return
}
// Verbatim copy of private proc in core:net.
endpoint_to_sockaddr :: proc(ep: net.Endpoint) -> (sockaddr: win.SOCKADDR_STORAGE_LH) {
switch a in ep.address {
case net.IP4_Address:
(^win.sockaddr_in)(&sockaddr)^ = win.sockaddr_in {
sin_port = u16be(win.USHORT(ep.port)),
sin_addr = transmute(win.in_addr)a,
sin_family = u16(win.AF_INET),
}
return
case net.IP6_Address:
(^win.sockaddr_in6)(&sockaddr)^ = win.sockaddr_in6 {
sin6_port = u16be(win.USHORT(ep.port)),
sin6_addr = transmute(win.in6_addr)a,
sin6_family = u16(win.AF_INET6),
}
return
}
unreachable()
}
net_err_to_code :: proc(err: net.Network_Error) -> os.Platform_Error {
switch e in err {
case net.Create_Socket_Error:
return os.Platform_Error(e)
case net.Socket_Option_Error:
return os.Platform_Error(e)
case net.General_Error:
return os.Platform_Error(e)
case net.Platform_Error:
return os.Platform_Error(e)
case net.Dial_Error:
return os.Platform_Error(e)
case net.Listen_Error:
return os.Platform_Error(e)
case net.Accept_Error:
return os.Platform_Error(e)
case net.Bind_Error:
return os.Platform_Error(e)
case net.TCP_Send_Error:
return os.Platform_Error(e)
case net.UDP_Send_Error:
return os.Platform_Error(e)
case net.TCP_Recv_Error:
return os.Platform_Error(e)
case net.UDP_Recv_Error:
return os.Platform_Error(e)
case net.Shutdown_Error:
return os.Platform_Error(e)
case net.Set_Blocking_Error:
return os.Platform_Error(e)
case net.Parse_Endpoint_Error:
return os.Platform_Error(e)
case net.Resolve_Error:
return os.Platform_Error(e)
case net.DNS_Error:
return os.Platform_Error(e)
case:
return nil
}
}
// TODO: loading this takes an overlapped parameter, maybe we can do this async?
load_socket_fn :: proc(subject: win.SOCKET, guid: win.GUID, fn: ^$T) {
guid := guid
bytes: u32
rc := win.WSAIoctl(
subject,
win.SIO_GET_EXTENSION_FUNCTION_POINTER,
&guid,
size_of(guid),
fn,
size_of(fn),
&bytes,
nil,
nil,
)
assert(rc != win.SOCKET_ERROR)
assert(bytes == size_of(fn^))
}

View file

@ -0,0 +1,316 @@
package nbio
import "core:container/queue"
import "core:net"
import "core:os"
import "core:sys/linux"
import "core:time"
import io_uring "_io_uring"
_init :: proc(io: ^IO, alloc := context.allocator) -> (err: os.Errno) {
flags: u32 = 0
entries: u32 = 256
io.allocator = alloc
pool_init(&io.completion_pool, allocator = alloc)
params: io_uring.io_uring_params
// Make read, write etc. increment and use the file cursor.
params.features |= io_uring.IORING_FEAT_RW_CUR_POS
ring, rerr := io_uring.io_uring_make(&params, entries, flags)
#partial switch rerr {
case .None:
io.ring = ring
queue.init(&io.unqueued, allocator = alloc)
queue.init(&io.completed, allocator = alloc)
case:
err = ring_err_to_os_err(rerr)
}
return
}
_num_waiting :: #force_inline proc(io: ^IO) -> int {
return io.completion_pool.num_waiting
}
_destroy :: proc(io: ^IO) {
context.allocator = io.allocator
queue.destroy(&io.unqueued)
queue.destroy(&io.completed)
pool_destroy(&io.completion_pool)
io_uring.io_uring_destroy(&io.ring)
}
_tick :: proc(io: ^IO) -> os.Errno {
timeouts: uint = 0
etime := false
t: linux.Time_Spec
t.time_nsec += uint(time.Millisecond * 10)
for !etime {
// Queue the timeout; if there is an error, flush (because the submission queue is probably full) and try again.
sqe, err := io_uring.timeout(&io.ring, 0, &t, 1, 0)
if err != nil {
if errno := flush_submissions(io, 0, &timeouts, &etime); errno != os.ERROR_NONE {
return errno
}
sqe, err = io_uring.timeout(&io.ring, 0, &t, 1, 0)
}
if err != nil do return ring_err_to_os_err(err)
timeouts += 1
io.ios_queued += 1
ferr := flush(io, 1, &timeouts, &etime)
if ferr != os.ERROR_NONE do return ferr
}
for timeouts > 0 {
fcerr := flush_completions(io, 0, &timeouts, &etime)
if fcerr != os.ERROR_NONE do return fcerr
}
return os.ERROR_NONE
}
_listen :: proc(socket: net.TCP_Socket, backlog := 1000) -> net.Network_Error {
errno := os.listen(os.Socket(socket), backlog)
if errno != nil {
return net.Listen_Error(errno.(os.Platform_Error))
}
return nil
}
_accept :: proc(io: ^IO, socket: net.TCP_Socket, user: rawptr, callback: On_Accept) -> ^Completion {
completion := pool_get(&io.completion_pool)
completion.ctx = context
completion.user_data = user
completion.operation = Op_Accept {
callback = callback,
socket = socket,
sockaddrlen = i32(size_of(os.SOCKADDR_STORAGE_LH)),
}
accept_enqueue(io, completion, &completion.operation.(Op_Accept))
return completion
}
_close :: proc(io: ^IO, fd: Closable, user: rawptr, callback: On_Close) -> ^Completion {
completion := pool_get(&io.completion_pool)
completion.ctx = context
completion.user_data = user
handle: os.Handle
//odinfmt:disable
switch h in fd {
case net.TCP_Socket: handle = os.Handle(h)
case net.UDP_Socket: handle = os.Handle(h)
case net.Socket: handle = os.Handle(h)
case os.Handle: handle = h
} //odinfmt:enable
completion.operation = Op_Close {
callback = callback,
fd = handle,
}
close_enqueue(io, completion, &completion.operation.(Op_Close))
return completion
}
_connect :: proc(io: ^IO, endpoint: net.Endpoint, user: rawptr, callback: On_Connect) -> (^Completion, net.Network_Error) {
if endpoint.port == 0 {
return nil, net.Dial_Error.Port_Required
}
family := net.family_from_endpoint(endpoint)
sock, err := net.create_socket(family, .TCP)
if err != nil {
return nil, err
}
if preperr := _prepare_socket(sock); preperr != nil {
close(io, net.any_socket_to_socket(sock))
return nil, preperr
}
completion := pool_get(&io.completion_pool)
completion.ctx = context
completion.user_data = user
completion.operation = Op_Connect {
callback = callback,
socket = sock.(net.TCP_Socket),
sockaddr = endpoint_to_sockaddr(endpoint),
}
connect_enqueue(io, completion, &completion.operation.(Op_Connect))
return completion, nil
}
_read :: proc(
io: ^IO,
fd: os.Handle,
offset: Maybe(int),
buf: []byte,
user: rawptr,
callback: On_Read,
all := false,
) -> ^Completion {
completion := pool_get(&io.completion_pool)
completion.ctx = context
completion.user_data = user
completion.operation = Op_Read {
callback = callback,
fd = fd,
buf = buf,
offset = offset.? or_else -1,
all = all,
len = len(buf),
}
read_enqueue(io, completion, &completion.operation.(Op_Read))
return completion
}
_recv :: proc(io: ^IO, socket: net.Any_Socket, buf: []byte, user: rawptr, callback: On_Recv, all := false) -> ^Completion {
completion := pool_get(&io.completion_pool)
completion.ctx = context
completion.user_data = user
completion.operation = Op_Recv {
callback = callback,
socket = socket,
buf = buf,
all = all,
len = len(buf),
}
recv_enqueue(io, completion, &completion.operation.(Op_Recv))
return completion
}
_send :: proc(
io: ^IO,
socket: net.Any_Socket,
buf: []byte,
user: rawptr,
callback: On_Sent,
_: Maybe(net.Endpoint) = nil,
all := false,
) -> ^Completion {
completion := pool_get(&io.completion_pool)
completion.ctx = context
completion.user_data = user
completion.operation = Op_Send {
callback = callback,
socket = socket,
buf = buf,
all = all,
len = len(buf),
}
send_enqueue(io, completion, &completion.operation.(Op_Send))
return completion
}
_write :: proc(
io: ^IO,
fd: os.Handle,
offset: Maybe(int),
buf: []byte,
user: rawptr,
callback: On_Write,
all := false,
) -> ^Completion {
completion := pool_get(&io.completion_pool)
completion.ctx = context
completion.user_data = user
completion.operation = Op_Write {
callback = callback,
fd = fd,
buf = buf,
offset = offset.? or_else -1,
all = all,
len = len(buf),
}
write_enqueue(io, completion, &completion.operation.(Op_Write))
return completion
}
_timeout :: proc(io: ^IO, dur: time.Duration, user: rawptr, callback: On_Timeout) -> ^Completion {
completion := pool_get(&io.completion_pool)
completion.ctx = context
completion.user_data = user
nsec := time.duration_nanoseconds(dur)
completion.operation = Op_Timeout {
callback = callback,
expires = linux.Time_Spec{
time_sec = uint(nsec / NANOSECONDS_PER_SECOND),
time_nsec = uint(nsec % NANOSECONDS_PER_SECOND),
},
}
timeout_enqueue(io, completion, &completion.operation.(Op_Timeout))
return completion
}
_next_tick :: proc(io: ^IO, user: rawptr, callback: On_Next_Tick) -> ^Completion {
completion := pool_get(&io.completion_pool)
completion.ctx = context
completion.user_data = user
completion.operation = Op_Next_Tick {
callback = callback,
}
queue.push_back(&io.completed, completion)
return completion
}
_poll :: proc(io: ^IO, fd: os.Handle, event: Poll_Event, multi: bool, user: rawptr, callback: On_Poll) -> ^Completion {
completion := pool_get(&io.completion_pool)
completion.ctx = context
completion.user_data = user
completion.operation = Op_Poll{
callback = callback,
fd = fd,
event = event,
multi = multi,
}
poll_enqueue(io, completion, &completion.operation.(Op_Poll))
return completion
}
_poll_remove :: proc(io: ^IO, fd: os.Handle, event: Poll_Event) -> ^Completion {
completion := pool_get(&io.completion_pool)
completion.ctx = context
completion.operation = Op_Poll_Remove{
fd = fd,
event = event,
}
poll_remove_enqueue(io, completion, &completion.operation.(Op_Poll_Remove))
return completion
}
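// Example: a minimal sketch of pairing _poll and _poll_remove, assuming On_Poll has the shape
// proc(user: rawptr, event: Poll_Event) that poll_callback invokes. A multi-shot poll keeps
// firing its callback whenever fd becomes readable until it is removed for the same fd/event
// pair; with multi = false it fires once and its completion is released automatically.
example_poll_pair :: proc(io: ^IO, fd: os.Handle) {
	_poll(io, fd, .Read, true, nil, proc(user: rawptr, event: Poll_Event) {
		// Runs on every readiness notification while the multi-shot poll is registered.
	})

	// Later, when notifications are no longer wanted:
	_poll_remove(io, fd, .Read)
}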

View file

@ -0,0 +1,346 @@
package nbio
import "core:fmt"
import "core:log"
import "core:mem"
import "core:net"
import "core:os"
import "core:slice"
import "core:testing"
import "core:time"
expect :: testing.expect
@(test)
test_timeout :: proc(t: ^testing.T) {
io: IO
ierr := init(&io)
expect(t, ierr == os.ERROR_NONE, fmt.tprintf("nbio.init error: %v", ierr))
defer destroy(&io)
timeout_fired: bool
timeout(&io, time.Millisecond * 10, &timeout_fired, proc(t_: rawptr) {
timeout_fired := cast(^bool)t_
timeout_fired^ = true
})
start := time.now()
for {
terr := tick(&io)
expect(t, terr == os.ERROR_NONE, fmt.tprintf("nbio.tick error: %v", terr))
if time.since(start) > time.Millisecond * 11 {
expect(t, timeout_fired, "timeout did not run in time")
break
}
}
}
@(test)
test_write_read_close :: proc(t: ^testing.T) {
track: mem.Tracking_Allocator
mem.tracking_allocator_init(&track, context.allocator)
context.allocator = mem.tracking_allocator(&track)
defer {
for _, leak in track.allocation_map {
fmt.printf("%v leaked %v bytes\n", leak.location, leak.size)
}
for bad_free in track.bad_free_array {
fmt.printf("%v allocation %p was freed badly\n", bad_free.location, bad_free.memory)
}
}
{
Test_Ctx :: struct {
t: ^testing.T,
io: ^IO,
done: bool,
fd: os.Handle,
write_buf: [20]byte,
read_buf: [20]byte,
written: int,
read: int,
}
io: IO
init(&io)
defer destroy(&io)
tctx := Test_Ctx {
write_buf = [20]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20},
read_buf = [20]byte{},
}
tctx.t = t
tctx.io = &io
path := "test_write_read_close"
handle, errno := open(
&io,
path,
os.O_RDWR | os.O_CREATE | os.O_TRUNC,
os.S_IRUSR | os.S_IWUSR | os.S_IRGRP | os.S_IROTH when ODIN_OS != .Windows else 0,
)
expect(t, errno == os.ERROR_NONE, fmt.tprintf("open file error: %i", errno))
defer close(&io, handle)
defer os.remove(path)
tctx.fd = handle
write(&io, handle, tctx.write_buf[:], &tctx, write_callback)
for !tctx.done {
terr := tick(&io)
expect(t, terr == os.ERROR_NONE, fmt.tprintf("error ticking: %v", terr))
}
expect(t, tctx.read == 20, "expected to have read 20 bytes")
expect(t, tctx.written == 20, "expected to have written 20 bytes")
expect(t, slice.equal(tctx.write_buf[:], tctx.read_buf[:]))
write_callback :: proc(ctx: rawptr, written: int, err: os.Errno) {
ctx := cast(^Test_Ctx)ctx
expect(ctx.t, err == os.ERROR_NONE, fmt.tprintf("write error: %i", err))
ctx.written = written
read_at(ctx.io, ctx.fd, 0, ctx.read_buf[:], ctx, read_callback)
}
read_callback :: proc(ctx: rawptr, r: int, err: os.Errno) {
ctx := cast(^Test_Ctx)ctx
expect(ctx.t, err == os.ERROR_NONE, fmt.tprintf("read error: %i", err))
ctx.read = r
close(ctx.io, ctx.fd, ctx, close_callback)
}
close_callback :: proc(ctx: rawptr, ok: bool) {
ctx := cast(^Test_Ctx)ctx
expect(ctx.t, ok, "close error")
ctx.done = true
}
}
}
@(test)
test_client_and_server_send_recv :: proc(t: ^testing.T) {
track: mem.Tracking_Allocator
mem.tracking_allocator_init(&track, context.allocator)
context.allocator = mem.tracking_allocator(&track)
defer {
for _, leak in track.allocation_map {
fmt.printf("%v leaked %v bytes\n", leak.location, leak.size)
}
for bad_free in track.bad_free_array {
fmt.printf("%v allocation %p was freed badly\n", bad_free.location, bad_free.memory)
}
}
{
Test_Ctx :: struct {
t: ^testing.T,
io: ^IO,
send_buf: []byte,
recv_buf: []byte,
sent: int,
received: int,
accepted_sock: Maybe(net.TCP_Socket),
done: bool,
ep: net.Endpoint,
}
io: IO
init(&io)
defer destroy(&io)
tctx := Test_Ctx {
send_buf = []byte{1, 0, 1, 0, 1, 0, 1, 0, 1, 0},
recv_buf = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
}
tctx.t = t
tctx.io = &io
tctx.ep = {
address = net.IP4_Loopback,
port = 3131,
}
server, err := open_and_listen_tcp(&io, tctx.ep)
expect(t, err == nil, fmt.tprintf("create socket error: %s", err))
accept(&io, server, &tctx, accept_callback)
terr := tick(&io)
expect(t, terr == os.ERROR_NONE, fmt.tprintf("tick error: %v", terr))
connect(&io, tctx.ep, &tctx, connect_callback)
for !tctx.done {
terr := tick(&io)
expect(t, terr == os.ERROR_NONE, fmt.tprintf("tick error: %v", terr))
}
expect(
t,
len(tctx.send_buf) == int(tctx.sent),
fmt.tprintf("expected sent to be length of buffer: %i != %i", len(tctx.send_buf), tctx.sent),
)
expect(
t,
len(tctx.recv_buf) == int(tctx.received),
fmt.tprintf("expected recv to be length of buffer: %i != %i", len(tctx.recv_buf), tctx.received),
)
expect(
t,
slice.equal(tctx.send_buf[:tctx.received], tctx.recv_buf),
fmt.tprintf("send and received not the same: %v != %v", tctx.send_buf[:tctx.received], tctx.recv_buf),
)
connect_callback :: proc(ctx: rawptr, sock: net.TCP_Socket, err: net.Network_Error) {
ctx := cast(^Test_Ctx)ctx
// I believe this happens because we connect in the same tick as we accept,
// which can fail; it might actually be a bug, but I can't find anything.
if err != nil {
log.info("connect err, trying again", err)
connect(ctx.io, ctx.ep, ctx, connect_callback)
return
}
send(ctx.io, sock, ctx.send_buf, ctx, send_callback)
}
send_callback :: proc(ctx: rawptr, res: int, err: net.Network_Error) {
ctx := cast(^Test_Ctx)ctx
expect(ctx.t, err == nil, fmt.tprintf("send error: %i", err))
ctx.sent = res
}
accept_callback :: proc(ctx: rawptr, client: net.TCP_Socket, source: net.Endpoint, err: net.Network_Error) {
ctx := cast(^Test_Ctx)ctx
expect(ctx.t, err == nil, fmt.tprintf("accept error: %i", err))
ctx.accepted_sock = client
recv(ctx.io, client, ctx.recv_buf, ctx, recv_callback)
}
recv_callback :: proc(ctx: rawptr, received: int, _: Maybe(net.Endpoint), err: net.Network_Error) {
ctx := cast(^Test_Ctx)ctx
expect(ctx.t, err == nil, fmt.tprintf("recv error: %i", err))
ctx.received = received
ctx.done = true
}
}
}
@test
test_send_all :: proc(t: ^testing.T) {
Test_Ctx :: struct {
t: ^testing.T,
io: ^IO,
send_buf: []byte,
recv_buf: []byte,
sent: int,
received: int,
accepted_sock: Maybe(net.TCP_Socket),
done: bool,
ep: net.Endpoint,
}
io: IO
init(&io)
defer destroy(&io)
tctx := Test_Ctx {
send_buf = make([]byte, mem.Megabyte * 50),
recv_buf = make([]byte, mem.Megabyte * 60),
}
defer delete(tctx.send_buf)
defer delete(tctx.recv_buf)
slice.fill(tctx.send_buf, 1)
tctx.t = t
tctx.io = &io
tctx.ep = {
address = net.IP4_Loopback,
port = 3132,
}
server, err := open_and_listen_tcp(&io, tctx.ep)
expect(t, err == nil, fmt.tprintf("create socket error: %s", err))
defer close(&io, server)
defer close(&io, tctx.accepted_sock.?)
accept(&io, server, &tctx, accept_callback)
terr := tick(&io)
expect(t, terr == os.ERROR_NONE, fmt.tprintf("tick error: %v", terr))
connect(&io, tctx.ep, &tctx, connect_callback)
for !tctx.done {
terr := tick(&io)
expect(t, terr == os.ERROR_NONE, fmt.tprintf("tick error: %v", terr))
}
expect(t, slice.simple_equal(tctx.send_buf, tctx.recv_buf[:mem.Megabyte * 50]), "expected the sent bytes to be the same as the received")
expected := make([]byte, mem.Megabyte * 10)
expect(t, slice.simple_equal(tctx.recv_buf[mem.Megabyte * 50:], expected), "expected the rest of the bytes to be 0")
connect_callback :: proc(ctx: rawptr, sock: net.TCP_Socket, err: net.Network_Error) {
ctx := cast(^Test_Ctx)ctx
send_all(ctx.io, sock, ctx.send_buf, ctx, send_callback)
}
send_callback :: proc(ctx: rawptr, res: int, err: net.Network_Error) {
ctx := cast(^Test_Ctx)ctx
if !expect(ctx.t, err == nil, fmt.tprintf("send error: %i", err)) {
ctx.done = true
}
ctx.sent = res
}
accept_callback :: proc(ctx: rawptr, client: net.TCP_Socket, source: net.Endpoint, err: net.Network_Error) {
ctx := cast(^Test_Ctx)ctx
if !expect(ctx.t, err == nil, fmt.tprintf("accept error: %i", err)) {
ctx.done = true
}
ctx.accepted_sock = client
recv(ctx.io, client, ctx.recv_buf, ctx, recv_callback)
}
recv_callback :: proc(ctx: rawptr, received: int, _: Maybe(net.Endpoint), err: net.Network_Error) {
ctx := cast(^Test_Ctx)ctx
if !expect(ctx.t, err == nil, fmt.tprintf("recv error: %i", err)) {
ctx.done = true
}
ctx.received += received
if ctx.received < mem.Megabyte * 50 {
recv(ctx.io, ctx.accepted_sock.?, ctx.recv_buf[ctx.received:], ctx, recv_callback)
log.infof("received %.0M", received)
} else {
ctx.done = true
}
}
}
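// Example: a minimal sketch, modeled on the tests above, of a server that echoes a single
// message back to the client; the port (3133) and the 64 byte buffer are arbitrary and error
// handling is mostly elided.
example_echo_once :: proc() {
	Echo_Ctx :: struct {
		io:     ^IO,
		client: net.TCP_Socket,
		buf:    [64]byte,
		done:   bool,
	}

	io: IO
	init(&io)
	defer destroy(&io)

	ctx: Echo_Ctx
	ctx.io = &io

	ep := net.Endpoint{address = net.IP4_Loopback, port = 3133}
	server, err := open_and_listen_tcp(&io, ep)
	if err != nil do return
	defer close(&io, server)

	accept(&io, server, &ctx, proc(ctx_: rawptr, client: net.TCP_Socket, _: net.Endpoint, err: net.Network_Error) {
		ctx := cast(^Echo_Ctx)ctx_
		if err != nil {
			ctx.done = true
			return
		}
		ctx.client = client
		recv(ctx.io, client, ctx.buf[:], ctx, proc(ctx_: rawptr, received: int, _: Maybe(net.Endpoint), err: net.Network_Error) {
			ctx := cast(^Echo_Ctx)ctx_
			if err != nil {
				ctx.done = true
				return
			}
			// Echo the received bytes back over the accepted socket.
			send(ctx.io, ctx.client, ctx.buf[:received], ctx, proc(ctx_: rawptr, _: int, _: net.Network_Error) {
				ctx := cast(^Echo_Ctx)ctx_
				ctx.done = true
			})
		})
	})

	for !ctx.done {
		if tick(&io) != os.ERROR_NONE do break
	}
}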

View file

@ -0,0 +1,56 @@
#+build darwin, linux
#+private
package nbio
import "core:net"
import "core:os"
_open :: proc(_: ^IO, path: string, mode, perm: int) -> (handle: os.Handle, errno: os.Errno) {
handle, errno = os.open(path, mode, perm)
if errno != os.ERROR_NONE do return
errno = _prepare_handle(handle)
if errno != os.ERROR_NONE do os.close(handle)
return
}
_seek :: proc(_: ^IO, fd: os.Handle, offset: int, whence: Whence) -> (int, os.Errno) {
r, err := os.seek(fd, i64(offset), int(whence))
return int(r), err
}
_prepare_handle :: proc(fd: os.Handle) -> os.Errno {
// NOTE: TCP_Socket gets cast to int right away in net, so this is safe to do.
if err := net.set_blocking(net.TCP_Socket(fd), false); err != nil {
return os.Platform_Error((^i32)(&err)^)
}
return os.ERROR_NONE
}
_open_socket :: proc(
_: ^IO,
family: net.Address_Family,
protocol: net.Socket_Protocol,
) -> (
socket: net.Any_Socket,
err: net.Network_Error,
) {
socket, err = net.create_socket(family, protocol)
if err != nil do return
err = _prepare_socket(socket)
if err != nil do net.close(socket)
return
}
_prepare_socket :: proc(socket: net.Any_Socket) -> net.Network_Error {
net.set_option(socket, .Reuse_Address, true) or_return
// TODO: benchmark this; even if faster, it probably should not be turned on
// by default here, maybe by default for the server, but I don't think this
// will be faster/more efficient.
// net.set_option(socket, .TCP_Nodelay, true) or_return
net.set_blocking(socket, false) or_return
return nil
}

View file

@ -0,0 +1,363 @@
package nbio
import "core:container/queue"
import "core:log"
import "core:net"
import "core:os"
import "core:time"
import win "core:sys/windows"
_init :: proc(io: ^IO, allocator := context.allocator) -> (err: os.Errno) {
io.allocator = allocator
pool_init(&io.completion_pool, allocator = allocator)
queue.init(&io.completed, allocator = allocator)
io.timeouts = make([dynamic]^Completion, allocator)
io.offsets = make(map[os.Handle]u32, allocator = allocator)
win.ensure_winsock_initialized()
defer if err != nil {
assert(win.WSACleanup() == win.NO_ERROR)
}
io.iocp = win.CreateIoCompletionPort(win.INVALID_HANDLE_VALUE, nil, 0, 0)
if io.iocp == nil {
err = os.Platform_Error(win.GetLastError())
return
}
return
}
_destroy :: proc(io: ^IO) {
context.allocator = io.allocator
delete(io.timeouts)
queue.destroy(&io.completed)
pool_destroy(&io.completion_pool)
delete(io.offsets)
// TODO: error handling.
win.CloseHandle(io.iocp)
// win.WSACleanup()
}
_num_waiting :: #force_inline proc(io: ^IO) -> int {
return io.completion_pool.num_waiting
}
_tick :: proc(io: ^IO) -> (err: os.Errno) {
if queue.len(io.completed) == 0 {
next_timeout := flush_timeouts(io)
// Wait a maximum of one ms if there is nothing to do.
// TODO: this is pretty naive; a typical server always has accept completions pending and will be at 100% CPU.
wait_ms: win.DWORD = 1 if io.io_pending == 0 else 0
// But, to counter inaccuracies in low timeouts,
// let's make the call exit immediately if the next timeout is close.
if nt, ok := next_timeout.?; ok && nt <= time.Millisecond * 15 {
wait_ms = 0
}
events: [256]win.OVERLAPPED_ENTRY
entries_removed: win.ULONG
if !win.GetQueuedCompletionStatusEx(io.iocp, &events[0], len(events), &entries_removed, wait_ms, false) {
if terr := win.GetLastError(); terr != win.WAIT_TIMEOUT {
err = os.Platform_Error(terr)
return
}
}
// assert(io.io_pending >= int(entries_removed))
io.io_pending -= int(entries_removed)
for event in events[:entries_removed] {
if event.lpOverlapped == nil {
@static logged: bool
if !logged {
log.warn("You have ran into a strange error some users have ran into on Windows 10 but I can't reproduce, I try to recover from the error but please chime in at https://github.com/laytan/odin-http/issues/34")
logged = true
}
io.io_pending += 1
continue
}
// This actually points at the Completion.over field, but because that is the first field
// it is also a valid pointer to the Completion struct.
completion := cast(^Completion)event.lpOverlapped
queue.push_back(&io.completed, completion)
}
}
// Prevent infinite loop when callback adds to completed by storing length.
n := queue.len(io.completed)
for _ in 0 ..< n {
completion := queue.pop_front(&io.completed)
context = completion.ctx
handle_completion(io, completion)
}
return
}
_listen :: proc(socket: net.TCP_Socket, backlog := 1000) -> (err: net.Network_Error) {
if res := win.listen(win.SOCKET(socket), i32(backlog)); res == win.SOCKET_ERROR {
err = net.Listen_Error(win.WSAGetLastError())
}
return
}
// Basically a copy of `os.open`, but with a flag added to signal async IO and the handle registered with the IOCP.
// Specifically the FILE_FLAG_OVERLAPPED flag.
_open :: proc(io: ^IO, path: string, mode, perm: int) -> (os.Handle, os.Errno) {
if len(path) == 0 {
return os.INVALID_HANDLE, os.ERROR_FILE_NOT_FOUND
}
access: u32
//odinfmt:disable
switch mode & (os.O_RDONLY | os.O_WRONLY | os.O_RDWR) {
case os.O_RDONLY: access = win.FILE_GENERIC_READ
case os.O_WRONLY: access = win.FILE_GENERIC_WRITE
case os.O_RDWR: access = win.FILE_GENERIC_READ | win.FILE_GENERIC_WRITE
}
//odinfmt:enable
if mode & os.O_CREATE != 0 {
access |= win.FILE_GENERIC_WRITE
}
if mode & os.O_APPEND != 0 {
access &~= win.FILE_GENERIC_WRITE
access |= win.FILE_APPEND_DATA
}
share_mode := win.FILE_SHARE_READ | win.FILE_SHARE_WRITE
sa: ^win.SECURITY_ATTRIBUTES = nil
sa_inherit := win.SECURITY_ATTRIBUTES {
nLength = size_of(win.SECURITY_ATTRIBUTES),
bInheritHandle = true,
}
if mode & os.O_CLOEXEC == 0 {
sa = &sa_inherit
}
create_mode: u32
switch {
case mode & (os.O_CREATE | os.O_EXCL) == (os.O_CREATE | os.O_EXCL):
create_mode = win.CREATE_NEW
case mode & (os.O_CREATE | os.O_TRUNC) == (os.O_CREATE | os.O_TRUNC):
create_mode = win.CREATE_ALWAYS
case mode & os.O_CREATE == os.O_CREATE:
create_mode = win.OPEN_ALWAYS
case mode & os.O_TRUNC == os.O_TRUNC:
create_mode = win.TRUNCATE_EXISTING
case:
create_mode = win.OPEN_EXISTING
}
flags := win.FILE_ATTRIBUTE_NORMAL | win.FILE_FLAG_BACKUP_SEMANTICS
// This line is the only thing different from the `os.open` procedure.
// This makes it an asynchronous file that can be used in nbio.
flags |= win.FILE_FLAG_OVERLAPPED
wide_path := win.utf8_to_wstring(path)
handle := os.Handle(win.CreateFileW(wide_path, access, share_mode, sa, create_mode, flags, nil))
if handle == os.INVALID_HANDLE {
err := os.Platform_Error(win.GetLastError())
return os.INVALID_HANDLE, err
}
// Everything past here is custom/not from `os.open`.
handle_iocp := win.CreateIoCompletionPort(win.HANDLE(handle), io.iocp, 0, 0)
assert(handle_iocp == io.iocp)
cmode: byte
cmode |= FILE_SKIP_COMPLETION_PORT_ON_SUCCESS
cmode |= FILE_SKIP_SET_EVENT_ON_HANDLE
if !win.SetFileCompletionNotificationModes(win.HANDLE(handle), cmode) {
win.CloseHandle(win.HANDLE(handle))
return os.INVALID_HANDLE, os.Platform_Error(win.GetLastError())
}
if mode & os.O_APPEND != 0 {
_seek(io, handle, 0, .End)
}
return handle, os.ERROR_NONE
}
_seek :: proc(io: ^IO, fd: os.Handle, offset: int, whence: Whence) -> (int, os.Errno) {
switch whence {
case .Set:
io.offsets[fd] = u32(offset)
case .Curr:
io.offsets[fd] += u32(offset)
case .End:
size: win.LARGE_INTEGER
ok := win.GetFileSizeEx(win.HANDLE(fd), &size)
if !ok {
return 0, os.Platform_Error(win.GetLastError())
}
io.offsets[fd] = u32(size) + u32(offset)
}
return int(io.offsets[fd]), os.ERROR_NONE
}
_open_socket :: proc(
io: ^IO,
family: net.Address_Family,
protocol: net.Socket_Protocol,
) -> (
socket: net.Any_Socket,
err: net.Network_Error,
) {
socket, err = net.create_socket(family, protocol)
if err != nil do return
err = prepare_socket(io, socket)
if err != nil do net.close(socket)
return
}
_accept :: proc(io: ^IO, socket: net.TCP_Socket, user: rawptr, callback: On_Accept) -> ^Completion {
return submit(
io,
user,
Op_Accept{
callback = callback,
socket = win.SOCKET(socket),
client = win.INVALID_SOCKET,
},
)
}
_connect :: proc(io: ^IO, ep: net.Endpoint, user: rawptr, callback: On_Connect) -> (^Completion, net.Network_Error) {
if ep.port == 0 {
return nil, net.Dial_Error.Port_Required
}
return submit(io, user, Op_Connect{
callback = callback,
addr = endpoint_to_sockaddr(ep),
}), nil
}
_close :: proc(io: ^IO, fd: Closable, user: rawptr, callback: On_Close) -> ^Completion {
return submit(io, user, Op_Close{callback = callback, fd = fd})
}
_read :: proc(
io: ^IO,
fd: os.Handle,
offset: Maybe(int),
buf: []byte,
user: rawptr,
callback: On_Read,
all := false,
) -> ^Completion {
return submit(io, user, Op_Read{
callback = callback,
fd = fd,
offset = offset.? or_else -1,
buf = buf,
all = all,
len = len(buf),
})
}
_write :: proc(
io: ^IO,
fd: os.Handle,
offset: Maybe(int),
buf: []byte,
user: rawptr,
callback: On_Write,
all := false,
) -> ^Completion {
return submit(io, user, Op_Write{
callback = callback,
fd = fd,
offset = offset.? or_else -1,
buf = buf,
all = all,
len = len(buf),
})
}
_recv :: proc(io: ^IO, socket: net.Any_Socket, buf: []byte, user: rawptr, callback: On_Recv, all := false) -> ^Completion {
// TODO: implement UDP.
if _, ok := socket.(net.UDP_Socket); ok do unimplemented("nbio.recv with UDP sockets is not yet implemented")
return submit(
io,
user,
Op_Recv{
callback = callback,
socket = socket,
buf = win.WSABUF{len = win.ULONG(len(buf)), buf = raw_data(buf)},
all = all,
len = len(buf),
},
)
}
_send :: proc(
io: ^IO,
socket: net.Any_Socket,
buf: []byte,
user: rawptr,
callback: On_Sent,
endpoint: Maybe(net.Endpoint) = nil,
all := false,
) -> ^Completion {
// TODO: implement UDP.
if _, ok := socket.(net.UDP_Socket); ok do unimplemented("nbio.send with UDP sockets is not yet implemented")
return submit(
io,
user,
Op_Send{
callback = callback,
socket = socket,
buf = win.WSABUF{len = win.ULONG(len(buf)), buf = raw_data(buf)},
all = all,
len = len(buf),
},
)
}
_timeout :: proc(io: ^IO, dur: time.Duration, user: rawptr, callback: On_Timeout) -> ^Completion {
completion := pool_get(&io.completion_pool)
completion.op = Op_Timeout {
callback = callback,
expires = time.time_add(time.now(), dur),
}
completion.user_data = user
completion.ctx = context
append(&io.timeouts, completion)
return completion
}
_next_tick :: proc(io: ^IO, user: rawptr, callback: On_Next_Tick) -> ^Completion {
panic("unimplemented on windows: next_tick")
}
_poll :: proc(io: ^IO, fd: os.Handle, event: Poll_Event, multi: bool, user: rawptr, callback: On_Poll) -> ^Completion {
panic("unimplemented on windows: poll")
}
_poll_remove :: proc(io: ^IO, fd: os.Handle, event: Poll_Event) -> ^Completion {
panic("unimplemented on windows: poll_remove")
}
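A minimal sketch of driving this Windows backend directly (illustrative, not from the original file); it assumes `On_Timeout` is `proc(user: rawptr)`, which is what the timeout callbacks elsewhere in this commit pass.
example_timeout_loop :: proc() {
	io: IO
	assert(_init(&io) == os.ERROR_NONE)
	defer _destroy(&io)

	fired: bool
	_timeout(&io, 50 * time.Millisecond, &fired, proc(user: rawptr) {
		(^bool)(user)^ = true
	})

	// Each _tick flushes due timeouts and waits at most ~1ms on the IOCP for completions.
	for !fired {
		assert(_tick(&io) == os.ERROR_NONE)
	}
}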

File diff suppressed because it is too large

54
odin-http/nbio/pool.odin Normal file
View file

@ -0,0 +1,54 @@
#+private
package nbio
import "core:container/queue"
import "core:mem"
import "core:mem/virtual"
// An object pool where the objects are allocated on a growing arena.
Pool :: struct($T: typeid) {
allocator: mem.Allocator,
arena: virtual.Arena,
objects_allocator: mem.Allocator,
objects: queue.Queue(^T),
num_waiting: int,
}
DEFAULT_STARTING_CAP :: 8
pool_init :: proc(p: ^Pool($T), cap := DEFAULT_STARTING_CAP, allocator := context.allocator) -> mem.Allocator_Error {
virtual.arena_init_growing(&p.arena) or_return
p.objects_allocator = virtual.arena_allocator(&p.arena)
p.allocator = allocator
queue.init(&p.objects, cap, allocator) or_return
for _ in 0 ..< cap {
_ = queue.push_back(&p.objects, new(T, p.objects_allocator)) or_return
}
return nil
}
pool_destroy :: proc(p: ^Pool($T)) {
virtual.arena_destroy(&p.arena)
queue.destroy(&p.objects)
}
pool_get :: proc(p: ^Pool($T)) -> (^T, mem.Allocator_Error) #optional_allocator_error {
p.num_waiting += 1
elem, ok := queue.pop_front_safe(&p.objects)
if !ok {
return new(T, p.objects_allocator)
}
mem.zero_item(elem)
return elem, nil
}
pool_put :: proc(p: ^Pool($T), elem: ^T) -> mem.Allocator_Error {
p.num_waiting -= 1
_, err := queue.push_back(&p.objects, elem)
return err
}
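Hypothetical usage of the pool; the `Job` type below is made up for illustration. Objects come out zeroed and live on the growing arena until `pool_destroy`.
Job :: struct {
	id:   int,
	done: bool,
}

example_pool :: proc() {
	p: Pool(Job)
	pool_init(&p)        // pre-allocates DEFAULT_STARTING_CAP objects on the arena.
	defer pool_destroy(&p)

	job := pool_get(&p)  // zeroed, either reused from the queue or freshly arena-allocated.
	job.id = 1

	pool_put(&p, job)    // hand it back for reuse; memory is only released in pool_destroy.
}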

5
odin-http/odinfmt.json Normal file
View file

@ -0,0 +1,5 @@
{
"character_width": 120,
"tabs": true,
"tabs_width": 4
}

View file

@ -0,0 +1 @@
openssl-3.4.1

Binary file not shown.

Binary file not shown.

Binary file not shown.

View file

@ -0,0 +1,89 @@
package openssl
import "core:c"
import "core:c/libc"
SHARED :: #config(OPENSSL_SHARED, false)
when ODIN_OS == .Windows {
when SHARED {
foreign import lib {
"./includes/windows/libssl.lib",
"./includes/windows/libcrypto.lib",
}
} else {
@(extra_linker_flags="/nodefaultlib:libcmt")
foreign import lib {
"./includes/windows/libssl_static.lib",
"./includes/windows/libcrypto_static.lib",
"system:ws2_32.lib",
"system:gdi32.lib",
"system:advapi32.lib",
"system:crypt32.lib",
"system:user32.lib",
}
}
} else when ODIN_OS == .Darwin {
foreign import lib {
"system:ssl.3",
"system:crypto.3",
}
} else {
foreign import lib {
"system:ssl",
"system:crypto",
}
}
Version :: bit_field u32 {
pre_release: uint | 4,
patch: uint | 16,
minor: uint | 8,
major: uint | 4,
}
VERSION: Version
@(private, init)
version_check :: proc() {
VERSION = Version(OpenSSL_version_num())
assert(VERSION.major == 3, "invalid OpenSSL library version, expected 3.x")
}
SSL_METHOD :: struct {}
SSL_CTX :: struct {}
SSL :: struct {}
SSL_CTRL_SET_TLSEXT_HOSTNAME :: 55
TLSEXT_NAMETYPE_host_name :: 0
foreign lib {
TLS_client_method :: proc() -> ^SSL_METHOD ---
SSL_CTX_new :: proc(method: ^SSL_METHOD) -> ^SSL_CTX ---
SSL_new :: proc(ctx: ^SSL_CTX) -> ^SSL ---
SSL_set_fd :: proc(ssl: ^SSL, fd: c.int) -> c.int ---
SSL_connect :: proc(ssl: ^SSL) -> c.int ---
SSL_get_error :: proc(ssl: ^SSL, ret: c.int) -> c.int ---
SSL_read :: proc(ssl: ^SSL, buf: [^]byte, num: c.int) -> c.int ---
SSL_write :: proc(ssl: ^SSL, buf: [^]byte, num: c.int) -> c.int ---
SSL_free :: proc(ssl: ^SSL) ---
SSL_CTX_free :: proc(ctx: ^SSL_CTX) ---
ERR_print_errors_fp :: proc(fp: ^libc.FILE) ---
SSL_ctrl :: proc(ssl: ^SSL, cmd: c.int, larg: c.long, parg: rawptr) -> c.long ---
OpenSSL_version_num :: proc() -> c.ulong ---
}
// This is a macro in C land.
SSL_set_tlsext_host_name :: proc(ssl: ^SSL, name: cstring) -> c.int {
return c.int(SSL_ctrl(ssl, SSL_CTRL_SET_TLSEXT_HOSTNAME, TLSEXT_NAMETYPE_host_name, rawptr(name)))
}
ERR_print_errors :: proc {
ERR_print_errors_fp,
ERR_print_errors_stderr,
}
ERR_print_errors_stderr :: proc() {
ERR_print_errors_fp(libc.stderr)
}
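An illustrative client-side handshake using only the bindings declared above; `fd` is assumed to be an already-connected TCP socket and `hostname` the NUL-terminated server name (needed for SNI).
example_tls_connect :: proc(fd: c.int, hostname: cstring) -> (ssl: ^SSL, ctx: ^SSL_CTX, ok: bool) {
	ctx = SSL_CTX_new(TLS_client_method())
	if ctx == nil do return
	ssl = SSL_new(ctx)
	if ssl == nil {
		SSL_CTX_free(ctx)
		return
	}
	// SNI: without this many servers refuse the handshake or serve the wrong certificate.
	SSL_set_tlsext_host_name(ssl, hostname)
	SSL_set_fd(ssl, fd)
	if SSL_connect(ssl) != 1 {
		ERR_print_errors() // resolves to ERR_print_errors_stderr via the proc group above.
		SSL_free(ssl)
		SSL_CTX_free(ctx)
		return
	}
	return ssl, ctx, true
}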

66
odin-http/request.odin Normal file
View file

@ -0,0 +1,66 @@
package http
import "core:net"
import "core:strings"
Request :: struct {
// If in a handler, this is always there and never None.
// TODO: we should not expose this as a maybe to package users.
line: Maybe(Requestline),
// True if the request is actually a HEAD request;
// line.method will be .Get if Server_Opts.redirect_head_to_get is set.
is_head: bool,
headers: Headers,
url: URL,
client: net.Endpoint,
// Route params/captures.
url_params: []string,
// Internal usage only.
_scanner: ^Scanner,
_body_ok: Maybe(bool),
}
request_init :: proc(r: ^Request, allocator := context.allocator) {
headers_init(&r.headers, allocator)
}
// TODO: call it headers_sanitize because it modifies the headers.
// Validates the headers of a request, from the pov of the server.
headers_validate_for_server :: proc(headers: ^Headers) -> bool {
// RFC 7230 5.4: A server MUST respond with a 400 (Bad Request) status code to any
// HTTP/1.1 request message that lacks a Host header field.
if !headers_has_unsafe(headers^, "host") {
return false
}
return headers_validate(headers)
}
// Validates the headers, use `headers_validate_for_server` if these are request headers
// that should be validated from the server side.
headers_validate :: proc(headers: ^Headers) -> bool {
// RFC 7230 3.3.3: If a Transfer-Encoding header field
// is present in a request and the chunked transfer coding is not
// the final encoding, the message body length cannot be determined
// reliably; the server MUST respond with the 400 (Bad Request)
// status code and then close the connection.
if enc_header, ok := headers_get_unsafe(headers^, "transfer-encoding"); ok {
strings.has_suffix(enc_header, "chunked") or_return
}
// RFC 7230 3.3.3: If a message is received with both a Transfer-Encoding and a
// Content-Length header field, the Transfer-Encoding overrides the
// Content-Length. Such a message might indicate an attempt to
// perform request smuggling (Section 9.5) or response splitting
// (Section 9.4) and ought to be handled as an error.
if headers_has_unsafe(headers^, "transfer-encoding") && headers_has_unsafe(headers^, "content-length") {
headers_delete_unsafe(headers, "content-length")
}
return true
}
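A small sketch of the validation rules above; `headers_init` and `headers_set_unsafe` come from a part of the package not shown in this hunk and are assumed to behave as their call sites elsewhere in this commit suggest.
example_validate :: proc() {
	headers: Headers
	headers_init(&headers, context.allocator)

	// No Host header: RFC 7230 5.4 requires the server to reject this with a 400.
	assert(!headers_validate_for_server(&headers))

	headers_set_unsafe(&headers, "host", "example.com")
	assert(headers_validate_for_server(&headers))
}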

424
odin-http/response.odin Normal file
View file

@ -0,0 +1,424 @@
package http
import "core:bytes"
import "core:io"
import "core:log"
import "core:net"
import "core:slice"
import "core:strconv"
import "nbio"
Response :: struct {
// Add your headers and cookies here directly.
headers: Headers,
cookies: [dynamic]Cookie,
// If the response has been sent.
sent: bool,
// NOTE: use `http.response_status` if the response body might have been set already.
status: Status,
// Only for internal usage.
_conn: ^Connection,
// TODO/PERF: with some internal refactoring, we should be able to write directly to the
// connection (maybe a small buffer in this struct).
_buf: bytes.Buffer,
_heading_written: bool,
}
response_init :: proc(r: ^Response, allocator := context.allocator) {
r.status = .Not_Found
r.cookies.allocator = allocator
r._buf.buf.allocator = allocator
headers_init(&r.headers, allocator)
}
/*
Prefer the procedure group `body_set`.
*/
body_set_bytes :: proc(r: ^Response, byts: []byte, loc := #caller_location) {
assert(bytes.buffer_length(&r._buf) == 0, "the response body has already been written", loc)
_response_write_heading(r, len(byts))
bytes.buffer_write(&r._buf, byts)
}
/*
Prefer the procedure group `body_set`.
*/
body_set_str :: proc(r: ^Response, str: string, loc := #caller_location) {
// This is safe because we don't write to the bytes.
body_set_bytes(r, transmute([]byte)str, loc)
}
/*
Sets the response body. After calling this you can no longer add headers to the response.
If, after calling, you want to change the status code, use the `response_status` procedure.
For bodies where you do not know the size or want an `io.Writer`, use the `response_writer_init`
procedure to create a writer.
*/
body_set :: proc{
body_set_str,
body_set_bytes,
}
/*
Sets the status code with the safety of being able to do this after writing (part of) the body.
*/
response_status :: proc(r: ^Response, status: Status) {
if r.status == status do return
r.status = status
// If we have already written the heading, we can address the bytes directly to overwrite the status.
// This works because every status code is of length 3 and because we omit the
// "optional" reason phrase from the response.
if bytes.buffer_length(&r._buf) > 0 {
OFFSET :: len("HTTP/1.1 ")
status_int_str := status_string(r.status)
if len(status_int_str) < 4 {
status_int_str = "500 "
} else {
status_int_str = status_int_str[0:4]
}
copy(r._buf.buf[OFFSET:OFFSET + 4], status_int_str)
}
}
Response_Writer :: struct {
r: ^Response,
// The writer you can write to.
w: io.Writer,
// A dynamic wrapper over the `buffer` given in `response_writer_init`, doesn't allocate.
buf: [dynamic]byte,
// If destroy or close has been called.
ended: bool,
}
/*
Initialize a writer you can use to write responses. Use the `body_set` procedure group if you have
a string or byte slice.
The buffer can be used to avoid very small writes, like the ones when you use the json package
(each write in the json package is only a few bytes). You are allowed to pass nil which will disable
buffering.
NOTE: You need to call io.destroy to signal the end of the body, OR io.close to send the response.
*/
response_writer_init :: proc(rw: ^Response_Writer, r: ^Response, buffer: []byte) -> io.Writer {
headers_set_unsafe(&r.headers, "transfer-encoding", "chunked")
_response_write_heading(r, -1)
rw.buf = slice.into_dynamic(buffer)
rw.r = r
rw.w = io.Stream{
procedure = proc(stream_data: rawptr, mode: io.Stream_Mode, p: []byte, offset: i64, whence: io.Seek_From) -> (n: i64, err: io.Error) {
ws :: bytes.buffer_write_string
write_chunk :: proc(b: ^bytes.Buffer, chunk: []byte) {
plen := i64(len(chunk))
if plen == 0 do return
log.debugf("response_writer chunk of size: %i", plen)
bytes.buffer_grow(b, 16)
size_buf := _dynamic_unwritten(b.buf)
size := strconv.append_int(size_buf, plen, 16)
_dynamic_add_len(&b.buf, len(size))
ws(b, "\r\n")
bytes.buffer_write(b, chunk)
ws(b, "\r\n")
}
rw := (^Response_Writer)(stream_data)
b := &rw.r._buf
#partial switch mode {
case .Flush:
assert(!rw.ended)
write_chunk(b, rw.buf[:])
clear(&rw.buf)
return 0, nil
case .Destroy:
assert(!rw.ended)
// Write what is left.
write_chunk(b, rw.buf[:])
// Signals the end of the body.
ws(b, "0\r\n\r\n")
rw.ended = true
return 0, nil
case .Close:
// Write what is left.
write_chunk(b, rw.buf[:])
if !rw.ended {
// Signals the end of the body.
ws(b, "0\r\n\r\n")
rw.ended = true
}
// Send the response.
respond(rw.r)
return 0, nil
case .Write:
assert(!rw.ended)
// No space, first write rw.buf, then check again for space, if still no space,
// fully write the given p.
if len(rw.buf) + len(p) > cap(rw.buf) {
write_chunk(b, rw.buf[:])
clear(&rw.buf)
if len(p) > cap(rw.buf) {
write_chunk(b, p)
} else {
append(&rw.buf, ..p)
}
} else {
// Space, append bytes to the buffer.
append(&rw.buf, ..p)
}
return i64(len(p)), .None
case .Query:
return io.query_utility({.Write, .Flush, .Destroy, .Close})
}
return 0, .Empty
},
data = rw,
}
return rw.w
}
/*
Writes the response status and headers to the buffer.
This is automatically called before writing anything to the Response.body or before calling a procedure
that sends the response.
You can pass `content_length < 0` to omit the content-length header, note that this header is
required on most responses, but there are things like transfer-encodings that could leave it out.
*/
_response_write_heading :: proc(r: ^Response, content_length: int) {
if r._heading_written do return
r._heading_written = true
ws :: bytes.buffer_write_string
conn := r._conn
b := &r._buf
MIN :: len("HTTP/1.1 200 \r\ndate: \r\ncontent-length: 1000\r\n") + DATE_LENGTH
AVG_HEADER_SIZE :: 20
reserve_size := MIN + content_length + (AVG_HEADER_SIZE * headers_count(r.headers))
bytes.buffer_grow(&r._buf, reserve_size)
// According to RFC 7230 3.1.2 the reason phrase is insignificant, so we leave it out.
// Because of that (and the fact that a status code is always length 3), we can change
// the status code while already writing a body by just addressing the 3 bytes directly.
status_int_str := status_string(r.status)
if len(status_int_str) < 4 {
status_int_str = "500 "
} else {
status_int_str = status_int_str[0:4]
}
ws(b, "HTTP/1.1 ")
ws(b, status_int_str)
ws(b, "\r\n")
// Per RFC 9110 6.6.1 a Date header must be added in 2xx, 3xx, 4xx responses.
if r.status >= .OK && r.status <= .Internal_Server_Error && !headers_has_unsafe(r.headers, "date") {
ws(b, "date: ")
ws(b, server_date(conn.server))
ws(b, "\r\n")
}
if (
content_length > -1 &&
!headers_has_unsafe(r.headers, "content-length") &&
response_needs_content_length(r, conn) \
) {
if content_length == 0 {
ws(b, "content-length: 0\r\n")
} else {
ws(b, "content-length: ")
assert(content_length < 1000000000000000000 && content_length > -1000000000000000000)
buf: [20]byte
ws(b, strconv.itoa(buf[:], content_length))
ws(b, "\r\n")
}
}
bstream := bytes.buffer_to_stream(b)
for header, value in r.headers._kv {
ws(b, header) // already has newlines escaped.
ws(b, ": ")
write_escaped_newlines(bstream, value)
ws(b, "\r\n")
}
for cookie in r.cookies {
cookie_write(bstream, cookie)
ws(b, "\r\n")
}
// Empty line denotes end of headers and start of body.
ws(b, "\r\n")
}
// Sends the response over the connection.
// Frees the allocator (should be a request scoped allocator).
// Closes the connection or starts the handling of the next request.
@(private)
response_send :: proc(r: ^Response, conn: ^Connection, loc := #caller_location) {
assert(!r.sent, "response has already been sent", loc)
r.sent = true
check_body :: proc(res: rawptr, body: Body, err: Body_Error) {
res := cast(^Response)res
will_close: bool
if err != nil {
// Any read error should close the connection.
response_status(res, body_error_status(err))
headers_set_close(&res.headers)
will_close = true
}
response_send_got_body(res, will_close)
}
// RFC 7230 6.3: A server MUST read
// the entire request message body or close the connection after sending
// its response, since otherwise the remaining data on a persistent
// connection would be misinterpreted as the next request.
if !response_must_close(&conn.loop.req, r) {
// Body has been drained during handling.
if _, got_body := conn.loop.req._body_ok.?; got_body {
response_send_got_body(r, false)
} else {
body(&conn.loop.req, Max_Post_Handler_Discard_Bytes, r, check_body)
}
} else {
response_send_got_body(r, true)
}
}
@(private)
response_send_got_body :: proc(r: ^Response, will_close: bool) {
conn := r._conn
if will_close {
if !connection_set_state(r._conn, .Will_Close) do return
}
if bytes.buffer_length(&r._buf) == 0 {
_response_write_heading(r, 0)
}
buf := bytes.buffer_to_bytes(&r._buf)
nbio.send_all(&td.io, conn.socket, buf, conn, on_response_sent)
}
@(private)
on_response_sent :: proc(conn_: rawptr, sent: int, err: net.Network_Error) {
conn := cast(^Connection)conn_
if err != nil {
log.errorf("could not send response: %v", err)
if !connection_set_state(conn, .Will_Close) do return
}
clean_request_loop(conn)
}
// Response has been sent, clean up and close/handle next.
@(private)
clean_request_loop :: proc(conn: ^Connection, close: Maybe(bool) = nil) {
// blocks, size, used := allocator_free_all(&conn.temp_allocator)
// log.debugf("temp_allocator had %d blocks of a total size of %m of which %m was used", blocks, size, used)
free_all(context.temp_allocator)
scanner_reset(&conn.scanner)
conn.loop.req = {}
conn.loop.res = {}
if c, ok := close.?; (ok && c) || conn.state == .Will_Close {
connection_close(conn)
} else {
if !connection_set_state(conn, .Idle) do return
conn_handle_req(conn, context.temp_allocator)
}
}
// A server MUST NOT send a Content-Length header field in any response
// with a status code of 1xx (Informational) or 204 (No Content). A
// server MUST NOT send a Content-Length header field in any 2xx
// (Successful) response to a CONNECT request.
@(private)
response_needs_content_length :: proc(r: ^Response, conn: ^Connection) -> bool {
if status_is_informational(r.status) || r.status == .No_Content {
return false
}
line := conn.loop.req.line.?
if status_is_success(r.status) && line.method == .Connect {
return false
}
return true
}
// Determines if the connection needs to be closed after sending the response.
@(private)
response_must_close :: proc(req: ^Request, res: ^Response) -> bool {
// If the request we are responding to indicates it is closing the connection, close our side too.
if req, req_has := headers_get_unsafe(req.headers, "connection"); req_has && req == "close" {
return true
}
// If we are responding with a close connection header, make sure we close.
if res, res_has := headers_get_unsafe(res.headers, "connection"); res_has && res == "close" {
return true
}
// If the body was tried to be received, but failed, close.
if body_ok, got_body := req._body_ok.?; got_body && !body_ok {
headers_set_close(&res.headers)
return true
}
// If the connection's state indicates closing, close.
if res._conn.state >= .Will_Close {
headers_set_close(&res.headers)
return true
}
// HTTP 1.0 does not have persistent connections.
line := req.line.?
if line.version == {1, 0} {
return true
}
return false
}
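A hypothetical handler showing the `Response_Writer` flow described above: headers first, then chunked writes through the `io.Writer`, then `io.close` to send. `Handler`, `headers_set_content_type`, `mime_to_content_type` and `Mime_Type` live in files not shown in this hunk and are assumed from their call sites.
example_stream_handler :: proc(handler: ^Handler, req: ^Request, res: ^Response) {
	response_status(res, .OK)
	headers_set_content_type(&res.headers, mime_to_content_type(Mime_Type.Plain))

	rw: Response_Writer
	buf: [256]byte
	w := response_writer_init(&rw, res, buf[:])

	io.write_string(w, "hello, ")
	io.write_string(w, "chunked world\n")

	io.close(w) // writes the terminating zero-length chunk and sends the response.
}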

196
odin-http/responses.odin Normal file
View file

@ -0,0 +1,196 @@
package http
import "core:bytes"
import "core:encoding/json"
import "core:io"
import "core:log"
import "core:os"
import "core:path/filepath"
import "core:strings"
import "nbio"
// Sets the response to one that sends the given HTML.
respond_html :: proc(r: ^Response, html: string, status: Status = .OK, loc := #caller_location) {
r.status = status
headers_set_content_type(&r.headers, mime_to_content_type(Mime_Type.Html))
body_set(r, html, loc)
respond(r, loc)
}
// Sets the response to one that sends the given plain text.
respond_plain :: proc(r: ^Response, text: string, status: Status = .OK, loc := #caller_location) {
r.status = status
headers_set_content_type(&r.headers, mime_to_content_type(Mime_Type.Plain))
body_set(r, text, loc)
respond(r, loc)
}
@(private)
ENOENT :: os.ERROR_FILE_NOT_FOUND when ODIN_OS == .Windows else os.ENOENT
/*
Sends the content of the file at the given path as the response.
This procedure uses non-blocking IO and only allocates the size of the file in the body's buffer;
there are no other allocations or temporary buffers, which keeps it as fast as possible.
The content type is taken from the path, optionally overwritten using the parameter.
If the file doesn't exist, a 404 response is sent.
If any other error occurs, a 500 is sent and the error is logged.
*/
respond_file :: proc(r: ^Response, path: string, content_type: Maybe(Mime_Type) = nil, loc := #caller_location) {
// PERF: we are still putting the content into the body buffer; we could stream it instead.
assert_has_td(loc)
assert(!r.sent, "response has already been sent", loc)
io := &td.io
handle, errno := nbio.open(io, path)
if errno != os.ERROR_NONE {
if errno == ENOENT {
log.debugf("respond_file, open %q, no such file or directory", path)
} else {
log.warnf("respond_file, open %q error: %i", path, errno)
}
respond(r, Status.Not_Found)
return
}
size, err := nbio.seek(io, handle, 0, .End)
if err != os.ERROR_NONE {
log.errorf("Could not seek the file size of file at %q, error number: %i", path, err)
respond(r, Status.Internal_Server_Error)
nbio.close(io, handle)
return
}
mime := mime_from_extension(path)
content_type := mime_to_content_type(mime)
headers_set_content_type(&r.headers, content_type)
_response_write_heading(r, size)
bytes.buffer_grow(&r._buf, size)
buf := _dynamic_unwritten(r._buf.buf)[:size]
on_read :: proc(user: rawptr, read: int, err: os.Errno) {
r := cast(^Response)user
handle := os.Handle(uintptr(context.user_ptr))
_dynamic_add_len(&r._buf.buf, read)
if err != os.ERROR_NONE {
log.errorf("Reading file from respond_file failed, error number: %i", err)
respond(r, Status.Internal_Server_Error)
nbio.close(&td.io, handle)
return
}
respond(r, Status.OK)
nbio.close(&td.io, handle)
}
// Using the context.user_ptr to point to the file handle.
context.user_ptr = rawptr(uintptr(handle))
nbio.read_at_all(io, handle, 0, buf, r, on_read)
}
/*
Responds with the given content, determining content type from the given path.
This is very useful when you want to `#load(path)` at compile time and respond with that.
*/
respond_file_content :: proc(r: ^Response, path: string, content: []byte, status: Status = .OK, loc := #caller_location) {
mime := mime_from_extension(path)
content_type := mime_to_content_type(mime)
r.status = status
headers_set_content_type(&r.headers, content_type)
body_set(r, content, loc)
respond(r, loc)
}
/*
Sets the response to one that, based on the request path, returns a file.
base: The base of the request path that should be removed when retrieving the file.
target: The path to the directory to serve.
request: The request path.
Path traversal is detected and cleaned up.
The Content-Type is set based on the file extension, see the MimeType enum for known file extensions.
*/
respond_dir :: proc(r: ^Response, base, target, request: string, loc := #caller_location) {
if !strings.has_prefix(request, base) {
respond(r, Status.Not_Found)
return
}
// Detect path traversal attacks.
req_clean := filepath.clean(request, context.temp_allocator)
base_clean := filepath.clean(base, context.temp_allocator)
if !strings.has_prefix(req_clean, base_clean) {
respond(r, Status.Not_Found)
return
}
file_path := filepath.join([]string{"./", target, strings.trim_prefix(req_clean, base_clean)}, context.temp_allocator)
respond_file(r, file_path, loc = loc)
}
// Sets the response to one that returns the JSON representation of the given value.
respond_json :: proc(r: ^Response, v: any, status: Status = .OK, opt: json.Marshal_Options = {}, loc := #caller_location) -> (err: json.Marshal_Error) {
opt := opt
r.status = status
headers_set_content_type(&r.headers, mime_to_content_type(Mime_Type.Json))
// Going to write a MINIMUM of 128 bytes at a time.
rw: Response_Writer
buf: [128]byte
response_writer_init(&rw, r, buf[:])
// Ends the body and sends the response.
defer io.close(rw.w)
if err = json.marshal_to_writer(rw.w, v, &opt); err != nil {
headers_set_close(&r.headers)
response_status(r, .Internal_Server_Error)
}
return
}
/*
Prefer the procedure group `respond`.
*/
respond_with_none :: proc(r: ^Response, loc := #caller_location) {
assert_has_td(loc)
conn := r._conn
req := conn.loop.req
// Respond as head request if we set it to get.
if rline, ok := req.line.(Requestline); ok && req.is_head && conn.server.opts.redirect_head_to_get {
rline.method = .Head
}
response_send(r, conn, loc)
}
/*
Prefer the procedure group `respond`.
*/
respond_with_status :: proc(r: ^Response, status: Status, loc := #caller_location) {
response_status(r, status)
respond(r, loc)
}
// Sends the response back to the client, handlers should call this.
respond :: proc {
respond_with_none,
respond_with_status,
}
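A hypothetical handler wiring a couple of the responders above together; `Greeting` is made up for the example and `query_get` is the helper from routing.odin.
Greeting :: struct {
	message: string,
}

example_json_handler :: proc(handler: ^Handler, req: ^Request, res: ^Response) {
	if name, ok := query_get(req.url, "name"); ok {
		respond_json(res, Greeting{message = name})
		return
	}
	respond_plain(res, "missing ?name=", .Bad_Request)
}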

295
odin-http/routing.odin Normal file
View file

@ -0,0 +1,295 @@
package http
import "base:runtime"
import "core:log"
import "core:net"
import "core:strconv"
import "core:strings"
import "core:text/match"
URL :: struct {
raw: string, // All other fields are views/slices into this string.
scheme: string,
host: string,
path: string,
query: string,
}
url_parse :: proc(raw: string) -> (url: URL) {
url.raw = raw
s := raw
i := strings.index(s, "://")
if i >= 0 {
url.scheme = s[:i]
s = s[i+3:]
}
i = strings.index(s, "?")
if i != -1 {
url.query = s[i+1:]
s = s[:i]
}
i = strings.index(s, "/")
if i == -1 {
url.host = s
} else {
url.host = s[:i]
url.path = s[i:]
}
return
}
Query_Entry :: struct {
key, value: string,
}
query_iter :: proc(query: ^string) -> (entry: Query_Entry, ok: bool) {
if len(query) == 0 do return
ok = true
i := strings.index(query^, "=")
if i < 0 {
entry.key = query^
query^ = ""
return
}
entry.key = query[:i]
query^ = query[i+1:]
i = strings.index(query^, "&")
if i < 0 {
entry.value = query^
query^ = ""
return
}
entry.value = query[:i]
query^ = query[i+1:]
return
}
query_get :: proc(url: URL, key: string) -> (val: string, ok: bool) #optional_ok {
q := url.query
for entry in #force_inline query_iter(&q) {
if entry.key == key {
return entry.value, true
}
}
return
}
query_get_percent_decoded :: proc(url: URL, key: string, allocator := context.temp_allocator) -> (val: string, ok: bool) {
str := query_get(url, key) or_return
return net.percent_decode(str, allocator)
}
query_get_bool :: proc(url: URL, key: string) -> (result, set: bool) #optional_ok {
str := query_get(url, key) or_return
set = true
switch str {
case "", "false", "0", "no":
case:
result = true
}
return
}
query_get_int :: proc(url: URL, key: string, base := 0) -> (result: int, ok: bool, set: bool) {
str := query_get(url, key) or_return
set = true
result, ok = strconv.parse_int(str, base)
return
}
query_get_uint :: proc(url: URL, key: string, base := 0) -> (result: uint, ok: bool, set: bool) {
str := query_get(url, key) or_return
set = true
result, ok = strconv.parse_uint(str, base)
return
}
Route :: struct {
handler: Handler,
pattern: string,
}
Router :: struct {
allocator: runtime.Allocator,
routes: map[Method][dynamic]Route,
all: [dynamic]Route,
}
router_init :: proc(router: ^Router, allocator := context.allocator) {
router.allocator = allocator
router.routes = make(map[Method][dynamic]Route, len(Method), allocator)
}
router_destroy :: proc(router: ^Router) {
context.allocator = router.allocator
for route in router.all {
delete(route.pattern)
}
delete(router.all)
for _, routes in router.routes {
for route in routes {
delete(route.pattern)
}
delete(routes)
}
delete(router.routes)
}
// Returns a handler that matches against the given routes.
router_handler :: proc(router: ^Router) -> Handler {
h: Handler
h.user_data = router
h.handle = proc(handler: ^Handler, req: ^Request, res: ^Response) {
router := (^Router)(handler.user_data)
rline := req.line.(Requestline)
if routes_try(router.routes[rline.method], req, res) {
return
}
if routes_try(router.all, req, res) {
return
}
log.infof("no route matched %s %s", method_string(rline.method), rline.target)
res.status = .Not_Found
respond(res)
}
return h
}
route_get :: proc(router: ^Router, pattern: string, handler: Handler) {
route_add(
router,
.Get,
Route{handler = handler, pattern = strings.concatenate([]string{"^", pattern, "$"}, router.allocator)},
)
}
route_post :: proc(router: ^Router, pattern: string, handler: Handler) {
route_add(
router,
.Post,
Route{handler = handler, pattern = strings.concatenate([]string{"^", pattern, "$"}, router.allocator)},
)
}
// NOTE: this does not get called when `Server_Opts.redirect_head_to_get` is set to true.
route_head :: proc(router: ^Router, pattern: string, handler: Handler) {
route_add(
router,
.Head,
Route{handler = handler, pattern = strings.concatenate([]string{"^", pattern, "$"}, router.allocator)},
)
}
route_put :: proc(router: ^Router, pattern: string, handler: Handler) {
route_add(
router,
.Put,
Route{handler = handler, pattern = strings.concatenate([]string{"^", pattern, "$"}, router.allocator)},
)
}
route_patch :: proc(router: ^Router, pattern: string, handler: Handler) {
route_add(
router,
.Patch,
Route{handler = handler, pattern = strings.concatenate([]string{"^", pattern, "$"}, router.allocator)},
)
}
route_trace :: proc(router: ^Router, pattern: string, handler: Handler) {
route_add(
router,
.Trace,
Route{handler = handler, pattern = strings.concatenate([]string{"^", pattern, "$"}, router.allocator)},
)
}
route_delete :: proc(router: ^Router, pattern: string, handler: Handler) {
route_add(
router,
.Delete,
Route{handler = handler, pattern = strings.concatenate([]string{"^", pattern, "$"}, router.allocator)},
)
}
route_connect :: proc(router: ^Router, pattern: string, handler: Handler) {
route_add(
router,
.Connect,
Route{handler = handler, pattern = strings.concatenate([]string{"^", pattern, "$"}, router.allocator)},
)
}
route_options :: proc(router: ^Router, pattern: string, handler: Handler) {
route_add(
router,
.Options,
Route{handler = handler, pattern = strings.concatenate([]string{"^", pattern, "$"}, router.allocator)},
)
}
// Adds a catch-all fallback route (all methods, run if no other routes match).
route_all :: proc(router: ^Router, pattern: string, handler: Handler) {
if router.all == nil {
router.all = make([dynamic]Route, 0, 1, router.allocator)
}
append(
&router.all,
Route{handler = handler, pattern = strings.concatenate([]string{"^", pattern, "$"}, router.allocator)},
)
}
@(private)
route_add :: proc(router: ^Router, method: Method, route: Route) {
if method not_in router.routes {
router.routes[method] = make([dynamic]Route, router.allocator)
}
append(&router.routes[method], route)
}
@(private)
routes_try :: proc(routes: [dynamic]Route, req: ^Request, res: ^Response) -> bool {
try_captures: [match.MAX_CAPTURES]match.Match = ---
for route in routes {
n, err := match.find_aux(req.url.path, route.pattern, 0, true, &try_captures)
if err != .OK {
log.errorf("Error matching route: %v", err)
continue
}
if n > 0 {
captures := make([]string, n - 1, context.temp_allocator)
for cap, i in try_captures[1:n] {
captures[i] = req.url.path[cap.byte_start:cap.byte_end]
}
req.url_params = captures
rh := route.handler
rh.handle(&rh, req, res)
return true
}
}
return false
}
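A sketch tying the URL helpers and the router together; patterns are Lua-style (core:text/match), get anchored with ^ and $ by the route_* procedures, and their captures end up in req.url_params.
example_routing :: proc() {
	url := url_parse("http://example.com/users/42?verbose=1")
	assert(url.host == "example.com" && url.path == "/users/42")
	assert(query_get_bool(url, "verbose"))

	router: Router
	router_init(&router)
	defer router_destroy(&router)

	user_handler: Handler
	user_handler.handle = proc(h: ^Handler, req: ^Request, res: ^Response) {
		respond_plain(res, req.url_params[0]) // the "(%d+)" capture below.
	}
	route_get(&router, "/users/(%d+)", user_handler)

	handler := router_handler(&router)
	_ = handler // would be passed on to `serve`/`listen_and_serve`.
}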

244
odin-http/scanner.odin Normal file
View file

@ -0,0 +1,244 @@
#+private
package http
import "base:intrinsics"
import "core:bufio"
import "core:net"
import "nbio"
Scan_Callback :: #type proc(user_data: rawptr, token: string, err: bufio.Scanner_Error)
Split_Proc :: #type proc(split_data: rawptr, data: []byte, at_eof: bool) -> (advance: int, token: []byte, err: bufio.Scanner_Error, final_token: bool)
scan_lines :: proc(split_data: rawptr, data: []byte, at_eof: bool) -> (advance: int, token: []byte, err: bufio.Scanner_Error, final_token: bool) {
return bufio.scan_lines(data, at_eof)
}
scan_num_bytes :: proc(split_data: rawptr, data: []byte, at_eof: bool) -> (advance: int, token: []byte, err: bufio.Scanner_Error, final_token: bool) {
assert(split_data != nil)
n := int(uintptr(split_data))
assert(n >= 0)
if at_eof && len(data) < n {
return
}
if len(data) < n {
return
}
return n, data[:n], nil, false
}
// A callback-based scanner over the connection, built on nbio.
Scanner :: struct #no_copy {
connection: ^Connection,
split: Split_Proc,
split_data: rawptr,
buf: [dynamic]byte,
max_token_size: int,
start: int,
end: int,
token: []byte,
_err: bufio.Scanner_Error,
consecutive_empty_reads: int,
max_consecutive_empty_reads: int,
successive_empty_token_count: int,
done: bool,
could_be_too_short: bool,
user_data: rawptr,
callback: Scan_Callback,
}
INIT_BUF_SIZE :: 1024
DEFAULT_MAX_CONSECUTIVE_EMPTY_READS :: 128
scanner_init :: proc(s: ^Scanner, c: ^Connection, buf_allocator := context.allocator) {
s.connection = c
s.split = scan_lines
s.max_token_size = bufio.DEFAULT_MAX_SCAN_TOKEN_SIZE
s.buf.allocator = buf_allocator
}
scanner_destroy :: proc(s: ^Scanner) {
delete(s.buf)
}
scanner_reset :: proc(s: ^Scanner) {
remove_range(&s.buf, 0, s.start)
s.end -= s.start
s.start = 0
s.split = scan_lines
s.split_data = nil
s.max_token_size = bufio.DEFAULT_MAX_SCAN_TOKEN_SIZE
s.token = nil
s._err = nil
s.consecutive_empty_reads = 0
s.max_consecutive_empty_reads = DEFAULT_MAX_CONSECUTIVE_EMPTY_READS
s.successive_empty_token_count = 0
s.done = false
s.could_be_too_short = false
s.user_data = nil
s.callback = nil
}
scanner_scan :: proc(
s: ^Scanner,
user_data: rawptr,
callback: proc(user_data: rawptr, token: string, err: bufio.Scanner_Error),
) {
set_err :: proc(s: ^Scanner, err: bufio.Scanner_Error) {
switch s._err {
case nil, .EOF:
s._err = err
}
}
if s.done {
callback(user_data, "", .EOF)
return
}
// Check if a token is possible with what is available
// Allow the split procedure to recover if it fails
if s.start < s.end || s._err != nil {
advance, token, err, final_token := s.split(s.split_data, s.buf[s.start:s.end], s._err != nil)
if final_token {
s.token = token
s.done = true
callback(user_data, "", .EOF)
return
}
if err != nil {
set_err(s, err)
callback(user_data, "", s._err)
return
}
// Do advance
if advance < 0 {
set_err(s, .Negative_Advance)
callback(user_data, "", s._err)
return
}
if advance > s.end - s.start {
set_err(s, .Advanced_Too_Far)
callback(user_data, "", s._err)
return
}
s.start += advance
s.token = token
if s.token != nil {
if s._err == nil || advance > 0 {
s.successive_empty_token_count = 0
} else {
s.successive_empty_token_count += 1
if s.successive_empty_token_count > s.max_consecutive_empty_reads {
set_err(s, .No_Progress)
callback(user_data, "", s._err)
return
}
}
s.consecutive_empty_reads = 0
s.callback = nil
s.user_data = nil
callback(user_data, string(token), s._err)
return
}
}
// If an error is hit, no token can be created
if s._err != nil {
s.start = 0
s.end = 0
callback(user_data, "", s._err)
return
}
could_be_too_short := false
// Resize the buffer if full
if s.end == len(s.buf) {
if s.max_token_size <= 0 {
s.max_token_size = bufio.DEFAULT_MAX_SCAN_TOKEN_SIZE
}
if s.end - s.start >= s.max_token_size {
set_err(s, .Too_Long)
callback(user_data, "", s._err)
return
}
// TODO: write over the part of the buffer already used
// overflow check
new_size := INIT_BUF_SIZE
if len(s.buf) > 0 {
overflowed: bool
if new_size, overflowed = intrinsics.overflow_mul(len(s.buf), 2); overflowed {
set_err(s, .Too_Long)
callback(user_data, "", s._err)
return
}
}
old_size := len(s.buf)
resize(&s.buf, new_size)
could_be_too_short = old_size >= len(s.buf)
}
// Read data into the buffer
s.consecutive_empty_reads += 1
s.user_data = user_data
s.callback = callback
s.could_be_too_short = could_be_too_short
assert_has_td()
// TODO: some kinda timeout on this.
nbio.recv(&td.io, s.connection.socket, s.buf[s.end:len(s.buf)], s, scanner_on_read)
}
scanner_on_read :: proc(s: rawptr, n: int, _: Maybe(net.Endpoint), e: net.Network_Error) {
s := (^Scanner)(s)
defer scanner_scan(s, s.user_data, s.callback)
if e != nil {
#partial switch ee in e {
case net.TCP_Recv_Error:
#partial switch ee {
case .Connection_Closed, net.TCP_Recv_Error(9):
// 9 is EBADF (bad file descriptor), which happens when the OS closes the socket.
s._err = .EOF
return
}
}
s._err = .Unknown
return
}
// When n == 0, connection is closed or buffer is of length 0.
if n == 0 {
s._err = .EOF
return
}
if n < 0 || len(s.buf) - s.end < n {
s._err = .Bad_Read_Count
return
}
s.end += n
if n > 0 {
s.successive_empty_token_count = 0
return
}
}
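A sketch of how the body-reading code (not shown in this hunk) is presumed to switch the scanner from line splitting to reading an exact number of bytes: the byte count is smuggled through split_data as a pointer-sized integer, matching the int(uintptr(split_data)) unpacking in scan_num_bytes.
example_scan_body :: proc(s: ^Scanner, content_length: int, user: rawptr, cb: Scan_Callback) {
	s.split      = scan_num_bytes
	s.split_data = rawptr(uintptr(content_length))
	scanner_scan(s, user, cb)
}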

656
odin-http/server.odin Normal file
View file

@ -0,0 +1,656 @@
package http
import "base:runtime"
import "core:bufio"
import "core:bytes"
import "core:c/libc"
import "core:fmt"
import "core:log"
import "core:mem"
import "core:mem/virtual"
import "core:net"
import "core:os"
import "core:slice"
import "core:sync"
import "core:thread"
import "core:time"
import "nbio"
Server_Opts :: struct {
// Whether the server should accept every request that sends a "Expect: 100-continue" header automatically.
// Defaults to true.
auto_expect_continue: bool,
// When this is true, any HEAD request is automatically redirected to the handler as a GET request.
// Then, when the response is sent, the body is removed from the response.
// Defaults to true.
redirect_head_to_get: bool,
// Limit the maximum number of bytes to read for the request line (first line of request containing the URI).
// The HTTP spec does not specify any limits but in practice it is safer.
// RFC 7230 3.1.1 says:
// Various ad hoc limitations on request-line length are found in
// practice. It is RECOMMENDED that all HTTP senders and recipients
// support, at a minimum, request-line lengths of 8000 octets.
// defaults to 8000.
limit_request_line: int,
// Limit the length of the headers.
// The HTTP spec does not specify any limits but in practice it is safer.
// defaults to 8000.
limit_headers: int,
// The thread count to use, defaults to your core count - 1.
thread_count: int,
// // The initial size of the temp_allocator for each connection, defaults to 256KiB and doubles
// // each time it needs to grow.
// // NOTE: this value is assigned globally, running multiple servers with a different value will
// // not work.
// initial_temp_block_cap: uint,
// // The amount of free blocks each thread is allowed to hold on to before deallocating excess.
// // Defaults to 64.
// max_free_blocks_queued: uint,
}
Default_Server_Opts := Server_Opts {
auto_expect_continue = true,
redirect_head_to_get = true,
limit_request_line = 8000,
limit_headers = 8000,
// initial_temp_block_cap = 256 * mem.Kilobyte,
// max_free_blocks_queued = 64,
}
@(init, private)
server_opts_init :: proc() {
when ODIN_OS == .Linux || ODIN_OS == .Darwin {
Default_Server_Opts.thread_count = os.processor_core_count()
} else {
Default_Server_Opts.thread_count = 1
}
}
Server_State :: enum {
Uninitialized,
Idle,
Listening,
Serving,
Running,
Closing,
Cleaning,
Closed,
}
Server :: struct {
opts: Server_Opts,
tcp_sock: net.TCP_Socket,
conn_allocator: mem.Allocator,
handler: Handler,
main_thread: int,
threads: []^thread.Thread,
// Once the server starts closing/shutting down this is set to true; all threads will check it
// and start their thread-local shutdown procedure.
//
// NOTE: This is only ever set from false to true, and checked repeatedly,
// so it doesn't have to be atomic, this is purely to keep the thread sanitizer happy.
closing: Atomic(bool),
// Threads will decrement the wait group when they have fully closed/shutdown.
// The main thread waits on this to clean up global data and return.
threads_closed: sync.Wait_Group,
// Updated every second with the current date; this speeds up the server considerably
// because it would otherwise need to call time.now() and format the date on each response.
date: Server_Date,
}
Server_Thread :: struct {
conns: map[net.TCP_Socket]^Connection,
state: Server_State,
io: nbio.IO,
// free_temp_blocks: map[int]queue.Queue(^Block),
// free_temp_blocks_count: int,
}
@(private, disabled = ODIN_DISABLE_ASSERT)
assert_has_td :: #force_inline proc(loc := #caller_location) {
assert(td.state != .Uninitialized, "The thread you are calling from is not a server/handler thread", loc)
}
@(thread_local)
td: Server_Thread
Default_Endpoint := net.Endpoint {
address = net.IP4_Any,
port = 8080,
}
listen :: proc(
s: ^Server,
endpoint: net.Endpoint = Default_Endpoint,
opts: Server_Opts = Default_Server_Opts,
) -> (
err: net.Network_Error,
) {
s.opts = opts
s.conn_allocator = context.allocator
s.main_thread = sync.current_thread_id()
// initial_block_cap = int(s.opts.initial_temp_block_cap)
// max_free_blocks_queued = int(s.opts.max_free_blocks_queued)
errno := nbio.init(&td.io)
// TODO: error handling.
assert(errno == os.ERROR_NONE)
s.tcp_sock, err = nbio.open_and_listen_tcp(&td.io, endpoint)
if err != nil {server_shutdown(s)}
return
}
serve :: proc(s: ^Server, h: Handler) -> (err: net.Network_Error) {
s.handler = h
thread_count := max(0, s.opts.thread_count - 1)
sync.wait_group_add(&s.threads_closed, thread_count)
s.threads = make([]^thread.Thread, thread_count, s.conn_allocator)
for i in 0 ..< thread_count {
s.threads[i] = thread.create_and_start_with_poly_data(s, _server_thread_init, context)
}
// Start keeping track of and caching the date for the required date header.
server_date_start(s)
sync.wait_group_add(&s.threads_closed, 1)
_server_thread_init(s)
sync.wait(&s.threads_closed)
log.debug("server threads are done, shutting down")
net.close(s.tcp_sock)
for t in s.threads do thread.destroy(t)
delete(s.threads)
return nil
}
listen_and_serve :: proc(
s: ^Server,
h: Handler,
endpoint: net.Endpoint = Default_Endpoint,
opts: Server_Opts = Default_Server_Opts,
) -> (
err: net.Network_Error,
) {
listen(s, endpoint, opts) or_return
return serve(s, h)
}
_server_thread_init :: proc(s: ^Server) {
td.conns = make(map[net.TCP_Socket]^Connection)
// td.free_temp_blocks = make(map[int]queue.Queue(^Block))
if sync.current_thread_id() != s.main_thread {
errno := nbio.init(&td.io)
// TODO: error handling.
assert(errno == os.ERROR_NONE)
}
log.debug("accepting connections")
nbio.accept(&td.io, s.tcp_sock, s, on_accept)
log.debug("starting event loop")
td.state = .Serving
for {
if atomic_load(&s.closing) do _server_thread_shutdown(s)
if td.state == .Closed do break
if td.state == .Cleaning do continue
errno := nbio.tick(&td.io)
if errno != os.ERROR_NONE {
// TODO: check how this behaves on Windows.
when ODIN_OS != .Windows do if errno == os.EINTR {
server_shutdown(s)
continue
}
log.errorf("non-blocking io tick error: %v", errno)
break
}
}
log.debug("event loop end")
sync.wait_group_done(&s.threads_closed)
}
// The time between checks and closes of connections in a graceful shutdown.
@(private)
SHUTDOWN_INTERVAL :: time.Millisecond * 100
// Starts a graceful shutdown.
//
// Some error logs will be generated, but all active connections are allowed to finish
// before being closed, and all connections and threads are freed.
//
// 1. Stops the server threads from accepting new connections.
// 2. Close and free non-active connections.
// 3. Repeat 2 every SHUTDOWN_INTERVAL until no more connections are open.
// 4. Close the main socket.
// 5. Signals 'serve' that it can return.
server_shutdown :: proc(s: ^Server) {
atomic_store(&s.closing, true)
}
_server_thread_shutdown :: proc(s: ^Server, loc := #caller_location) {
assert_has_td(loc)
td.state = .Closing
defer delete(td.conns)
// defer {
// blocks: int
// for _, &bucket in td.free_temp_blocks {
// for block in queue.pop_front_safe(&bucket) {
// blocks += 1
// free(block)
// }
// queue.destroy(&bucket)
// }
// delete(td.free_temp_blocks)
// log.infof("had %i temp blocks to spare", blocks)
// }
for i := 0;; i += 1 {
for sock, conn in td.conns {
#partial switch conn.state {
case .Active:
log.infof("shutdown: connection %i still active", sock)
case .New, .Idle, .Pending:
log.infof("shutdown: closing connection %i", sock)
connection_close(conn)
case .Closing:
// Only logging this every 10_000 calls to avoid spam.
if i % 10_000 == 0 do log.debugf("shutdown: connection %i is closing", sock)
case .Closed:
log.warn("closed connection in connections map, maybe a race or logic error")
}
}
if len(td.conns) == 0 {
break
}
err := nbio.tick(&td.io)
fmt.assertf(err == os.ERROR_NONE, "IO tick error during shutdown: %v", err)
}
td.state = .Cleaning
nbio.destroy(&td.io)
td.state = .Closed
log.info("shutdown: done")
}
@(private)
on_interrupt_server: ^Server
@(private)
on_interrupt_context: runtime.Context
// Registers a signal handler to shut down the server gracefully on an interrupt signal.
// Can only be called once in the lifetime of the program because of a hacky interaction with libc.
server_shutdown_on_interrupt :: proc(s: ^Server) {
on_interrupt_server = s
on_interrupt_context = context
libc.signal(
libc.SIGINT,
proc "cdecl" (_: i32) {
context = on_interrupt_context
// Force close on second signal.
if td.state == .Closing {
os.exit(1)
}
server_shutdown(on_interrupt_server)
},
)
}
// Taken from Go's implementation:
// the maximum amount of bytes we will read (if the handler did not)
// in order to get the connection ready for the next request.
@(private)
Max_Post_Handler_Discard_Bytes :: 256 << 10
// How long to wait before actually closing a connection.
// This is to make sure the client can fully receive the response.
@(private)
Conn_Close_Delay :: time.Millisecond * 500
Connection_State :: enum {
Pending, // Pending a client to attach.
New, // Got client, waiting to service first request.
Active, // Servicing request.
Idle, // Waiting for next request.
Will_Close, // Closing after the current response is sent.
Closing, // Going to close, cleaning up.
Closed, // Fully closed.
}
@(private)
connection_set_state :: proc(c: ^Connection, s: Connection_State) -> bool {
if s < .Closing && c.state >= .Closing {
return false
}
if s == .Closing && c.state == .Closed {
return false
}
c.state = s
return true
}
// TODO/PERF: pool the connections, saves having to allocate scanner buf and temp_allocator every time.
Connection :: struct {
server: ^Server,
socket: net.TCP_Socket,
state: Connection_State,
scanner: Scanner,
temp_allocator: virtual.Arena,
loop: Loop,
}
// Loop/request cycle state.
@(private)
Loop :: struct {
conn: ^Connection,
req: Request,
res: Response,
}
@(private)
connection_close :: proc(c: ^Connection, loc := #caller_location) {
assert_has_td(loc)
if c.state >= .Closing {
log.infof("connection %i already closing/closed", c.socket)
return
}
log.debugf("closing connection: %i", c.socket)
c.state = .Closing
// RFC 7230 6.6.
// Close read side of the connection, then wait a little bit, allowing the client
// to process the closing and receive any remaining data.
net.shutdown(c.socket, net.Shutdown_Manner.Send)
scanner_destroy(&c.scanner)
nbio.timeout(
&td.io,
Conn_Close_Delay,
c,
proc(c: rawptr) {
c := cast(^Connection)c
nbio.close(
&td.io,
c.socket,
c,
proc(c: rawptr, ok: bool) {
c := cast(^Connection)c
log.debugf("closed connection: %i", c.socket)
c.state = .Closed
// allocator_destroy(&c.temp_allocator)
virtual.arena_destroy(&c.temp_allocator)
delete_key(&td.conns, c.socket)
free(c, c.server.conn_allocator)
},
)
},
)
}
@(private)
on_accept :: proc(server: rawptr, sock: net.TCP_Socket, source: net.Endpoint, err: net.Network_Error) {
server := cast(^Server)server
if err != nil {
#partial switch e in err {
case net.Accept_Error:
#partial switch e {
case .No_Socket_Descriptors_Available_For_Client_Socket:
log.error("Connection limit reached, trying again in a bit")
nbio.timeout(&td.io, time.Second, server, proc(server: rawptr) {
server := cast(^Server)server
nbio.accept(&td.io, server.tcp_sock, server, on_accept)
})
return
}
}
fmt.panicf("accept error: %v", err)
}
// Accept next connection.
nbio.accept(&td.io, server.tcp_sock, server, on_accept)
c := new(Connection, server.conn_allocator)
c.state = .New
c.server = server
c.socket = sock
td.conns[c.socket] = c
log.debugf("new connection with thread, got %d conns", len(td.conns))
conn_handle_reqs(c)
}
@(private)
conn_handle_reqs :: proc(c: ^Connection) {
// TODO/PERF: not sure why this is allocated on the connections allocator, can't it use the arena?
scanner_init(&c.scanner, c, c.server.conn_allocator)
// allocator_init(&c.temp_allocator, c.server.conn_allocator)
// context.temp_allocator = allocator(&c.temp_allocator)
err := virtual.arena_init_growing(&c.temp_allocator)
assert(err == nil)
context.temp_allocator = virtual.arena_allocator(&c.temp_allocator)
conn_handle_req(c, context.temp_allocator)
}
@(private)
conn_handle_req :: proc(c: ^Connection, allocator := context.temp_allocator) {
on_rline1 :: proc(loop: rawptr, token: string, err: bufio.Scanner_Error) {
l := cast(^Loop)loop
if !connection_set_state(l.conn, .Active) do return
if err != nil {
if err == .EOF {
log.debugf("client disconnected (EOF)")
} else {
log.warnf("request scanner error: %v", err)
}
clean_request_loop(l.conn, close = true)
return
}
// In the interest of robustness, a server that is expecting to receive
// and parse a request-line SHOULD ignore at least one empty line (CRLF)
// received prior to the request-line.
if len(token) == 0 {
log.debug("first request line empty, skipping in interest of robustness")
scanner_scan(&l.conn.scanner, loop, on_rline2)
return
}
on_rline2(loop, token, err)
}
on_rline2 :: proc(loop: rawptr, token: string, err: bufio.Scanner_Error) {
l := cast(^Loop)loop
if err != nil {
log.warnf("request scanning error: %v", err)
clean_request_loop(l.conn, close = true)
return
}
rline, err := requestline_parse(token, context.temp_allocator)
switch err {
case .Method_Not_Implemented:
log.infof("request-line %q invalid method", token)
headers_set_close(&l.res.headers)
l.res.status = .Not_Implemented
respond(&l.res)
return
case .Invalid_Version_Format, .Not_Enough_Fields:
log.warnf("request-line %q invalid: %s", token, err)
clean_request_loop(l.conn, close = true)
return
case .None:
l.req.line = rline
}
// Might need to support more versions later.
if rline.version.major != 1 || rline.version.minor > 1 {
log.infof("request http version not supported %v", rline.version)
headers_set_close(&l.res.headers)
l.res.status = .HTTP_Version_Not_Supported
respond(&l.res)
return
}
l.req.url = url_parse(rline.target.(string))
l.conn.scanner.max_token_size = l.conn.server.opts.limit_headers
scanner_scan(&l.conn.scanner, loop, on_header_line)
}
on_header_line :: proc(loop: rawptr, token: string, err: bufio.Scanner_Error) {
l := cast(^Loop)loop
if err != nil {
log.warnf("request scanning error: %v", err)
clean_request_loop(l.conn, close = true)
return
}
// The first empty line denotes the end of the headers section.
if len(token) == 0 {
on_headers_end(l)
return
}
if _, ok := header_parse(&l.req.headers, token); !ok {
log.warnf("header-line %s is invalid", token)
headers_set_close(&l.res.headers)
l.res.status = .Bad_Request
respond(&l.res)
return
}
l.conn.scanner.max_token_size -= len(token)
if l.conn.scanner.max_token_size <= 0 {
log.warn("request headers too large")
headers_set_close(&l.res.headers)
l.res.status = .Request_Header_Fields_Too_Large
respond(&l.res)
return
}
scanner_scan(&l.conn.scanner, loop, on_header_line)
}
on_headers_end :: proc(l: ^Loop) {
if !headers_validate_for_server(&l.req.headers) {
log.warn("request headers are invalid")
headers_set_close(&l.res.headers)
l.res.status = .Bad_Request
respond(&l.res)
return
}
l.req.headers.readonly = true
l.conn.scanner.max_token_size = bufio.DEFAULT_MAX_SCAN_TOKEN_SIZE
// Automatically respond with a continue status when the client has the Expect: 100-continue header.
if expect, ok := headers_get_unsafe(l.req.headers, "expect");
ok && expect == "100-continue" && l.conn.server.opts.auto_expect_continue {
l.res.status = .Continue
respond(&l.res)
return
}
rline := &l.req.line.(Requestline)
// An options request with the "*" is a no-op/ping request to
// check for server capabilities and should not be sent to handlers.
if rline.method == .Options && rline.target.(string) == "*" {
l.res.status = .OK
respond(&l.res)
} else {
// Give the handler this request as a GET: the HTTP spec says a HEAD is identical to a GET
// but without writing the body, so handlers shouldn't have to worry about it.
is_head := rline.method == .Head
if is_head && l.conn.server.opts.redirect_head_to_get {
l.req.is_head = true
rline.method = .Get
}
l.conn.server.handler.handle(&l.conn.server.handler, &l.req, &l.res)
}
}
c.loop.conn = c
c.loop.res._conn = c
c.loop.req._scanner = &c.scanner
request_init(&c.loop.req, allocator)
response_init(&c.loop.res, allocator)
c.scanner.max_token_size = c.server.opts.limit_request_line
scanner_scan(&c.scanner, &c.loop, on_rline1)
}
// A buffer that will contain the date header for the current second.
@(private)
Server_Date :: struct {
buf_backing: [DATE_LENGTH]byte,
buf: bytes.Buffer,
}
@(private)
server_date_start :: proc(s: ^Server) {
s.date.buf.buf = slice.into_dynamic(s.date.buf_backing[:])
server_date_update(s)
}
// Updates the cached date and schedules itself to run again after a second.
@(private)
server_date_update :: proc(s: rawptr) {
s := cast(^Server)s
nbio.timeout(&td.io, time.Second, s, server_date_update)
bytes.buffer_reset(&s.date.buf)
date_write(bytes.buffer_to_stream(&s.date.buf), time.now())
}
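// Because the buffer is only rewritten once per second by the timer above,
// server_date below can hand out the formatted date without doing any
// per-request formatting or allocation.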
@(private)
server_date :: proc(s: ^Server) -> string {
return string(s.date.buf_backing[:])
}

150
odin-http/status.odin Normal file
View file

@ -0,0 +1,150 @@
package http
import "core:fmt"
import "core:strings"
Status :: enum {
Continue = 100,
Switching_Protocols = 101,
Processing = 102,
Early_Hints = 103,
OK = 200,
Created = 201,
Accepted = 202,
Non_Authoritative_Information = 203,
No_Content = 204,
Reset_Content = 205,
Partial_Content = 206,
Multi_Status = 207,
Already_Reported = 208,
IM_Used = 226,
Multiple_Choices = 300,
Moved_Permanently = 301,
Found = 302,
See_Other = 303,
Not_Modified = 304,
Use_Proxy = 305, // Deprecated.
Unused = 306, // Deprecated.
Temporary_Redirect = 307,
Permanent_Redirect = 308,
Bad_Request = 400,
Unauthorized = 401,
Payment_Required = 402,
Forbidden = 403,
Not_Found = 404,
Method_Not_Allowed = 405,
Not_Acceptable = 406,
Proxy_Authentication_Required = 407,
Request_Timeout = 408,
Conflict = 409,
Gone = 410,
Length_Required = 411,
Precondition_Failed = 412,
Payload_Too_Large = 413,
URI_Too_Long = 414,
Unsupported_Media_Type = 415,
Range_Not_Satisfiable = 416,
Expectation_Failed = 417,
Im_A_Teapot = 418,
Misdirected_Request = 421,
Unprocessable_Content = 422,
Locked = 423,
Failed_Dependency = 424,
Too_Early = 425,
Upgrade_Required = 426,
Precondition_Required = 428,
Too_Many_Requests = 429,
Request_Header_Fields_Too_Large = 431,
Unavailable_For_Legal_Reasons = 451,
Internal_Server_Error = 500,
Not_Implemented = 501,
Bad_Gateway = 502,
Service_Unavailable = 503,
Gateway_Timeout = 504,
HTTP_Version_Not_Supported = 505,
Variant_Also_Negotiates = 506,
Insufficient_Storage = 507,
Loop_Detected = 508,
Not_Extended = 510,
Network_Authentication_Required = 511,
}
_status_strings: [max(Status) + Status(1)]string
// Populates _status_strings as a lookup from each status to its string representation,
// where an empty string means an invalid code.
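// For example, following the logic below: .OK maps to "200 OK",
// .Not_Found to "404 Not Found", and .Im_A_Teapot to "418 I'm a teapot".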
@(init, private)
status_strings_init :: proc() {
for field in Status {
name, ok := fmt.enum_value_to_string(field)
assert(ok)
b: strings.Builder
strings.write_int(&b, int(field))
strings.write_byte(&b, ' ')
// Apart from a few special-cased names, replaces underscores in the enum name with spaces.
#partial switch field {
case .Non_Authoritative_Information: strings.write_string(&b, "Non-Authoritative Information")
case .Multi_Status: strings.write_string(&b, "Multi-Status")
case .Im_A_Teapot: strings.write_string(&b, "I'm a teapot")
case:
for c in name {
switch c {
case '_': strings.write_rune(&b, ' ')
case: strings.write_rune(&b, c)
}
}
}
_status_strings[field] = strings.to_string(b)
}
}
status_string :: proc(s: Status) -> string {
if s >= Status(0) && s <= max(Status) {
return _status_strings[s]
}
return ""
}
status_valid :: proc(s: Status) -> bool {
return status_string(s) != ""
}
status_from_string :: proc(s: string) -> (Status, bool) {
if len(s) < 3 do return {}, false
code_int := int(s[0]-'0')*100 + (int(s[1]-'0')*10) + int(s[2]-'0')
if !status_valid(Status(code_int)) {
return {}, false
}
return Status(code_int), true
}
status_is_informational :: proc(s: Status) -> bool {
return s >= Status(100) && s < Status(200)
}
status_is_success :: proc(s: Status) -> bool {
return s >= Status(200) && s < Status(300)
}
status_is_redirect :: proc(s: Status) -> bool {
return s >= Status(300) && s < Status(400)
}
status_is_client_error :: proc(s: Status) -> bool {
return s >= Status(400) && s < Status(500)
}
status_is_server_error :: proc(s: Status) -> bool {
return s >= Status(500) && s < Status(600)
}
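// A minimal usage sketch (illustrative only, not part of the library):
//
//	s, ok := status_from_string("404 Not Found")
//	assert(ok && s == .Not_Found)
//	assert(status_is_client_error(s) && !status_is_success(s))
//	assert(status_string(.Im_A_Teapot) == "418 I'm a teapot")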