const std = @import("std");
fn fib(x: u64) u64 {
if (x <= 1) return x;
return fib(x - 1) + fib(x - 2);
}
pub fn main() void {
std.debug.warn("{}", fib(47));
}
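// Note: the naive double recursion above runs in exponential time;
// fib(47) = 2971215073, and reaching it this way takes billions of recursive calls.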
| https://raw.githubusercontent.com/drujensen/fib/578c15d13690fb36b1b3d8a419c5517c84abcd06/fib.zig |
// MIT License
// Copyright (c) 2019 Vexu
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
const std = @import("std");
const mem = std.mem;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const parseUnsigned = std.fmt.parseUnsigned;
const net = std.net;
const testing = std.testing;
const expect = testing.expect;
const expectEqualStrings = testing.expectEqualStrings;
const ValueMap = std.StringHashMap([]const u8);
pub const Uri = struct {
scheme: []const u8,
username: []const u8,
password: []const u8,
host: Host,
port: ?u16,
path: []const u8,
query: []const u8,
fragment: []const u8,
len: usize,
/// possible uri host values
pub const Host = union(enum) {
ip: net.Address,
name: []const u8,
};
/// maps a query string into a hashmap of key/value pairs; a key with no value maps to an empty string
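/// e.g. "a=1&b&c=" maps to { "a" => "1", "b" => "", "c" => "" }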
pub fn mapQuery(allocator: *Allocator, query: []const u8) Allocator.Error!ValueMap {
if (query.len == 0) {
return ValueMap.init(allocator);
}
var map = ValueMap.init(allocator);
errdefer map.deinit();
var start: usize = 0;
var mid: usize = 0;
for (query) |c, i| {
if (c == '&') {
if (mid != 0) {
_ = try map.put(query[start..mid], query[mid + 1 .. i]);
} else {
_ = try map.put(query[start..i], "");
}
start = i + 1;
mid = 0;
} else if (c == '=') {
mid = i;
}
}
if (mid != 0) {
_ = try map.put(query[start..mid], query[mid + 1 ..]);
} else {
_ = try map.put(query[start..], "");
}
return map;
}
/// possible errors for decode and encode
pub const EncodeError = error{
InvalidCharacter,
OutOfMemory,
};
/// decode path if it is percent encoded
pub fn decode(allocator: *Allocator, path: []const u8) EncodeError!?[]u8 {
var ret: ?[]u8 = null;
errdefer if (ret) |some| allocator.free(some);
var ret_index: usize = 0;
var i: usize = 0;
while (i < path.len) : (i += 1) {
if (path[i] == '%') {
if (!isPchar(path[i..])) {
return error.InvalidCharacter;
}
if (ret == null) {
ret = try allocator.alloc(u8, path.len);
mem.copy(u8, ret.?, path[0..i]);
ret_index = i;
}
// charToDigit can't fail because the chars are validated earlier
var new = (std.fmt.charToDigit(path[i + 1], 16) catch unreachable) << 4;
new |= std.fmt.charToDigit(path[i + 2], 16) catch unreachable;
ret.?[ret_index] = new;
ret_index += 1;
i += 2;
} else if (path[i] != '/' and !isPchar(path[i..])) {
return error.InvalidCharacter;
} else if (ret != null) {
ret.?[ret_index] = path[i];
ret_index += 1;
}
}
if (ret) |some| return allocator.shrink(some, ret_index);
return null;
}
/// percent encode if path contains characters not allowed in paths
pub fn encode(allocator: *Allocator, path: []const u8) EncodeError!?[]u8 {
var ret: ?[]u8 = null;
var ret_index: usize = 0;
for (path) |c, i| {
if (c != '/' and !isPchar(path[i..])) {
if (ret == null) {
ret = try allocator.alloc(u8, path.len * 3);
mem.copy(u8, ret.?, path[0..i]);
ret_index = i;
}
const hex_digits = "0123456789ABCDEF";
ret.?[ret_index] = '%';
ret.?[ret_index + 1] = hex_digits[(c & 0xF0) >> 4];
ret.?[ret_index + 2] = hex_digits[c & 0x0F];
ret_index += 3;
} else if (ret != null) {
ret.?[ret_index] = c;
ret_index += 1;
}
}
if (ret) |some| return allocator.shrink(some, ret_index);
return null;
}
/// resolves `path`, leaves trailing '/'
/// assumes `path` to be valid
pub fn resolvePath(allocator: *Allocator, path: []const u8) error{OutOfMemory}![]u8 {
assert(path.len > 0);
var list = std.ArrayList([]const u8).init(allocator);
defer list.deinit();
var it = mem.tokenize(u8, path, "/");
while (it.next()) |p| {
if (mem.eql(u8, p, ".")) {
continue;
} else if (mem.eql(u8, p, "..")) {
_ = list.popOrNull();
} else {
try list.append(p);
}
}
var buf = try allocator.alloc(u8, path.len);
errdefer allocator.free(buf);
var len: usize = 0;
for (list.items) |s| {
buf[len] = '/';
len += 1;
mem.copy(u8, buf[len..], s);
len += s.len;
}
if (path[path.len - 1] == '/') {
buf[len] = '/';
len += 1;
}
return allocator.shrink(buf, len);
}
pub const scheme_to_port = std.ComptimeStringMap(u16, .{
.{ "acap", 674 },
.{ "afp", 548 },
.{ "dict", 2628 },
.{ "dns", 53 },
.{ "ftp", 21 },
.{ "git", 9418 },
.{ "gopher", 70 },
.{ "http", 80 },
.{ "https", 443 },
.{ "imap", 143 },
.{ "ipp", 631 },
.{ "ipps", 631 },
.{ "irc", 194 },
.{ "ircs", 6697 },
.{ "ldap", 389 },
.{ "ldaps", 636 },
.{ "mms", 1755 },
.{ "msrp", 2855 },
.{ "mtqp", 1038 },
.{ "nfs", 111 },
.{ "nntp", 119 },
.{ "nntps", 563 },
.{ "pop", 110 },
.{ "prospero", 1525 },
.{ "redis", 6379 },
.{ "rsync", 873 },
.{ "rtsp", 554 },
.{ "rtsps", 322 },
.{ "rtspu", 5005 },
.{ "sftp", 22 },
.{ "smb", 445 },
.{ "snmp", 161 },
.{ "ssh", 22 },
.{ "svn", 3690 },
.{ "telnet", 23 },
.{ "ventrilo", 3784 },
.{ "vnc", 5900 },
.{ "wais", 210 },
.{ "ws", 80 },
.{ "wss", 443 },
});
/// possible errors for parse
pub const Error = error{
/// input is not a valid uri due to an invalid character
/// mostly a result of an invalid IPv6 address
InvalidCharacter,
/// given input was empty
EmptyUri,
};
/// parse URI from input
/// empty input is an error
/// if assume_auth is true then `example.com` will result in `example.com` being the host instead of path
pub fn parse(input: []const u8, assume_auth: bool) Error!Uri {
if (input.len == 0) {
return error.EmptyUri;
}
var uri = Uri{
.scheme = "",
.username = "",
.password = "",
.host = .{ .name = "" },
.port = null,
.path = "",
.query = "",
.fragment = "",
.len = 0,
};
switch (input[0]) {
'a'...'z', 'A'...'Z' => {
uri.parseMaybeScheme(input);
},
else => {},
}
if (input.len > uri.len + 2 and input[uri.len] == '/' and input[uri.len + 1] == '/') {
uri.len += 2; // for the '//'
try uri.parseAuth(input[uri.len..]);
} else if (assume_auth) {
try uri.parseAuth(input[uri.len..]);
}
// make host ip4 address if possible
if (uri.host == .name and uri.host.name.len > 0) blk: {
var a = net.Address.parseIp4(uri.host.name, 0) catch break :blk;
uri.host = .{ .ip = a }; // workaround for https://github.com/ziglang/zig/issues/3234
}
if (uri.host == .ip and uri.port != null) {
uri.host.ip.setPort(uri.port.?);
}
uri.parsePath(input[uri.len..]);
if (input.len > uri.len + 1 and input[uri.len] == '?') {
uri.parseQuery(input[uri.len + 1 ..]);
}
if (input.len > uri.len + 1 and input[uri.len] == '#') {
uri.parseFragment(input[uri.len + 1 ..]);
}
return uri;
}
fn parseMaybeScheme(u: *Uri, input: []const u8) void {
for (input) |c, i| {
switch (c) {
'a'...'z', 'A'...'Z', '0'...'9', '+', '-', '.' => {
// allowed characters
},
':' => {
u.scheme = input[0..i];
u.port = scheme_to_port.get(u.scheme);
u.len += u.scheme.len + 1; // +1 for the ':'
return;
},
else => {
// not a valid scheme
return;
},
}
}
}
fn parseAuth(u: *Uri, input: []const u8) Error!void {
var i: u32 = 0;
var at_index = i;
while (i < input.len) : (i += 1) {
switch (input[i]) {
'@' => at_index = i,
'[' => {
if (i != 0) return error.InvalidCharacter;
return u.parseIP6(input);
},
else => if (!isPchar(input[i..])) break,
}
}
if (at_index != 0) {
u.username = input[0..at_index];
if (mem.indexOfScalar(u8, u.username, ':')) |colon| {
u.password = u.username[colon + 1 ..];
u.username = u.username[0..colon];
}
at_index += 1;
}
u.host.name = input[at_index..i];
u.len += i;
if (mem.indexOfScalar(u8, u.host.name, ':')) |colon| {
u.port = parseUnsigned(u16, u.host.name[colon + 1 ..], 10) catch return error.InvalidCharacter;
u.host.name = u.host.name[0..colon];
}
}
fn parseIP6(u: *Uri, input: []const u8) Error!void {
const end = mem.indexOfScalar(u8, input, ']') orelse return error.InvalidCharacter;
const addr = net.Address.parseIp6(input[1..end], 0) catch return error.InvalidCharacter;
u.host = .{ .ip = addr };
u.len += end + 1;
if (input.len > end + 2 and input[end + 1] == ':') {
u.len += 1;
try u.parsePort(input[end + 2 ..]);
}
}
fn parsePort(u: *Uri, input: []const u8) Error!void {
var i: u32 = 0;
while (i < input.len) : (i += 1) {
switch (input[i]) {
'0'...'9' => {}, // digits
else => break,
}
}
if (i == 0) return error.InvalidCharacter;
u.port = parseUnsigned(u16, input[0..i], 10) catch return error.InvalidCharacter;
u.len += i;
}
fn parsePath(u: *Uri, input: []const u8) void {
for (input) |c, i| {
if (c != '/' and (c == '?' or c == '#' or !isPchar(input[i..]))) {
u.path = input[0..i];
u.len += u.path.len;
return;
}
}
u.path = input[0..];
u.len += u.path.len;
}
fn parseQuery(u: *Uri, input: []const u8) void {
u.len += 1; // +1 for the '?'
for (input) |c, i| {
if (c == '#' or (c != '/' and c != '?' and !isPchar(input[i..]))) {
u.query = input[0..i];
u.len += u.query.len;
return;
}
}
u.query = input;
u.len += input.len;
}
fn parseFragment(u: *Uri, input: []const u8) void {
u.len += 1; // +1 for the '#'
for (input) |c, i| {
if (c != '/' and c != '?' and !isPchar(input[i..])) {
u.fragment = input[0..i];
u.len += u.fragment.len;
return;
}
}
u.fragment = input;
u.len += u.fragment.len;
}
/// returns true if str starts with a valid path character or a percent encoded octet
pub fn isPchar(str: []const u8) bool {
assert(str.len > 0);
return switch (str[0]) {
'a'...'z', 'A'...'Z', '0'...'9', '-', '.', '_', '~', '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=', ':', '@' => true,
'%' => str.len >= 3 and isHex(str[1]) and isHex(str[2]),
else => false,
};
}
/// returns true if c is a hexadecimal digit
pub fn isHex(c: u8) bool {
return switch (c) {
'0'...'9', 'a'...'f', 'A'...'F' => true,
else => false,
};
}
};
test "basic url" {
const uri = try Uri.parse("https://ziglang.org:80/documentation/master/?test#toc-Introduction", false);
try expectEqualStrings("https", uri.scheme);
try expectEqualStrings("", uri.username);
try expectEqualStrings("", uri.password);
try expectEqualStrings("ziglang.org", uri.host.name);
try expect(uri.port.? == 80);
try expectEqualStrings("/documentation/master/", uri.path);
try expectEqualStrings("test", uri.query);
try expectEqualStrings("toc-Introduction", uri.fragment);
try expect(uri.len == 66);
}
test "short" {
const uri = try Uri.parse("telnet://192.0.2.16:80/", false);
try expectEqualStrings("telnet", uri.scheme);
try expectEqualStrings("", uri.username);
try expectEqualStrings("", uri.password);
var buf = [_]u8{0} ** 100;
var ip = std.fmt.bufPrint(buf[0..], "{}", .{uri.host.ip}) catch unreachable;
try expectEqualStrings("192.0.2.16:80", ip);
try expect(uri.port.? == 80);
try expectEqualStrings("/", uri.path);
try expectEqualStrings("", uri.query);
try expectEqualStrings("", uri.fragment);
try expect(uri.len == 23);
}
test "single char" {
const uri = try Uri.parse("a", false);
try expectEqualStrings("", uri.scheme);
try expectEqualStrings("", uri.username);
try expectEqualStrings("", uri.password);
try expectEqualStrings("", uri.host.name);
try expect(uri.port == null);
try expectEqualStrings("a", uri.path);
try expectEqualStrings("", uri.query);
try expectEqualStrings("", uri.fragment);
try expect(uri.len == 1);
}
test "ipv6" {
const uri = try Uri.parse("ldap://[2001:db8::7]/c=GB?objectClass?one", false);
try expectEqualStrings("ldap", uri.scheme);
try expectEqualStrings("", uri.username);
try expectEqualStrings("", uri.password);
var buf = [_]u8{0} ** 100;
var ip = std.fmt.bufPrint(buf[0..], "{}", .{uri.host.ip}) catch unreachable;
try expectEqualStrings("[2001:db8::7]:389", ip);
try expect(uri.port.? == 389);
try expectEqualStrings("/c=GB", uri.path);
try expectEqualStrings("objectClass?one", uri.query);
try expectEqualStrings("", uri.fragment);
try expect(uri.len == 41);
}
test "mailto" {
const uri = try Uri.parse("mailto:[email protected]", false);
try expectEqualStrings("mailto", uri.scheme);
try expectEqualStrings("", uri.username);
try expectEqualStrings("", uri.password);
try expectEqualStrings("", uri.host.name);
try expect(uri.port == null);
try expectEqualStrings("[email protected]", uri.path);
try expectEqualStrings("", uri.query);
try expectEqualStrings("", uri.fragment);
try expect(uri.len == 27);
}
test "tel" {
const uri = try Uri.parse("tel:+1-816-555-1212", false);
try expectEqualStrings("tel", uri.scheme);
try expectEqualStrings("", uri.username);
try expectEqualStrings("", uri.password);
try expectEqualStrings("", uri.host.name);
try expect(uri.port == null);
try expectEqualStrings("+1-816-555-1212", uri.path);
try expectEqualStrings("", uri.query);
try expectEqualStrings("", uri.fragment);
try expect(uri.len == 19);
}
test "urn" {
const uri = try Uri.parse("urn:oasis:names:specification:docbook:dtd:xml:4.1.2", false);
try expectEqualStrings("urn", uri.scheme);
try expectEqualStrings("", uri.username);
try expectEqualStrings("", uri.password);
try expectEqualStrings("", uri.host.name);
try expect(uri.port == null);
try expectEqualStrings("oasis:names:specification:docbook:dtd:xml:4.1.2", uri.path);
try expectEqualStrings("", uri.query);
try expectEqualStrings("", uri.fragment);
try expect(uri.len == 51);
}
test "userinfo" {
const uri = try Uri.parse("ftp://username:[email protected]/", false);
try expectEqualStrings("ftp", uri.scheme);
try expectEqualStrings("username", uri.username);
try expectEqualStrings("password", uri.password);
try expectEqualStrings("host.com", uri.host.name);
try expect(uri.port.? == 21);
try expectEqualStrings("/", uri.path);
try expectEqualStrings("", uri.query);
try expectEqualStrings("", uri.fragment);
try expect(uri.len == 33);
}
test "map query" {
const uri = try Uri.parse("https://ziglang.org:80/documentation/master/?test;1=true&false#toc-Introduction", false);
try expectEqualStrings("https", uri.scheme);
try expectEqualStrings("", uri.username);
try expectEqualStrings("", uri.password);
try expectEqualStrings("ziglang.org", uri.host.name);
try expect(uri.port.? == 80);
try expectEqualStrings("/documentation/master/", uri.path);
try expectEqualStrings("test;1=true&false", uri.query);
try expectEqualStrings("toc-Introduction", uri.fragment);
var map = try Uri.mapQuery(std.testing.allocator, uri.query);
defer map.deinit();
try expectEqualStrings("true", map.get("test;1").?);
try expectEqualStrings("", map.get("false").?);
}
test "ends in space" {
const uri = try Uri.parse("https://ziglang.org/documentation/master/ something else", false);
try expectEqualStrings("https", uri.scheme);
try expectEqualStrings("", uri.username);
try expectEqualStrings("", uri.password);
try expectEqualStrings("ziglang.org", uri.host.name);
try expectEqualStrings("/documentation/master/", uri.path);
try expect(uri.len == 41);
}
test "assume auth" {
const uri = try Uri.parse("ziglang.org", true);
try expectEqualStrings("ziglang.org", uri.host.name);
try expect(uri.len == 11);
}
test "username contains @" {
const uri = try Uri.parse("https://1.1.1.1&@2.2.2.2%[email protected]", false);
try expectEqualStrings("https", uri.scheme);
try expectEqualStrings("1.1.1.1&@2.2.2.2%23", uri.username);
try expectEqualStrings("", uri.password);
var buf = [_]u8{0} ** 100;
var ip = std.fmt.bufPrint(buf[0..], "{}", .{uri.host.ip}) catch unreachable;
try expectEqualStrings("3.3.3.3:443", ip);
try expect(uri.port.? == 443);
try expectEqualStrings("", uri.path);
try expect(uri.len == 35);
}
test "encode" {
const path = (try Uri.encode(testing.allocator, "/안녕하세요.html")).?;
defer testing.allocator.free(path);
try expectEqualStrings("/%EC%95%88%EB%85%95%ED%95%98%EC%84%B8%EC%9A%94.html", path);
}
test "decode" {
const path = (try Uri.decode(testing.allocator, "/%EC%95%88%EB%85%95%ED%95%98%EC%84%B8%EC%9A%94.html")).?;
defer testing.allocator.free(path);
try expectEqualStrings("/안녕하세요.html", path);
}
test "resolvePath" {
var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
defer arena.deinit();
const alloc = &arena.allocator;
var a = try Uri.resolvePath(alloc, "/a/b/..");
try expectEqualStrings("/a", a);
a = try Uri.resolvePath(alloc, "/a/b/../");
try expectEqualStrings("/a/", a);
a = try Uri.resolvePath(alloc, "/a/b/c/../d/../");
try expectEqualStrings("/a/b/", a);
a = try Uri.resolvePath(alloc, "/a/b/c/../d/..");
try expectEqualStrings("/a/b", a);
a = try Uri.resolvePath(alloc, "/a/b/c/../d/.././");
try expectEqualStrings("/a/b/", a);
a = try Uri.resolvePath(alloc, "/a/b/c/../d/../.");
try expectEqualStrings("/a/b", a);
a = try Uri.resolvePath(alloc, "/a/../../");
try expectEqualStrings("/", a);
}
| https://raw.githubusercontent.com/lithdew/rheia/162293d0f0e8d6572a8954c0add83f13f76b3cc6/uri.zig |
//***************************************************************************
//
// Licensed to the Apache Software Foundation (ASF) under one or more
// contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright ownership. The
// ASF licenses this file to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance with the
// License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
//
//***************************************************************************
//! PinePhone Allwinner A64 MIPI DPHY (Display Physical Layer) Driver for Apache NuttX RTOS
//! See https://lupyuen.github.io/articles/dsi#appendix-enable-mipi-display-physical-layer-dphy
//! "A64 Page ???" refers to Allwinner A64 User Manual: https://github.com/lupyuen/pinephone-nuttx/releases/download/doc/Allwinner_A64_User_Manual_V1.1.pdf
/// Import the Zig Standard Library
const std = @import("std");
/// Import NuttX Functions from C
const c = @cImport({
// NuttX Defines
@cDefine("__NuttX__", "");
@cDefine("NDEBUG", "");
@cDefine("FAR", "");
// NuttX Header Files
@cInclude("arch/types.h");
@cInclude("../../nuttx/include/limits.h");
@cInclude("nuttx/config.h");
@cInclude("inttypes.h");
@cInclude("unistd.h");
@cInclude("stdlib.h");
@cInclude("stdio.h");
});
/// Base Address of Allwinner A64 CCU Controller (A64 Page 82)
const CCU_BASE_ADDRESS = 0x01C2_0000;
/// Base Address of Allwinner A64 MIPI DPHY Controller (A64 Page 74)
const DPHY_BASE_ADDRESS = 0x01CA_1000;
/// Enable MIPI Display Physical Layer (DPHY).
/// Based on https://lupyuen.github.io/articles/dsi#appendix-enable-mipi-display-physical-layer-dphy
pub export fn dphy_enable() void {
debug("dphy_enable: start", .{});
defer { debug("dphy_enable: end", .{}); }
// Set DSI Clock to 150 MHz (600 MHz / 4)
// MIPI_DSI_CLK_REG: CCU Offset 0x168 (A64 Page 122)
// Set DSI_DPHY_GATING (Bit 15) to 1 (DSI DPHY Clock is On)
// Set DSI_DPHY_SRC_SEL (Bits 8 to 9) to 0b10 (DSI DPHY Clock Source is PLL_PERIPH0(1X))
// Set DPHY_CLK_DIV_M (Bits 0 to 3) to 3 (DSI DPHY Clock divide ratio - 1)
debug("Set DSI Clock to 150 MHz", .{});
const MIPI_DSI_CLK_REG = CCU_BASE_ADDRESS + 0x168;
comptime{ assert(MIPI_DSI_CLK_REG == 0x1c20168); }
const DSI_DPHY_GATING: u16 = 1 << 15;
const DSI_DPHY_SRC_SEL: u10 = 0b10 << 8;
const DPHY_CLK_DIV_M: u4 = 3 << 0;
const MIPI_DSI_CLK = DSI_DPHY_GATING
| DSI_DPHY_SRC_SEL
| DPHY_CLK_DIV_M;
comptime{ assert(MIPI_DSI_CLK == 0x8203); }
putreg32(MIPI_DSI_CLK, MIPI_DSI_CLK_REG); // TODO: DMB
// Power on DPHY Tx (Undocumented)
// DPHY_TX_CTL_REG: DPHY Offset 0x04
// Set to 0x1000 0000
debug("Power on DPHY Tx", .{});
const DPHY_TX_CTL_REG = DPHY_BASE_ADDRESS + 0x04;
comptime{ assert(DPHY_TX_CTL_REG == 0x1ca1004); }
putreg32(0x10000000, DPHY_TX_CTL_REG); // TODO: DMB
// DPHY_TX_TIME0_REG: DPHY Offset 0x10
// Set to 0xa06 000e
const DPHY_TX_TIME0_REG = DPHY_BASE_ADDRESS + 0x10;
comptime{ assert(DPHY_TX_TIME0_REG == 0x1ca1010); }
putreg32(0xa06000e, DPHY_TX_TIME0_REG); // TODO: DMB
// DPHY_TX_TIME1_REG: DPHY Offset 0x14
// Set to 0xa03 3207
const DPHY_TX_TIME1_REG = DPHY_BASE_ADDRESS + 0x14;
comptime{ assert(DPHY_TX_TIME1_REG == 0x1ca1014); }
putreg32(0xa033207, DPHY_TX_TIME1_REG); // TODO: DMB
// DPHY_TX_TIME2_REG: DPHY Offset 0x18
// Set to 0x1e
const DPHY_TX_TIME2_REG = DPHY_BASE_ADDRESS + 0x18;
comptime{ assert(DPHY_TX_TIME2_REG == 0x1ca1018); }
putreg32(0x1e, DPHY_TX_TIME2_REG); // TODO: DMB
// DPHY_TX_TIME3_REG: DPHY Offset 0x1c
// Set to 0x0
const DPHY_TX_TIME3_REG = DPHY_BASE_ADDRESS + 0x1c;
comptime{ assert(DPHY_TX_TIME3_REG == 0x1ca101c); }
putreg32(0x0, DPHY_TX_TIME3_REG); // TODO: DMB
// DPHY_TX_TIME4_REG: DPHY Offset 0x20
// Set to 0x303
const DPHY_TX_TIME4_REG = DPHY_BASE_ADDRESS + 0x20;
comptime{ assert(DPHY_TX_TIME4_REG == 0x1ca1020); }
putreg32(0x303, DPHY_TX_TIME4_REG); // TODO: DMB
// Enable DPHY (Undocumented)
// DPHY_GCTL_REG: DPHY Offset 0x00 (Enable DPHY)
// Set to 0x31
debug("Enable DPHY", .{});
const DPHY_GCTL_REG = DPHY_BASE_ADDRESS + 0x00;
comptime{ assert(DPHY_GCTL_REG == 0x1ca1000); }
putreg32(0x31, DPHY_GCTL_REG); // TODO: DMB
// DPHY_ANA0_REG: DPHY Offset 0x4c (PWS)
// Set to 0x9f00 7f00
const DPHY_ANA0_REG = DPHY_BASE_ADDRESS + 0x4c;
comptime{ assert(DPHY_ANA0_REG == 0x1ca104c); }
putreg32(0x9f007f00, DPHY_ANA0_REG); // TODO: DMB
// DPHY_ANA1_REG: DPHY Offset 0x50 (CSMPS)
// Set to 0x1700 0000
const DPHY_ANA1_REG = DPHY_BASE_ADDRESS + 0x50;
comptime{ assert(DPHY_ANA1_REG == 0x1ca1050); }
putreg32(0x17000000, DPHY_ANA1_REG); // TODO: DMB
// DPHY_ANA4_REG: DPHY Offset 0x5c (CKDV)
// Set to 0x1f0 1555
const DPHY_ANA4_REG = DPHY_BASE_ADDRESS + 0x5c;
comptime{ assert(DPHY_ANA4_REG == 0x1ca105c); }
putreg32(0x1f01555, DPHY_ANA4_REG); // TODO: DMB
// DPHY_ANA2_REG: DPHY Offset 0x54 (ENIB)
// Set to 0x2
const DPHY_ANA2_REG = DPHY_BASE_ADDRESS + 0x54;
comptime{ assert(DPHY_ANA2_REG == 0x1ca1054); }
putreg32(0x2, DPHY_ANA2_REG); // TODO: DMB
// Wait 5 microseconds
_ = c.usleep(5);
// Enable LDOR, LDOC, LDOD (Undocumented)
// DPHY_ANA3_REG: DPHY Offset 0x58 (Enable LDOR, LDOC, LDOD)
// Set to 0x304 0000
debug("Enable LDOR, LDOC, LDOD", .{});
const DPHY_ANA3_REG = DPHY_BASE_ADDRESS + 0x58;
comptime{ assert(DPHY_ANA3_REG == 0x1ca1058); }
putreg32(0x3040000, DPHY_ANA3_REG); // TODO: DMB
// Wait 1 microsecond
_ = c.usleep(1);
// DPHY_ANA3_REG: DPHY Offset 0x58 (Enable VTTC, VTTD)
// Set bits 0xf800 0000
comptime{ assert(DPHY_ANA3_REG == 0x1ca1058); }
const EnableVTTC = 0xf8000000;
modreg32(EnableVTTC, EnableVTTC, DPHY_ANA3_REG); // TODO: DMB
// Wait 1 microsecond
_ = c.usleep(1);
// DPHY_ANA3_REG: DPHY Offset 0x58 (Enable DIV)
// Set bits 0x400 0000
comptime{ assert(DPHY_ANA3_REG == 0x1ca1058); }
const EnableDIV = 0x4000000;
modreg32(EnableDIV, EnableDIV, DPHY_ANA3_REG); // TODO: DMB
// Wait 1 microsecond
_ = c.usleep(1);
// DPHY_ANA2_REG: DPHY Offset 0x54 (Enable CK_CPU)
// Set bits 0x10
comptime{ assert(DPHY_ANA2_REG == 0x1ca1054); }
const EnableCKCPU = 0x10;
modreg32(EnableCKCPU, EnableCKCPU, DPHY_ANA2_REG); // TODO: DMB
// Wait 1 microsecond
_ = c.usleep(1);
// DPHY_ANA1_REG: DPHY Offset 0x50 (VTT Mode)
// Set bits 0x8000 0000
comptime{ assert(DPHY_ANA1_REG == 0x1ca1050); }
const VTTMode = 0x80000000;
modreg32(VTTMode, VTTMode, DPHY_ANA1_REG); // TODO: DMB
// DPHY_ANA2_REG: DPHY Offset 0x54 (Enable P2S CPU)
// Set bits 0xf00 0000
comptime{ assert(DPHY_ANA2_REG == 0x1ca1054); }
const EnableP2SCPU = 0xf000000;
modreg32(EnableP2SCPU, EnableP2SCPU, DPHY_ANA2_REG); // TODO: DMB
}
/// Modify the specified bits in a memory mapped register.
/// Based on https://github.com/apache/nuttx/blob/master/arch/arm64/src/common/arm64_arch.h#L473
fn modreg32(
comptime val: u32, // Bits to set, like (1 << bit)
comptime mask: u32, // Bits to clear, like (1 << bit)
addr: u64 // Address to modify
) void {
comptime { assert(val & mask == val); }
debug(" *0x{x}: clear 0x{x}, set 0x{x}", .{ addr, mask, val & mask });
putreg32(
(getreg32(addr) & ~(mask))
| ((val) & (mask)),
(addr)
);
}
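// Example (mirrors the call sites in dphy_enable above): modreg32(0x10, 0x10, DPHY_ANA2_REG)
// reads the register, clears bit 4, sets bit 4, and writes the result back,
// leaving every other bit unchanged.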
/// Get the 32-bit value at the address
fn getreg32(addr: u64) u32 {
const ptr = @intToPtr(*const volatile u32, addr);
return ptr.*;
}
/// Set the 32-bit value at the address
fn putreg32(val: u32, addr: u64) void {
if (enableLog) { debug(" *0x{x} = 0x{x}", .{ addr, val }); }
const ptr = @intToPtr(*volatile u32, addr);
ptr.* = val;
}
/// Set to False to disable log
var enableLog = true;
///////////////////////////////////////////////////////////////////////////////
// Panic Handler
/// Called by Zig when it hits a Panic. We print the Panic Message, Stack Trace and halt. See
/// https://andrewkelley.me/post/zig-stack-traces-kernel-panic-bare-bones-os.html
/// https://github.com/ziglang/zig/blob/master/lib/std/builtin.zig#L763-L847
pub fn panic(
message: []const u8,
_stack_trace: ?*std.builtin.StackTrace
) noreturn {
// Print the Panic Message
_ = _stack_trace;
_ = puts("\n!ZIG PANIC!");
_ = puts(@ptrCast([*c]const u8, message));
// Print the Stack Trace
_ = puts("Stack Trace:");
var it = std.debug.StackIterator.init(@returnAddress(), null);
while (it.next()) |return_address| {
_ = printf("%p\n", return_address);
}
// Halt
c.exit(1);
}
///////////////////////////////////////////////////////////////////////////////
// Logging
/// Called by Zig for `std.log.debug`, `std.log.info`, `std.log.err`, ...
/// https://gist.github.com/leecannon/d6f5d7e5af5881c466161270347ce84d
pub fn log(
comptime _message_level: std.log.Level,
comptime _scope: @Type(.EnumLiteral),
comptime format: []const u8,
args: anytype,
) void {
_ = _message_level;
_ = _scope;
// Format the message
var buf: [100]u8 = undefined; // Limit to 100 chars
var slice = std.fmt.bufPrint(&buf, format, args)
catch { _ = puts("*** log error: buf too small"); return; };
// Terminate the formatted message with a null
var buf2: [buf.len + 1 : 0]u8 = undefined;
std.mem.copy(
u8,
buf2[0..slice.len],
slice[0..slice.len]
);
buf2[slice.len] = 0;
// Print the formatted message
_ = puts(&buf2);
}
///////////////////////////////////////////////////////////////////////////////
// Imported Functions and Variables
/// For safety, we import these functions ourselves to enforce Null-Terminated Strings.
/// We changed `[*c]const u8` to `[*:0]const u8`
extern fn printf(format: [*:0]const u8, ...) c_int;
extern fn puts(str: [*:0]const u8) c_int;
/// Aliases for Zig Standard Library
const assert = std.debug.assert;
const debug = std.log.debug;
| https://raw.githubusercontent.com/lupyuen/pinephone-nuttx/700afea277d94090efa3926a96352b4a6319e99f/dphy.zig |
const root = @import("root");
pub const c = if (@hasDecl(root, "loadable_extension"))
@import("c/loadable_extension.zig")
else
@cImport({
@cInclude("sqlite3.h");
@cInclude("workaround.h");
});
// versionGreaterThanOrEqualTo returns true if the SQLite version is greater than or equal to the major.minor.patch provided.
pub fn versionGreaterThanOrEqualTo(major: u8, minor: u8, patch: u8) bool {
return c.SQLITE_VERSION_NUMBER >= @as(u32, major) * 1000000 + @as(u32, minor) * 1000 + @as(u32, patch);
}
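// For reference, SQLITE_VERSION_NUMBER encodes X.Y.Z as X * 1000000 + Y * 1000 + Z,
// e.g. 3.21.0 -> 3021000, which is what the comparison above relies on.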
comptime {
if (!versionGreaterThanOrEqualTo(3, 21, 0)) {
@compileError("must use SQLite >= 3.21.0");
}
}
| https://raw.githubusercontent.com/vrischmann/zig-sqlite/91e5fedd15c5ea3cb42ccceefb3d0f4bb9bad68f/c.zig |
pub fn main() !void {
try renderer.render(.{
.Shader = SimpleBlendShader,
//.Shader = CheckerShader,
//.Shader = BandingShader,
//.Shader = CliffordAttractorShader,
//.Shader = JuliaSetShader,
//.Shader = SimplexNoiseShader,
//.Shader = GeometryShader,
//.Shader = QuantizeShader,
//.Shader = IntNoiseShader,
//.Shader = SurfaceNormalShader,
.preview = true,
.memoryLimitMiB = 128,
.ssaa = 3,
.preview_ssaa = 1,
.preview_samples = 600000,
.frames = 1,
//.frames = 30 * 8, // ffmpeg -r 30 -f image2 -i 'frame-%06d.png' -vcodec libx264 -pix_fmt yuv420p -profile:v main -level 3.1 -preset medium -crf 23 -x264-params ref=4 -movflags +faststart out.mp4
.path = "out/out.png",
.frameTemplate = "out/frame-{d:0>6}.png",
.res = Resolutions.Instagram.square,
//.res = Resolutions.Instagram.portrait,
//.res = Resolutions.Instagram.landscape,
//.res = Resolutions.Prints._8x10,
//.res = comptime Resolutions.Prints._8x10.landscape(),
//.res = Resolutions.Screen._4k,
//.res = Resolutions.Screen._1080p,
//.res = Resolutions.Wallpapers.iosParallax,
//.res = comptime Resolutions.Prints._5x15.landscape(),
//.res = Resolutions.Prints._5x15,
//.res = @import("lib/resolutions.zig").Res{ .width = 256, .height = 256 },
});
}
const SimpleBlendShader = struct {
const Self = @This();
pub fn init(allocator: *Allocator, config: renderer.ShaderConfig) !Self {
return Self{};
}
pub fn deinit(self: *const Self, allocator: *Allocator) void {}
pub fn shade(self: *const Self, x: f64, y: f64) Jazbz {
return mix(
mix(colors.goldenYellow, colors.seaBlue, saturate(x)),
mix(colors.navyBlue, colors.bloodRed, saturate(x)),
saturate(y),
);
}
};
const CheckerShader = struct {
const Self = @This();
pub fn init(allocator: *Allocator, config: renderer.ShaderConfig) !Self {
return Self{};
}
pub fn deinit(self: *const Self, allocator: *Allocator) void {}
pub fn shade(self: *const Self, x: f64, y: f64) Jazbz {
return (comptime @import("lib/debug_shaders.zig").CheckedBackground(16)).content(colors.neonGreen, x, y);
}
};
const BandingShader = struct {
const Self = @This();
pub fn init(allocator: *Allocator, config: renderer.ShaderConfig) !Self {
return Self{};
}
pub fn deinit(self: *const Self, allocator: *Allocator) void {}
pub fn shade(self: *const Self, x: f64, y: f64) Jazbz {
if (x >= 0 and x <= 1 and y >= 0 and y <= 1) {
const banding = @import("lib/banding.zig").Banding(pattern, (1 << 6) * phi, 640).sample(x, y);
return mix(colors.goldenYellow, colors.bloodRed, banding);
} else {
return colors.navyBlue;
}
}
fn pattern(x: f64, y: f64) [2]f64 {
return [_]f64{
x * y,
y + x * x,
};
}
};
const CliffordAttractorShader = struct {
const Self = @This();
const Pixel = struct {
count: usize = 0,
};
const Screen = @import("lib/screen.zig").Screen;
const PixelScreen = Screen(Pixel);
screen: PixelScreen,
countCorrection: f64 = 1,
pub fn init(allocator: *Allocator, config: renderer.ShaderConfig) !Self {
var self = Self{
.screen = try PixelScreen.init(allocator, config.res.width, config.res.height, .{ .count = 0 }),
};
errdefer self.screen.deinit();
var n: usize = 4 << 20;
const a = 1.7;
const b = 1.7;
const c = 0.6;
const d = 1.2;
const scale = comptime math.max(if (c < 0) -c else c, if (d < 0) -d else d) + 1.0;
var x: f64 = a;
var y: f64 = b;
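// Iterate the Clifford attractor map x' = sin(a*y) + c*cos(a*x),
// y' = sin(b*x) + d*cos(b*y), accumulating one hit per pixel visited;
// shade() later maps the per-pixel hit count onto a tone ramp.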
while (n != 0) : (n -= 1) {
if (self.screen.ref(coMix(-scale, scale, x), coMix(-scale, scale, y))) |pixel| {
pixel.count += 1;
}
const x1 = math.sin(a * y) + c * math.cos(a * x);
const y1 = math.sin(b * x) + d * math.cos(b * y);
x = x1;
y = y1;
}
var highest: usize = 1;
for (self.screen.cells) |pixel| {
if (pixel.count > highest) {
highest = pixel.count;
}
}
self.countCorrection = 1 / @intToFloat(f64, highest);
return self;
}
pub fn deinit(self: *const Self, allocator: *Allocator) void {
self.screen.deinit();
}
pub fn shade(self: *const Self, x: f64, y: f64) Jazbz {
if (self.screen.get(x, y)) |pixel| {
const count = @intToFloat(f64, pixel.count) * self.countCorrection;
return mix(colors.white, colors.darkGreen, gmath.mapDynamicRange(0, 1, 0, 1, 0.3, 0.5, 1.0, count));
} else {
return colors.white;
}
}
};
const JuliaSetShader = struct {
const Self = @This();
pub fn init(allocator: *Allocator, config: renderer.ShaderConfig) !Self {
return Self{};
}
pub fn deinit(self: *const Self, allocator: *Allocator) void {}
pub fn shade(self: *const Self, x: f64, y: f64) Jazbz {
const nLimit: usize = 1 << 9;
const cx = -0.76;
const cy = -0.09;
var zx = mix(-0.8, 0.8, y);
var zy = mix(-0.8, 0.8, x);
var xx = zx * zx;
var yy = zy * zy;
var n: usize = nLimit;
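// Iterate z -> z^2 + c using the real/imaginary split
// (zx + i*zy)^2 = (zx^2 - zy^2) + i*(2*zx*zy), bailing out once |z|^2 >= 4.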
while (n != 0 and xx + yy < 4) : (n -= 1) {
zy *= zx;
zy *= 2;
zy += cy;
zx = xx - yy + cx;
xx = zx * zx;
yy = zy * zy;
}
const n01 = coMix(0, comptime @intToFloat(f64, nLimit), @intToFloat(f64, n));
return rainbowRamp(n01).scaleJ(vignette(x, y));
}
fn rainbowRamp(x: f64) Jazbz {
return Jazbz{
.j = mix(0.0, 0.7, gmath.quantize(1.0 / 8.0, gmath.sigmoidC3(sq(x)))),
.azbz = AzBz.initCh(0.6, fract(x * 12)),
};
}
fn vignette(x: f64, y: f64) f64 {
return mix(0.4, 1, 1.3 - (1 - (1 - sq(x)) * (1 - sq(y))));
}
};
const SimplexNoiseShader = struct {
const sn = @import("lib/simplexnoise1234.zig");
const Self = @This();
pub fn init(allocator: *Allocator, config: renderer.ShaderConfig) !Self {
return Self{};
}
pub fn deinit(self: *const Self, allocator: *Allocator) void {}
pub fn shade(self: *const Self, x: f64, y: f64) Jazbz {
const h1 = sn.noise2(mix(100, 104, x), mix(200, 204, y)) * 1.0;
const h2 = sn.noise2(mix(300, 308, x), mix(400, 408, y)) * 0.5;
const h3 = sn.noise2(mix(500, 516, x), mix(600, 616, y)) * 0.25;
const cloud = coMix(-1.75, 1.75, h1 + h2 + h3);
var result = mix(colors.goldenYellow, colors.darkPurple, cloud);
result.j = gmath.sigmoidSkew(mix(0.0, 0.4, y), 0.5, result.j);
return result;
}
};
const GeometryShader = struct {
const geom = @import("lib/geom.zig");
const sdf2 = @import("lib/sdf2.zig");
const brdf = @import("lib/brdf.zig");
const sn = @import("lib/simplexnoise1234.zig");
const Self = @This();
pub fn init(allocator: *Allocator, config: renderer.ShaderConfig) !Self {
return Self{};
}
pub fn deinit(self: *const Self, allocator: *Allocator) void {}
pub fn shade(self: *const Self, x: f64, y: f64) Jazbz {
const circleRadius = comptime mix(0.1, 0.16666666666666666, 0.5);
const inset = 0.33333333333333333;
const offset = comptime v2(0.025, -0.0125);
const Dp1 = DotPipe(comptime v2(inset, inset).add(offset), circleRadius, V2.degree90);
const Dp2 = DotPipe(comptime v2(1 - inset, 1 - inset).add(offset), circleRadius, V2.degree0);
const Dp3 = DotPipe(comptime v2(inset, 1 - inset).add(offset), circleRadius, V2.degree315);
const p = v2(x, y);
const p1 = Dp1.signedDists(p);
const p2 = Dp2.signedDists(p);
const p3 = Dp3.signedDists(p);
const dotSd = p1.dot.merge(p2.dot).merge(p3.dot);
const pipeSd = dotSd.merge(p1.pipe).merge(p2.pipe).merge(p3.pipe);
const redMat = Surface{
.material = .{
.baseColor = mix(colors.leafGreen, colors.black, mix(0.0, 0.25, y)),
.reflectance = 0.2,
.roughness = 0.5,
},
.noise = 1,
.noiseSize = 192,
};
const blackMat = Surface{
.material = .{
.baseColor = colors.almostBlack,
.metallic = 1,
.clearcoat = 1,
.clearcoatRoughness = 0.35,
},
.noise = 0,
.noiseSize = 192,
};
const whiteMat = Surface{
.material = .{
.baseColor = colors.eggShell,
},
.noise = 0,
.noiseSize = 192,
};
const smooth = 0.001;
var mat = redMat;
mat = mix(mat, blackMat, pipeSd.smoothstepC3(smooth, 0));
mat = mix(mat, whiteMat, dotSd.smoothstepC3(smooth, 0));
const prepared = mat.material.prepare();
const point = v3(p.x, p.y, 0);
const h1 = sn.noise2(mix(100, 100 + mat.noiseSize, x), mix(200, 200 + mat.noiseSize, y));
const h2 = sn.noise2(mix(300, 300 + mat.noiseSize, x), mix(400, 400 + mat.noiseSize, y));
const normal = v3(h1 * mat.noise, h2 * mat.noise, 1).normalize();
const camera = v3(0.5, 0.5, 128);
const light1 = comptime v3(inset, inset, 0.5);
const light2 = comptime v3(inset, 1 - inset, 0.5);
const light3 = comptime v3(1 - inset, 1 - inset, 0.5);
const sample1 = prepared.brdf(normal, camera.sub(point).normalize(), light1.sub(point).normalize()).scaleJ(1.2);
const sample2 = prepared.brdf(normal, camera.sub(point).normalize(), light2.sub(point).normalize()).scaleJ(0.7);
const sample3 = prepared.brdf(normal, camera.sub(point).normalize(), light3.sub(point).normalize()).scaleJ(0.8);
var result = sample1.addLight(sample2).addLight(sample3).toJazbz();
const blackPoint = 0.03;
const whitePoint = 0.75;
result.j = gmath.filmicDynamicRange(blackPoint, whitePoint, 0.4, 0.5, result.j);
result.j = gmath.sigmoidSkew(0.3, 1 - y, result.j);
result.j = saturate(result.j);
return result;
}
const Surface = struct {
material: brdf.Material,
noise: f64 = 0,
noiseSize: f64 = 0,
pub fn mix(self: @This(), other: @This(), alpha: f64) @This() {
return .{
.material = gmath.mix(self.material, other.material, alpha),
.noise = gmath.mix(self.noise, other.noise, alpha),
.noiseSize = gmath.mix(self.noiseSize, other.noiseSize, alpha),
};
}
};
fn DotPipe(c: V2, r: f64, dir: V2) type {
const n = dir;
const e = n.rotate90();
const s = n.rotate180();
const w = n.rotate270();
const circle = geom.Circle.rp(r, c);
const line1 = geom.Line.pn(c.add(e.scale(r)), s);
const line2 = geom.Line.pn(c.add(w.scale(r)), n);
const line3 = geom.Line.pn(c, e);
return struct {
dot: sdf2.Sd,
pipe: sdf2.Sd,
fn signedDists(p: V2) @This() {
return .{
.dot = dotSd(p),
.pipe = pipeSd(p),
};
}
fn dotSd(p: V2) sdf2.Sd {
return circle.signedDist(p);
}
fn pipeSd(p: V2) sdf2.Sd {
const sd1 = line1.signedDistBefore(p);
const sd2 = line2.signedDistBefore(p);
const sd3 = line3.signedDistBefore(p);
return sd1.match(sd2).cut(sd3);
}
};
}
};
const QuantizeShader = struct {
const sqn = @import("lib/squirrel3noise.zig");
const Self = @This();
pub fn init(allocator: *Allocator, config: renderer.ShaderConfig) !Self {
return Self{};
}
pub fn deinit(self: *const Self, allocator: *Allocator) void {}
pub fn shade(self: *const Self, x: f64, y: f64) Jazbz {
const xq = gmath.quantize(0.1, x);
const yq = gmath.quantize(0.1, y);
const xf = gmath.fract(x / 0.1);
const yf = gmath.fract(y / 0.1);
var result = mix(
mix(colors.white, colors.black, xq),
mix(colors.navyBlue, colors.leafGreen, xq),
yq,
);
result.j = mix(result.j, xf, mix(0.05, 0.0, yf));
return result;
}
};
const IntNoiseShader = struct {
const gs = @import("lib/gridsize.zig");
const sqn = @import("lib/squirrel3noise.zig");
const Self = @This();
const Gs = gs.GridSize(7, 7);
const Cell = struct {
vertex: V2,
color: Jazbz,
};
grid: [Gs.len]Cell,
pub fn init(allocator: *Allocator, config: renderer.ShaderConfig) !Self {
var self = Self{
.grid = undefined,
};
var rng = sqn.squirrelRng(0);
for (self.grid) |*cell| {
cell.vertex = .{
.x = rng.f01(),
.y = rng.f01(),
};
cell.color = Jazbz.initJch(rng.mixf(0.5, 0.8), 0.3, rng.f01());
}
return self;
}
pub fn deinit(self: *const Self, allocator: *Allocator) void {}
pub fn shade(self: *const Self, x: f64, y: f64) Jazbz {
var result = colors.black;
if (Gs.pos(x, y)) |centerPos| {
var win_d: f64 = 1;
var win_color = colors.white;
for (centerPos.neighbors9()) |candidatePos| {
if (candidatePos) |pos| {
const q = v2(pos.cellx(x), pos.celly(y));
const cell = &self.grid[pos.index];
const c = cell.vertex;
const d = saturate(c.distTo(q));
if (d < win_d) {
win_d = d;
win_color = cell.color;
}
}
}
result = mix(result, win_color, coSq(1 - win_d));
result.j = gmath.sigmoidSkew(0.3, 1 - y, result.j);
}
return result;
}
};
const SurfaceNormalShader = struct {
const geom = @import("lib/geom.zig");
const sdf2 = @import("lib/sdf2.zig");
const brdf = @import("lib/brdf.zig");
const surf = @import("lib/surfacenormal.zig");
const Self = @This();
pub fn init(allocator: *Allocator, config: renderer.ShaderConfig) !Self {
return Self{};
}
pub fn deinit(self: *const Self, allocator: *Allocator) void {}
pub fn shade(self: *const Self, x: f64, y: f64) Jazbz {
const p = v2(x, y);
const circle = geom.Circle.rp(0.3, v2(0.5, 0.5));
var layer = mix(colors.bloodRed, colors.goldenYellow, x);
if (surf.EllipticalTorus.forCircle(circle, 0.15, 0.2, p)) |surface| {
const material = brdf.Material{
.baseColor = mix(colors.bloodRed, colors.goldenYellow, 1 - x),
.reflectance = 0.4,
.roughness = 0.6,
.clearcoat = 1,
.clearcoatRoughness = 0.3,
};
const shaded = surf.shade(surface, &material.prepare());
layer = mix(layer, shaded, surface.blend.smoothstepC3(0.001, 0));
}
layer.j = gmath.sigmoidSkew(0.3, 1 - y, layer.j);
layer.j = saturate(layer.j);
return layer;
}
};
pub const enable_segfault_handler: bool = true;
const std = @import("std");
const math = std.math;
const Allocator = std.mem.Allocator;
const renderer = @import("lib/renderer.zig");
const Resolutions = @import("lib/resolutions.zig").Resolutions;
const V2 = @import("lib/affine.zig").V2;
const V3 = @import("lib/affine.zig").V3;
const v2 = V2.init;
const v3 = V3.init;
const Jazbz = @import("lib/jabz.zig").Jazbz(f64);
const AzBz = Jazbz.AzBz;
const colors = @import("lib/colors.zig").Colors(Jazbz);
const gmath = @import("lib/gmath.zig").gmath(f64);
const fract = gmath.fract;
const clamp = gmath.clamp;
const saturate = gmath.saturate;
const linearstep = gmath.linearstep;
const smoothstepC1 = gmath.smoothstepC1;
const smoothstepC2 = gmath.smoothstepC2;
const smoothstepC3 = gmath.smoothstepC3;
const mix = gmath.mix;
const coMix = gmath.coMix;
const sq = gmath.sq;
const coSq = gmath.coSq;
const pi = gmath.pi;
const invPi = gmath.invPi;
const tau = gmath.tau;
const invTau = gmath.invTau;
const phi = gmath.phi;
const invPhi = gmath.invPhi;
const sqrt2 = gmath.sqrt2;
const invSqrt2 = gmath.invSqrt2;
const sqrt3 = gmath.sqrt3;
const halfSqrt3 = gmath.halfSqrt3;
| https://raw.githubusercontent.com/quag/zig-generative-template/c59b72641ba7baef4c0f49e71f4576a67a4ec66c/main.zig |
pub const Cairo = @import("gdk/Cairo.zig");
pub const Display = @import("gdk/Display.zig").Display;
pub const Pixbuf = @import("gdk/Pixbuf.zig").Pixbuf;
pub const Wayland = @import("gdk/Wayland.zig");
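// Reference every public declaration recursively so that `zig test` analyzes the
// whole binding surface; similar in spirit to std.testing.refAllDecls.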
fn refAllDeclsRecursive(comptime T: type) void {
comptime {
for (@import("std").meta.declarations(T)) |decl| {
if (decl.is_pub) {
switch (decl.data) {
.Type => |T2| refAllDeclsRecursive(T2),
else => _ = decl,
}
}
}
}
}
test {
@setEvalBranchQuota(100000);
refAllDeclsRecursive(@This());
}
| https://raw.githubusercontent.com/davidmhewitt/zig-gtk/8130922d5437aeb296d1f4b928d7a76f04ca27be/gdk.zig |
// Copyright (C) 2021-2024 Chadwain Holness
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
pub const token = @import("source/token.zig");
pub const Tokenizer = @import("source/Tokenizer.zig");
pub const Dom = @import("source/Dom.zig");
pub const tree_construction = @import("source/tree_construction.zig");
pub const Parser = @import("source/Parser.zig");
pub const util = @import("source/util.zig");
comptime {
if (@import("builtin").is_test) {
@import("std").testing.refAllDecls(@This());
}
}
| https://raw.githubusercontent.com/chadwain/rem/68dcb476a9090c9bbe0044dd26914ee1266924ed/rem.zig |
const std = @import("std");
pub fn main() void {
var sum: i64 = 0;
var i: i64 = 1;
while (i <= 1000000000): (i += 1) {
sum += i;
}
std.debug.print("{any}\n", .{sum});
}
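// Sanity check: Gauss' closed form n * (n + 1) / 2 gives
// 1_000_000_000 * 1_000_000_001 / 2 = 500_000_000_500_000_000,
// which is the value the loop above should print.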
| https://raw.githubusercontent.com/clarkzjw/one-two-three...infinity/f944fe3d68923f8e2bf3e5ec75bfc53fe4e52618/Zig.zig |
pub fn puts(str: [*:0]const u8) c_int {
return asm volatile (
\\ li $9, 0x3f
\\ j 0xa0
: [ret] "={r2}" (-> c_int)
: [str] "{r4}" (str)
);
}
| https://raw.githubusercontent.com/XaviDCR92/psx-zig/d1e2090f46938fff210c3cf2d79b60b1f8e98d25/puts.zig |
const std = @import("std");
pub const json = @import("./json.zig");
const Map = json.DeserializeMap;
pub const glTF = struct {
extensionsUsed: [][]const u8 = &[_][]const u8{},
extensionsRequired: [][]const u8 = &[_][]const u8{},
accessors: []Accessor = &[_]Accessor{},
//animations: []Animation,
asset: Asset,
buffers: []Buffer = &[_]Buffer{},
bufferViews: []BufferView = &[_]BufferView{},
//cameras: []Camera,
images: []Image = &[_]Image{},
materials: []Material = &[_]Material{},
meshes: []Mesh = &[_]Mesh{},
nodes: []Node = &[_]Node{},
samplers: []Sampler = &[_]Sampler{},
scene: ?usize = null,
scenes: []Scene = &[_]Scene{},
//skins: []Skin,
textures: []Texture = &[_]Texture{},
extensions: ?Map(json.Value) = null,
extras: ?json.Value = null,
};
pub const BYTE = 5120;
pub const UNSIGNED_BYTE = 5121;
pub const SHORT = 5122;
pub const UNSIGNED_SHORT = 5123;
pub const UNSIGNED_INT = 5125;
pub const FLOAT = 5126;
/// A typed view into a bufferView. A bufferView contains raw binary data. An accessor provides a
/// typed view into a bufferView or a subset of a bufferView similar to how WebGL's
/// vertexAttribPointer() defines an attribute in a buffer.
pub const Accessor = struct {
/// The index of the bufferView. When not defined, accessor must be initialized with zeros;
/// sparse property or extensions could override zeros with actual values.
bufferView: ?usize = null,
/// The offset relative to the start of the bufferView in bytes. This must be a multiple of the
/// size of the component datatype.
byteOffset: usize = 0,
/// The datatype of components in the attribute. All valid values correspond to WebGL enums. The
/// corresponding typed arrays are Int8Array, Uint8Array, Int16Array, Uint16Array, Uint32Array,
/// and Float32Array, respectively. 5125 (UNSIGNED_INT) is only allowed when the accessor
/// contains indices, i.e., the accessor is only referenced by primitive.indices.
componentType: ComponentType,
/// Specifies whether integer data values should be normalized (true) to [0, 1] (for unsigned
/// types) or [-1, 1] (for signed types), or converted directly (false) when they are accessed.
/// This property is defined only for accessors that contain vertex attributes or animation
/// output data.
normalized: bool = false,
/// The number of attributes referenced by this accessor, not to be confused with the number of
/// bytes or number of components.
count: usize,
// TODO: maybe change this from enum to a string? Extensions may require it
/// Specifies if the attribute is a scalar, vector, or matrix.
@"type": enum {
SCALAR,
VEC2,
VEC3,
VEC4,
MAT2,
MAT3,
MAT4,
pub fn size(self: @This()) usize {
return switch (self) {
.SCALAR => 1,
.VEC2 => 2,
.VEC3 => 3,
.VEC4 => 4,
.MAT2 => 4,
.MAT3 => 9,
.MAT4 => 16,
};
}
},
/// Maximum value of each component in this attribute. Array elements must be treated as having
/// the same data type as accessor's `componentType`. Both min and max arrays have the same
/// length. The length is determined by the value of the type property; it can be 1, 2, 3, 4, 9,
/// or 16.
///
/// `normalized` property has no effect on array values: they always correspond to the actual
/// values stored in the buffer. When accessor is sparse, this property must contain max values
/// of accessor data with sparse substitution applied.
max: ?[]f64 = null,
/// Minimum value of each component in this attribute. Array elements must be treated as having
/// the same data type as accessor's `componentType`. Both min and max arrays have the same
/// length. The length is determined by the value of the type property; it can be 1, 2, 3, 4, 9,
/// or 16.
///
/// `normalized` property has no effect on array values: they always correspond to the actual
/// values stored in the buffer. When accessor is sparse, this property must contain min values
/// of accessor data with sparse substitution applied.
min: ?[]f64 = null,
/// Sparse storage of attributes that deviate from their initialization value.
sparse: ?Sparse = null,
/// The user-defined name of this object. This is not necessarily unique, e.g., an accessor and
/// a buffer could have the same name, or two accessors could even have the same name.
name: ?[]const u8 = null,
/// Dictionary object with extension-specific objects.
extensions: ?Map(json.Value) = null,
/// Application-specific data
extras: ?json.Value = null,
pub const ComponentType = enum(u32) {
Byte = BYTE,
UnsignedByte = UNSIGNED_BYTE,
Short = SHORT,
UnsignedShort = UNSIGNED_SHORT,
UnsignedInt = UNSIGNED_INT,
Float = FLOAT,
};
pub const Sparse = struct {
count: usize,
indices: struct {
bufferView: usize,
byteOffset: usize,
componentType: ComponentType,
},
values: struct {
bufferView: usize,
byteOffset: usize,
},
};
};
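// Informal example (values are illustrative, not from any particular asset):
// an accessor with componentType = .Float (4 bytes), type = .VEC3 and count = 8
// covers 8 * 3 * 4 = 96 bytes of its bufferView, starting at byteOffset.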
pub const Asset = struct {
copyright: ?[]const u8 = null,
generator: ?[]const u8 = null,
version: []const u8,
minVersion: ?[]const u8 = null,
extensions: ?Map(json.Value) = null,
extras: ?json.Value = null,
};
pub const Buffer = struct {
uri: ?[]const u8 = null,
byteLength: usize,
name: ?[]const u8 = null,
extensions: ?Map(json.Value) = null,
extras: ?json.Value = null,
};
pub const ARRAY_BUFFER = 34962;
pub const ELEMENT_ARRAY_BUFFER = 34963;
pub const BufferView = struct {
buffer: usize,
byteOffset: usize = 0,
byteLength: usize,
stride: ?usize = null,
target: ?enum(u32) {
ArrayBuffer = ARRAY_BUFFER,
ElementArrayBuffer = ELEMENT_ARRAY_BUFFER,
} = null,
name: ?[]const u8 = null,
extensions: ?Map(json.Value) = null,
extras: ?json.Value = null,
};
pub const Image = struct {
uri: ?[]const u8 = null,
mimeType: ?[]const u8 = null,
bufferView: ?usize = null,
name: ?[]const u8 = null,
extensions: ?Map(json.Value) = null,
extras: ?json.Value = null,
};
pub const Material = struct {
name: ?[]const u8 = null,
extensions: ?Map(json.Value) = null,
extras: ?json.Value = null,
pbrMetallicRoughness: PBR_MetallicRoughness = PBR_MetallicRoughness{},
normalTexture: ?NormalTexture = null,
occlusionTexture: ?OcclusionTexture = null,
emissiveTexture: ?EmissiveTexture = null,
emissiveFactor: [3]f64 = [_]f64{ 0, 0, 0 },
alphaMode: enum {
OPAQUE,
MASK,
BLEND,
} = .OPAQUE,
alphaCutoff: f64 = 0.5,
doubleSided: bool = false,
pub const PBR_MetallicRoughness = struct {
baseColorFactor: [4]f64 = [4]f64{ 1, 1, 1, 1 },
baseColorTexture: ?Map(json.Value) = null,
metallicFactor: f64 = 1,
roughnessFactor: f64 = 1,
metallicRoughnessTexture: ?Map(json.Value) = null,
extensions: ?Map(json.Value) = null,
extras: ?json.Value = null,
};
pub const NormalTexture = struct {
index: usize,
texCoord: usize = 0,
scale: f64 = 1,
extensions: ?Map(json.Value) = null,
extras: ?json.Value = null,
};
pub const OcclusionTexture = struct {
index: usize,
texCoord: usize = 0,
strength: f64 = 1,
extensions: ?Map(json.Value) = null,
extras: ?json.Value = null,
};
pub const EmissiveTexture = struct {
index: usize,
texCoord: usize = 0,
extensions: ?Map(json.Value) = null,
extras: ?json.Value = null,
};
};
const POINTS = 0;
const LINES = 1;
const LINE_LOOP = 2;
const LINE_STRIP = 3;
const TRIANGLES = 4;
const TRIANGLE_STRIP = 5;
const TRIANGLE_FAN = 6;
pub const Mesh = struct {
primitives: []Primitive,
weights: ?[]f64 = null,
name: ?[]const u8 = null,
extensions: ?Map(json.Value) = null,
extras: ?json.Value = null,
pub const Primitive = struct {
attributes: Map(usize),
indices: ?usize = null,
material: ?usize = null,
mode: enum(u32) {
Points = POINTS,
Lines = LINES,
LineLoop = LINE_LOOP,
LineStrip = LINE_STRIP,
Triangles = TRIANGLES,
TriangleStrip = TRIANGLE_STRIP,
TriangleFan = TRIANGLE_FAN,
} = .Triangles,
targets: ?[]Map(usize) = null,
extensions: ?Map(json.Value) = null,
extras: ?json.Value = null,
};
};
pub const Node = struct {
camera: ?usize = null,
children: []usize = &[_]usize{},
skin: ?usize = null,
matrix: ?[16]f64 = null,
mesh: ?usize = null,
rotation: ?[4]f64 = null,
scale: ?[3]f64 = null,
translation: ?[3]f64 = null,
weights: ?[]f64 = null,
name: ?[]const u8 = null,
extensions: ?Map(json.Value) = null,
extras: ?json.Value = null,
};
pub const NEAREST = 9728;
pub const LINEAR = 9729;
pub const NEAREST_MIPMAP_NEAREST = 9984;
pub const LINEAR_MIPMAP_NEAREST = 9985;
pub const NEAREST_MIPMAP_LINEAR = 9986;
pub const LINEAR_MIPMAP_LINEAR = 9987;
pub const CLAMP_TO_EDGE = 33071;
pub const MIRRORED_REPEAT = 33648;
pub const REPEAT = 10497;
pub const Sampler = struct {
magFilter: ?enum(u32) {
Nearest = NEAREST,
Linear = LINEAR,
} = null,
minFilter: ?enum(u32) {
Nearest = NEAREST,
Linear = LINEAR,
NearestMipmapNearest = NEAREST_MIPMAP_NEAREST,
LinearMipmapNearest = LINEAR_MIPMAP_NEAREST,
NearestMipmapLinear = NEAREST_MIPMAP_LINEAR,
LinearMipmapLinear = LINEAR_MIPMAP_LINEAR,
} = null,
wrapS: WrappingMode = .Repeat,
wrapT: WrappingMode = .Repeat,
name: ?[]const u8 = null,
extensions: ?Map(json.Value) = null,
extras: ?json.Value = null,
pub const WrappingMode = enum(u32) {
ClampToEdge = CLAMP_TO_EDGE,
MirroredRepeat = MIRRORED_REPEAT,
Repeat = REPEAT,
};
};
pub const Scene = struct {
nodes: []usize = &[_]usize{},
name: ?[]const u8 = null,
extensions: ?Map(json.Value) = null,
extras: ?json.Value = null,
};
pub const Texture = struct {
sampler: ?usize = null,
source: ?usize = null,
name: ?[]const u8 = null,
extensions: ?Map(json.Value) = null,
extras: ?json.Value = null,
};
| https://raw.githubusercontent.com/leroycep/gltf-zig/9df88e095a578fed8d456911171f3ec7110eeae4/gltf.zig |
const std = @import("std");
const data = @embedFile("./day2.data");
// const data =
// \\forward 5
// \\down 5
// \\forward 8
// \\up 3
// \\down 8
// \\forward 2
// ;
pub fn main() !void {
var it = std.mem.split(u8, data, "\n");
var x: u64 = 0;
var y: u64 = 0;
var aim: u64 = 0;
while(it.next()) |token| {
if (std.mem.indexOf(u8, token, "forward") != null) {
const n = try std.fmt.parseInt(u32, token[8..], 10);
// increases your horizontal position by n units
x += n;
// increases your depth by your aim multiplied by n
y += aim * n;
} else if(std.mem.indexOf(u8, token, "down") != null) {
const n = try std.fmt.parseInt(u32, token[5..], 10);
// down n increases your aim by n units
aim += n;
} else {
// up
const n = try std.fmt.parseInt(u32, token[3..], 10);
// up n decreases your aim by n units
aim -= n;
}
// std.debug.print("{s} | x = {d}, y = {d}, aim = {d} \n", .{token, x, y, aim});
}
std.debug.print("x = {d}, y = {d}, product = {d} \n", .{x, y, x * y});
} | https://raw.githubusercontent.com/IwanKaramazow/adventofcode21/0531d3835cd2fa4908829948d3800c5aee675bf4/day2.zig |
const std = @import("std");
pub fn is_alsa_playing() !bool {
var argv = [_][]const u8{ "sh", "-c", "grep RUNNING /proc/asound/card*/pcm*/sub*/status" };
var result = try std.ChildProcess.exec(.{ .allocator = std.heap.page_allocator, .argv = &argv });
// std.debug.print("stderr={s}\n", .{result.stderr});
// std.debug.print("stdout={s}\n", .{result.stdout});
return result.stdout.len != 0;
}
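// Minimal usage sketch (hypothetical caller, not part of this file):
// pub fn main() !void {
//     std.debug.print("ALSA playing: {}\n", .{try is_alsa_playing()});
// }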
| https://raw.githubusercontent.com/Ryp/gpio-zig/fcca36f13cfbe0f223dcbe41b68ecb33eafd80c2/alsa.zig |
const std = @import("std");
const expect = std.testing.expect;
const test_allocator = std.testing.allocator;
test "stack" {
const string = "(()())";
var stack = std.ArrayList(usize).init(
test_allocator,
);
defer stack.deinit();
const Pair = struct { open: usize, close: usize };
var pairs = std.ArrayList(Pair).init(
test_allocator,
);
defer pairs.deinit();
for (string, 0..) |char, i| {
if (char == '(') try stack.append(i);
if (char == ')')
try pairs.append(.{
.open = stack.pop(),
.close = i,
});
}
for (pairs.items, 0..) |pair, i| {
try expect(std.meta.eql(pair, switch (i) {
0 => Pair{ .open = 1, .close = 2 },
1 => Pair{ .open = 3, .close = 4 },
2 => Pair{ .open = 0, .close = 5 },
else => unreachable,
}));
}
} | https://raw.githubusercontent.com/akalmannakarmi/zig-try/d3ed6d7e54ae8044ed760340d8139f353d3e0d3e/e4j.zig |
const std = @import("std");
pub const Format = enum {
unsigned8,
signed16_lsb,
signed24_lsb,
signed32_lsb,
pub fn getNumBytes(self: Format) u16 {
return switch (self) {
.unsigned8 => 1,
.signed16_lsb => 2,
.signed24_lsb => 3,
.signed32_lsb => 4,
};
}
};
pub const PreloadedInfo = struct {
num_channels: usize,
sample_rate: usize,
format: Format,
num_samples: usize,
pub fn getNumBytes(self: PreloadedInfo) usize {
return self.num_samples * self.num_channels * self.format.getNumBytes();
}
};
// verbose is comptime so we can avoid using std.debug.warn which doesn't
// exist on some targets (e.g. wasm)
pub fn Loader(comptime Reader: type, comptime verbose: bool) type {
return struct {
fn readIdentifier(reader: *Reader) ![4]u8 {
var quad: [4]u8 = undefined;
try reader.readNoEof(&quad);
return quad;
}
fn toIdentifier(reader: *Reader, id: [4]u8) !void {
while (true) {
const quad = try readIdentifier(reader);
if (std.mem.eql(u8, &quad, &id))
return;
const size = try reader.readIntLittle(u32);
try reader.skipBytes(size, .{});
}
}
fn preloadError(comptime message: []const u8) !PreloadedInfo {
if (verbose) {
std.debug.warn("{s}\n", .{message});
}
return error.WavLoadFailed;
}
pub fn preload(reader: *Reader) !PreloadedInfo {
// read RIFF chunk descriptor (12 bytes)
const chunk_id = try readIdentifier(reader);
if (!std.mem.eql(u8, &chunk_id, "RIFF")) {
return preloadError("missing \"RIFF\" header");
}
try reader.skipBytes(4, .{}); // ignore chunk_size
const format_id = try readIdentifier(reader);
if (!std.mem.eql(u8, &format_id, "WAVE")) {
return preloadError("missing \"WAVE\" identifier");
}
// read "fmt" sub-chunk
const subchunk1_id = try readIdentifier(reader);
if (!std.mem.eql(u8, &subchunk1_id, "fmt ")) {
return preloadError("missing \"fmt \" header");
}
const subchunk1_size = try reader.readIntLittle(u32);
if (subchunk1_size != 16) {
return preloadError("not PCM (subchunk1_size != 16)");
}
const audio_format = try reader.readIntLittle(u16);
if (audio_format != 1) {
return preloadError("not integer PCM (audio_format != 1)");
}
const num_channels = try reader.readIntLittle(u16);
const sample_rate = try reader.readIntLittle(u32);
const byte_rate = try reader.readIntLittle(u32);
const block_align = try reader.readIntLittle(u16);
const bits_per_sample = try reader.readIntLittle(u16);
if (num_channels < 1 or num_channels > 16) {
return preloadError("invalid number of channels");
}
if (sample_rate < 1 or sample_rate > 192000) {
return preloadError("invalid sample_rate");
}
const format: Format = switch (bits_per_sample) {
8 => .unsigned8,
16 => .signed16_lsb,
24 => .signed24_lsb,
32 => .signed32_lsb,
else => return preloadError("invalid number of bits per sample"),
};
const bytes_per_sample = format.getNumBytes();
if (byte_rate != sample_rate * num_channels * bytes_per_sample) {
return preloadError("invalid byte_rate");
}
if (block_align != num_channels * bytes_per_sample) {
return preloadError("invalid block_align");
}
// read "data" sub-chunk header
toIdentifier(reader, "data".*) catch |e| switch (e) {
error.EndOfStream => return preloadError("missing \"data\" header"),
else => return e,
};
const subchunk2_size = try reader.readIntLittle(u32);
if ((subchunk2_size % (num_channels * bytes_per_sample)) != 0) {
return preloadError("invalid subchunk2_size");
}
const num_samples = subchunk2_size / (num_channels * bytes_per_sample);
return PreloadedInfo{
.num_channels = num_channels,
.sample_rate = sample_rate,
.format = format,
.num_samples = num_samples,
};
}
pub fn load(
reader: *Reader,
preloaded: PreloadedInfo,
out_buffer: []u8,
) !void {
const num_bytes = preloaded.getNumBytes();
std.debug.assert(out_buffer.len >= num_bytes);
try reader.readNoEof(out_buffer[0..num_bytes]);
}
};
}
pub const SaveInfo = struct {
num_channels: usize,
sample_rate: usize,
format: Format,
};
pub fn Saver(comptime Writer: type) type {
const data_chunk_pos: u32 = 36; // location of "data" header
return struct {
fn writeHelper(writer: Writer, info: SaveInfo, maybe_data: ?[]const u8) !void {
const bytes_per_sample = info.format.getNumBytes();
const num_channels = try std.math.cast(u16, info.num_channels);
const sample_rate = try std.math.cast(u32, info.sample_rate);
const byte_rate = sample_rate * @as(u32, num_channels) * bytes_per_sample;
const block_align: u16 = num_channels * bytes_per_sample;
const bits_per_sample: u16 = bytes_per_sample * 8;
const data_len = if (maybe_data) |data| try std.math.cast(u32, data.len) else 0;
try writer.writeAll("RIFF");
if (maybe_data != null) {
try writer.writeIntLittle(u32, data_chunk_pos + 8 + data_len - 8);
} else {
try writer.writeIntLittle(u32, 0);
}
try writer.writeAll("WAVE");
try writer.writeAll("fmt ");
try writer.writeIntLittle(u32, 16); // PCM
try writer.writeIntLittle(u16, 1); // uncompressed
try writer.writeIntLittle(u16, num_channels);
try writer.writeIntLittle(u32, sample_rate);
try writer.writeIntLittle(u32, byte_rate);
try writer.writeIntLittle(u16, block_align);
try writer.writeIntLittle(u16, bits_per_sample);
try writer.writeAll("data");
if (maybe_data) |data| {
try writer.writeIntLittle(u32, data_len);
try writer.writeAll(data);
} else {
try writer.writeIntLittle(u32, 0);
}
}
// write wav header with placeholder values for length. use this when
// you are going to stream to the wav file and won't know the length
// till you are done.
pub fn writeHeader(writer: Writer, info: SaveInfo) !void {
try writeHelper(writer, info, null);
}
// after streaming, call this to seek back and patch the wav header
// with length values.
pub fn patchHeader(writer: Writer, seeker: anytype, data_len: usize) !void {
const data_len_u32 = try std.math.cast(u32, data_len);
try seeker.seekTo(4);
try writer.writeIntLittle(u32, data_chunk_pos + 8 + data_len_u32 - 8);
try seeker.seekTo(data_chunk_pos + 4);
try writer.writeIntLittle(u32, data_len_u32);
}
// save a prepared wav (header and data) in one shot.
pub fn save(writer: Writer, data: []const u8, info: SaveInfo) !void {
try writeHelper(writer, info, data);
}
};
}
test "basic coverage (loading)" {
const null_wav = [_]u8{
0x52, 0x49, 0x46, 0x46, 0x7C, 0x00, 0x00, 0x00, 0x57, 0x41, 0x56,
0x45, 0x66, 0x6D, 0x74, 0x20, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00,
0x01, 0x00, 0x44, 0xAC, 0x00, 0x00, 0x88, 0x58, 0x01, 0x00, 0x02,
0x00, 0x10, 0x00, 0x64, 0x61, 0x74, 0x61, 0x58, 0x00, 0x00, 0x00,
0x00, 0x00, 0xFF, 0xFF, 0x02, 0x00, 0xFF, 0xFF, 0x00, 0x00, 0x00,
0x00, 0xFF, 0xFF, 0x02, 0x00, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0xFE, 0xFF, 0x01, 0x00, 0x01,
0x00, 0xFE, 0xFF, 0x03, 0x00, 0xFD, 0xFF, 0x02, 0x00, 0xFF, 0xFF,
0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0xFF, 0xFF, 0x01, 0x00, 0xFE,
0xFF, 0x02, 0x00, 0xFF, 0xFF, 0x00, 0x00, 0x01, 0x00, 0xFF, 0xFF,
0x00, 0x00, 0x01, 0x00, 0xFE, 0xFF, 0x02, 0x00, 0xFF, 0xFF, 0x00,
0x00, 0x00, 0x00, 0xFF, 0xFF, 0x03, 0x00, 0xFC, 0xFF, 0x03, 0x00,
};
var reader = std.io.fixedBufferStream(&null_wav).reader();
const MyLoader = Loader(@TypeOf(reader), true);
const preloaded = try MyLoader.preload(&reader);
std.testing.expectEqual(@as(usize, 1), preloaded.num_channels);
std.testing.expectEqual(@as(usize, 44100), preloaded.sample_rate);
std.testing.expectEqual(@as(Format, .signed16_lsb), preloaded.format);
std.testing.expectEqual(@as(usize, 44), preloaded.num_samples);
var buffer: [88]u8 = undefined;
try MyLoader.load(&reader, preloaded, &buffer);
}
test "basic coverage (saving)" {
var buffer: [1000]u8 = undefined;
var writer = std.io.fixedBufferStream(&buffer).writer();
try Saver(@TypeOf(writer)).save(writer, &[_]u8{ 0, 0, 0, 0, 0, 0, 0, 0 }, .{
.num_channels = 1,
.sample_rate = 44100,
.format = .signed16_lsb,
});
std.testing.expectEqualSlices(u8, "RIFF", buffer[0..4]);
}
test "basic coverage (streaming out)" {
var buffer: [1000]u8 = undefined;
var fbs = std.io.fixedBufferStream(&buffer);
const MySaver = Saver(@TypeOf(fbs).Writer);
try MySaver.writeHeader(fbs.writer(), .{
.num_channels = 1,
.sample_rate = 44100,
.format = .signed16_lsb,
});
std.testing.expectEqual(@as(u64, 44), try fbs.getPos());
std.testing.expectEqual(@as(u32, 0), std.mem.readIntLittle(u32, buffer[4..8]));
std.testing.expectEqual(@as(u32, 0), std.mem.readIntLittle(u32, buffer[40..44]));
const data = &[_]u8{ 0, 0, 0, 0, 0, 0, 0, 0 };
try fbs.writer().writeAll(data);
std.testing.expectEqual(@as(u64, 52), try fbs.getPos());
try MySaver.patchHeader(fbs.writer(), fbs.seekableStream(), data.len);
std.testing.expectEqual(@as(u32, 44), std.mem.readIntLittle(u32, buffer[4..8]));
std.testing.expectEqual(@as(u32, 8), std.mem.readIntLittle(u32, buffer[40..44]));
}
| https://raw.githubusercontent.com/marler8997/audio-deinterlacer/9a30af1f0d8ca50155b8168773bf56763b084d44/wav.zig |
// This is free and unencumbered software released into the public domain.
pub usingnamespace @import("src/angle.zig");
pub usingnamespace @import("src/latitude.zig");
pub usingnamespace @import("src/longitude.zig");
test "Dogma" { // zig test dogma.zig
const meta = @import("std").meta;
meta.refAllDecls(@This());
}
| https://raw.githubusercontent.com/dogmatists/dogma.zig/15c0227896fb5b37b06cd4e3386d28ca58f9a3e3/dogma.zig |
const std = @import("std");
pub fn build(b: *std.Build) void {
const target = b.standardTargetOptions(.{});
const optimize = b.standardOptimizeOption(.{});
const tomlZigName = "toml-zig";
const dep_opts = .{ .target = target, .optimize = optimize };
const tomlZig = b.dependency(tomlZigName, dep_opts).module(tomlZigName);
// const lib = b.addStaticLibrary(.{
// .name = "tinytask",
// .root_source_file = .{ .path = "src/root.zig" },
// .target = target,
// .optimize = optimize,
// });
// lib.root_module.addImport(tomlZigName, tomlZig);
// b.installArtifact(lib);
const exe = b.addExecutable(.{
.name = "tinytask",
.root_source_file = .{ .path = "src/main.zig" },
.target = target,
.optimize = optimize,
});
exe.root_module.addImport(tomlZigName, tomlZig);
b.installArtifact(exe);
const run_cmd = b.addRunArtifact(exe);
run_cmd.step.dependOn(b.getInstallStep());
    // zig build run -- arg1 arg2 etc
if (b.args) |args| {
run_cmd.addArgs(args);
}
const run_step = b.step("run", "Run the app");
run_step.dependOn(&run_cmd.step);
// const lib_unit_tests = b.addTest(.{
// .root_source_file = .{ .path = "src/root.zig" },
// .target = target,
// .optimize = optimize,
// });
// const run_lib_unit_tests = b.addRunArtifact(lib_unit_tests);
const exe_unit_tests = b.addTest(.{
.root_source_file = .{ .path = "src/main.zig" },
.target = target,
.optimize = optimize,
});
const run_exe_unit_tests = b.addRunArtifact(exe_unit_tests);
const test_step = b.step("test", "Run unit tests");
// test_step.dependOn(&run_lib_unit_tests.step);
test_step.dependOn(&run_exe_unit_tests.step);
}
| https://raw.githubusercontent.com/sirenkovladd/tinytask/3374a7fde03125e1ae07e6db6d7fcc9e43fd5083/build.zig |
const std = @import("std");
const cli = @import("cli");
const dawn = @import("dawn");
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
const allocator = gpa.allocator();
var port_receive_option = cli.Option{
.long_name = "port_receive",
.help = "port to receive osc messages on (optional).",
.value = cli.OptionValue{ .int = 2502 },
};
var port_send_option = cli.Option{
.long_name = "port_send",
.help = "port to send osc messages to (optional).",
.value = cli.OptionValue{ .int = 2501 },
};
var app = &cli.App{
.name = "dawn",
.version = dawn.version,
.description =
\\dawn is a modular synth.
,
.options = &.{ &port_send_option, &port_receive_option },
.action = run_dawn,
.help_config = cli.HelpConfig{
.color_usage = cli.ColorUsage.never,
},
};
pub fn log(
comptime level: std.log.Level,
comptime scope: @Type(.EnumLiteral),
comptime format: []const u8,
args: anytype,
) void {
const stdout = std.io.getStdOut().writer();
const prefix = "[" ++ comptime level.asText() ++ "] " ++ "(" ++ @tagName(scope) ++ ") ";
nosuspend stdout.print(prefix ++ format ++ "\n", args) catch return;
}
pub const std_options = struct {
pub const log_level = .info;
pub const logFn = log;
};
var patch: *dawn.Patch = undefined;
fn run_dawn(_: []const []const u8) !void {
var port_send = port_send_option.value.int orelse unreachable;
var port_receive = port_receive_option.value.int orelse unreachable;
const logger = std.log.scoped(.dawn);
logger.info("start of session", .{});
defer logger.info("end of session", .{});
patch = try dawn.Patch.create(allocator);
patch.port_receive = @intCast(port_receive);
patch.port_send = @intCast(port_send);
logger.info("initialized patch", .{});
defer patch.destroy(allocator);
logger.info("adding opensoundcontrol module to patch", .{});
try patch.add_module("opensoundcontrol", "opensoundcontrol");
logger.info("adding soundio module to patch", .{});
try patch.add_module("soundio", "soundio");
while (true) {
std.time.sleep(std.time.ns_per_s); // TODO: reconsider
}
}
pub fn main() !void {
return cli.run(app, allocator);
}
| https://raw.githubusercontent.com/dawnsynth/dawn/c9062931757d1df06766026b0bdac5a572ece30b/dawn.zig |
//! https://adventofcode.com/2023/day/4
const std = @import("std");
const A = std.BoundedArray(u32, 32);
fn parseWins(line: []const u8, ns: *A) !usize {
ns.len = 0;
var win_count: usize = 0;
var it = std.mem.splitScalar(u8, line, ':');
_ = it.next().?;
const x = it.next().?;
var it2 = std.mem.splitScalar(u8, x, '|');
const ws = it2.next().?;
const nums = it2.next().?;
var winiter = std.mem.tokenizeAny(u8, ws, ", ");
while (winiter.next()) |win|
try ns.append(try std.fmt.parseInt(u32, win, 10));
var numiter = std.mem.tokenizeAny(u8, nums, ", ");
while (numiter.next()) |num| {
const n = try std.fmt.parseInt(u32, num, 10);
if (std.mem.indexOfScalar(u32, ns.constSlice(), n)) |_|
win_count += 1;
}
return win_count;
}
pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
const alloc = arena.allocator();
const args = try std.process.argsAlloc(alloc);
const file = try std.fs.cwd().openFile(args[1], .{});
defer file.close();
const input = try file.readToEndAlloc(alloc, std.math.maxInt(u32));
var part1: usize = 0;
var part2: usize = 0;
// init copies for part 2
var lines = std.mem.tokenizeScalar(u8, input, '\n');
var lines_count: usize = 0;
while (lines.next()) |_| lines_count += 1;
lines.reset();
var copies = try std.ArrayList(u32).initCapacity(alloc, lines_count);
copies.expandToCapacity();
@memset(copies.items, 1);
var ns = try A.init(0);
var cardid: usize = 0;
while (lines.next()) |line| : (cardid += 1) {
const wins = try parseWins(line, &ns);
part1 += if (wins > 0)
@as(usize, 1) << @as(u6, @intCast(wins)) - 1
else
wins;
for (0..wins) |win_idx|
copies.items[win_idx + cardid + 1] += copies.items[cardid];
part2 += copies.items[cardid];
}
std.debug.print("part1 {} part2 {}\n", .{ part1, part2 });
// std.debug.assert(part1 == 21088);
// std.debug.assert(part2 == 6874754);
}
| https://raw.githubusercontent.com/travisstaloch/advent-of-code-2023/e9b7cc6003191bd45bf55277567531e9e5fe9a4a/04.zig |
const root = @import("root");
pub const c = if (@hasDecl(root, "loadable_extension"))
@import("c/loadable_extension.zig")
else
@cImport({
@cInclude("sqlite3.h");
});
// versionGreaterThanOrEqualTo returns true if the SQLite version is >= to the major.minor.patch provided.
pub fn versionGreaterThanOrEqualTo(major: u8, minor: u8, patch: u8) bool {
return c.SQLITE_VERSION_NUMBER >= @as(u32, major) * 1000000 + @as(u32, minor) * 1000 + @as(u32, patch);
}
comptime {
if (!versionGreaterThanOrEqualTo(3, 21, 0)) {
@compileError("must use SQLite >= 3.21.0");
}
}
| https://raw.githubusercontent.com/malcolmstill/clerk/06d23ad09ece6aaf74127a316d9512ff23cb883a/lib/zig-sqlite/c.zig |
const std = @import("std.zig");
const avr = @import("atmega328p.zig");
const led_pin: u8 = 5;
const loop_ms = 0x0a52;
const one_second = 63974;
fn bit(comptime b: u3) comptime u8 {
return (1 << b);
}
fn flipLed() void {
avr.portb.* ^= bit(led_pin);
}
// Timer interrupt.
// When this uses callconv(.Interrupt), llvm emits an extra sei
// instruction; using callconv(.Signal) avoids that.
export fn __vector_13() callconv(.Signal) void {
flipLed();
avr.tcnt1.* = one_second;
}
export fn main() noreturn {
avr.ddrb.* = bit(led_pin);
avr.portb.* = bit(led_pin);
avr.tcnt1.* = one_second;
avr.tccr1a.* = 0;
avr.tccr1b.* = bit(0) | bit(2); // clock select: clkio/1024
avr.timsk1.* = bit(0); // Interrupt on overflow enable
avr.sei();
while (true) {}
}
| https://raw.githubusercontent.com/ryanplusplus/avr-zig/7e240340a63d358665e04875d9cde7906d63e061/main.zig |
const std = @import("std");
pub fn insertionSort(comptime T: type, slice: []T) void {
for (1..slice.len) |i| {
var base = slice[i];
var j = i;
while (j >= 1 and slice[j - 1] > base) : (j -= 1) {
slice[j] = slice[j - 1];
}
slice[j] = base;
}
}
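// Partition step for quick sort: picks the median of slice[left], slice[mid] and
// slice[right] as the pivot (median-of-three), moves it to the left end, partitions
// the range around it, and returns the pivot's final index.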
fn partition(comptime T: type, slice: []T, left: usize, right: usize) usize {
var med = blk: {
var mid = @divFloor(left + right, 2);
if ((slice[left] < slice[mid]) != (slice[left] < slice[right])) {
break :blk left;
} else if ((slice[mid] < slice[left]) != (slice[mid] < slice[right])) {
break :blk mid;
}
break :blk right;
};
std.mem.swap(T, &slice[left], &slice[med]);
var i = left;
var j = right;
while (i < j) {
while (i < j and slice[j] >= slice[left]) j -= 1;
while (i < j and slice[i] <= slice[left]) i += 1;
std.mem.swap(T, &slice[i], &slice[j]);
}
std.mem.swap(T, &slice[i], &slice[left]);
return i;
}
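// Quick sort that recurses into the shorter partition and loops on the longer one,
// keeping the recursion depth at O(log n).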
pub fn quickSort(comptime T: type, slice: []T) void {
if (slice.len == 0) return;
var left: usize = 0;
var right = slice.len - 1;
while (left < right) {
var pivot = partition(T, slice, left, right);
if (pivot - left < right - pivot) {
quickSort(T, slice[left..pivot]);
left = pivot + 1;
} else {
quickSort(T, slice[pivot + 1 .. right + 1]);
right = pivot - 1;
}
}
}
pub fn mergeSort(comptime T: type, slice: []T) !void {
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
const allocator = gpa.allocator();
try mergePart(T, allocator, slice);
}
fn mergePart(comptime T: type, gpa: std.mem.Allocator, slice: []T) !void {
if (slice.len <= 1) return;
var mid = @divFloor(slice.len, 2);
try mergePart(T, gpa, slice[0..mid]);
try mergePart(T, gpa, slice[mid..]);
try merge(T, gpa, slice, mid);
}
fn merge(comptime T: type, gpa: std.mem.Allocator, slice: []T, mid: usize) !void {
var tmp = try gpa.alloc(T, slice.len);
defer gpa.free(tmp);
@memcpy(tmp, slice);
var i: usize = 0;
var j = mid;
var k: usize = 0;
while (k < slice.len) : (k += 1) {
if (i > mid - 1) {
slice[k] = tmp[j];
j += 1;
} else if (j >= slice.len or tmp[i] <= tmp[j]) {
slice[k] = tmp[i];
i += 1;
} else {
slice[k] = tmp[j];
j += 1;
}
}
}
pub fn ListMergeSort(comptime T: type) type {
return struct {
const L = std.SinglyLinkedList(T);
fn innerMerge(_a: ?*L.Node, _b: ?*L.Node) ?*L.Node {
var head = L.Node{ .data = 0 };
var c: ?*L.Node = &head;
var a = _a;
var b = _b;
while (a != null and b != null) {
if (a.?.data < b.?.data) {
c.?.next = a;
c = a;
a = a.?.next;
} else {
c.?.next = b;
c = b;
b = b.?.next;
}
}
c.?.next = if (a == null) b else a;
return head.next;
}
pub fn sort(list: *std.SinglyLinkedList(T)) void {
list.first = innerSort(list.first);
}
fn innerSort(first: ?*L.Node) ?*L.Node {
if (first == null or first.?.next == null) return first;
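            // Find the split point with a slow (c) / fast (b) pointer walk, cut the
            // list in two there, then merge the two recursively sorted halves.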
var c = first;
var a = c;
var b = c.?.next;
while (b != null and b.?.next != null) {
c = c.?.next;
b = b.?.next.?.next;
}
b = c.?.next;
c.?.next = null;
return innerMerge(innerSort(a), innerSort(b));
}
};
}
pub fn countingSort(slice: []u32) !void {
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
const allocator = gpa.allocator();
var max: usize = 0;
for (slice) |el| {
if (el > max) max = @intCast(el);
}
var counter = try allocator.alloc(usize, max + 1);
defer allocator.free(counter);
@memset(counter, 0);
for (slice) |el| {
counter[@intCast(el)] += 1;
}
for (0..max) |i| {
counter[i + 1] += counter[i];
}
var tmp = try allocator.dupe(u32, slice);
defer allocator.free(tmp);
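    // Place elements back-to-front so equal keys keep their relative order (stable).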
var pos = tmp.len;
while (pos > 0) : (pos -= 1) {
var num = tmp[pos - 1];
counter[num] -= 1;
slice[counter[num]] = num;
}
}
inline fn digit(el: u32, exp: u32) usize {
return @intCast(@rem(@divFloor(el, exp), 10));
}
fn countingSortDigit(gpa: std.mem.Allocator, slice: []u32, exp: u32) !void {
var counter = [_]u32{0} ** 10;
for (slice) |el| {
counter[digit(el, exp)] += 1;
}
for (1..10) |i| {
counter[i] += counter[i - 1];
}
var tmp = try gpa.dupe(u32, slice);
defer gpa.free(tmp);
var pos = tmp.len;
while (pos > 0) : (pos -= 1) {
var num = tmp[pos - 1];
var d = digit(num, exp);
counter[d] -= 1;
slice[counter[d]] = num;
}
}
pub fn radixSort(slice: []u32) !void {
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
const allocator = gpa.allocator();
var max: usize = 0;
for (slice) |el| {
if (el > max) max = @intCast(el);
}
var exp: u32 = 1;
while (exp <= max) : (exp *= 10) {
try countingSortDigit(allocator, slice, exp);
}
}
pub fn main() !void {
const printSlice = @import("print_util.zig").printSlice;
var arr = [_]u32{ 9, 2, 4, 1, 12, 0, 3, 14, 5, 8, 6, 7, 10, 15, 13, 11 };
quickSort(u32, &arr);
printSlice(u32, &arr);
var arr2 = [_]u32{ 9, 2, 4, 1, 12, 0, 3, 14, 5, 8, 6, 7, 10, 15, 13, 11 };
insertionSort(u32, &arr2);
printSlice(u32, &arr2);
var arr3 = [_]u32{ 9, 2, 4, 1, 12, 0, 3, 14, 5, 8, 6, 7, 10, 15, 13, 11 };
mergeSort(u32, &arr3) catch unreachable;
printSlice(u32, &arr3);
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
const allocator = gpa.allocator();
const Link = std.SinglyLinkedList(u32);
var link = Link{};
var arr4 = [_]u32{ 9, 2, 4, 1, 12, 0, 3, 14, 5, 8, 6, 7, 10, 15, 13, 11 };
for (arr4) |val| {
var node = try allocator.create(Link.Node);
node.* = .{ .data = val };
link.prepend(node);
}
ListMergeSort(u32).sort(&link);
var it = link.first;
while (it) |ptr| : (it = it.?.next) {
std.debug.print("{d}{s}", .{ ptr.data, if (ptr.next != null) ", " else "\n" });
}
var arr5 = [_]u32{ 1, 0, 1, 2, 0, 4, 0, 2, 2, 4 };
try countingSort(&arr5);
printSlice(u32, &arr5);
var arr6 = [_]u32{ 10546151, 35663510, 42865989, 34862445, 81883077, 88906420, 72429244, 30524779, 82060337, 63832996 };
try radixSort(&arr6);
printSlice(u32, &arr6);
}
| https://raw.githubusercontent.com/rsphing/algo.zig/66b4ddfcc48f9cd006e5f726145f2408b55a7848/sort.zig |
const std = @import("std");
const stdx = @import("./stdx.zig");
const MAX = 64;
const Stack = std.BoundedArray(u8, MAX);
const Stacks = [MAX]Stack;
const Move = struct {
from: u32,
to: u32,
count: u32,
fn parse(line: []u8) !Move {
var rest = line;
_ = stdx.cut(&rest, "move ") orelse return error.BadInput;
const count_str = stdx.cut(&rest, " ") orelse return error.BadInput;
_ = stdx.cut(&rest, "from ") orelse return error.BadInput;
const from_str = stdx.cut(&rest, " ") orelse return error.BadInput;
_ = stdx.cut(&rest, "to ") orelse return error.BadInput;
const to_str = rest;
return Move{
.from = (try std.fmt.parseInt(u32, from_str, 10)) - 1,
.to = (try std.fmt.parseInt(u32, to_str, 10)) - 1,
.count = try std.fmt.parseInt(u32, count_str, 10),
};
}
};
const Moves = std.ArrayList(Move);
const Pos = struct {
stack: usize,
offset: usize,
};
pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
const allocator = arena.allocator();
var lines = stdx.StdinLines.new();
const stdout = std.io.getStdOut().writer();
var stacks: Stacks = undefined;
std.mem.set(Stack, &stacks, Stack.init(0) catch unreachable);
var moves = Moves.init(allocator);
while (true) {
const line = try lines.next();
if (std.mem.indexOf(u8, line, "[") == null) break;
var i: usize = 0;
while (i < line.len) : (i += 4) {
const item = line[i + 1];
if (item != ' ') stacks[i / 4].addOneAssumeCapacity().* = item;
}
}
const stack_count = blk: {
var count: usize = 0;
var it = std.mem.tokenize(u8, lines.curr, " ");
while (it.next() != null) count += 1;
break :blk count;
};
{
const line = try lines.next();
if (line.len != 0) return error.BadInput;
}
while (try lines.next_opt()) |line| {
const move = try Move.parse(line);
(try moves.addOne()).* = move;
}
var states: [MAX]Pos = undefined;
for (states) |*s, i| s.* = Pos{ .stack = i, .offset = 0 };
{
var i: usize = moves.items.len;
while (i > 0) {
i -= 1;
const move = moves.items[i];
std.debug.assert(move.from != move.to);
for (states) |*s| {
if (s.stack == move.from) {
s.offset += move.count;
}
if (s.stack == move.to) {
if (s.offset < move.count) {
s.stack = move.from;
// s.offset = move.count - s.offset - 1;
} else {
s.offset -= move.count;
}
}
}
}
}
var result = std.BoundedArray(u8, MAX).init(stack_count) catch unreachable;
for (result.slice()) |*slot, i| {
const pos = states[i];
const stack = stacks[pos.stack].slice();
slot.* = stack[pos.offset];
}
try stdout.print("{s}\n", .{result.slice()});
}
| https://raw.githubusercontent.com/matklad/aoc2022/67800cfb0aa3dcb0103da06c0b6f2103f1494dc7/day5.zig |
const std = @import("std");
const testing = std.testing;
pub const SpinLock = struct {
pub const Held = struct {
self: *SpinLock,
pub fn release(held: Held) void {
@atomicStore(bool, &held.self.locked, false, .Release);
}
};
locked: bool = false,
pub fn acquire(self: *SpinLock) Held {
while (@atomicRmw(bool, &self.locked, .Xchg, true, .Acquire)) {
std.Thread.spinLoopHint();
}
return Held{ .self = self };
}
};
test {
testing.refAllDecls(@This());
}
test "sync/spin_lock: acquire and release" {
var lock: SpinLock = .{};
const held = lock.acquire();
defer held.release();
}
| https://raw.githubusercontent.com/lithdew/hyperia/c1d166f81b6f011d9a23ef818b620e68eee3f49a/sync.zig |
// --- Day 5: Sunny with a Chance of Asteroids ---
//
// You're starting to sweat as the ship makes its way toward Mercury. The Elves suggest that you
// get the air conditioner working by upgrading your ship computer to support the Thermal
// Environment Supervision Terminal.
//
// The Thermal Environment Supervision Terminal (TEST) starts by running a diagnostic program
// (your puzzle input). The TEST diagnostic program will run on your existing Intcode computer
// after a few modifications:
//
// First, you'll need to add two new instructions:
//
// Opcode 3 takes a single integer as input and saves it to the position given by its only
// parameter. For example, the instruction 3,50 would take an input value and store it at
// address 50.
// Opcode 4 outputs the value of its only parameter. For example, the instruction 4,50 would
// output the value at address 50.
//
// Programs that use these instructions will come with documentation that explains what should be
// connected to the input and output. The program 3,0,4,0,99 outputs whatever it gets as input,
// then halts.
//
// Second, you'll need to add support for parameter modes:
//
// Each parameter of an instruction is handled based on its parameter mode. Right now, your ship
// computer already understands parameter mode 0, position mode, which causes the parameter to be
// interpreted as a position - if the parameter is 50, its value is the value stored at address
// 50 in memory. Until now, all parameters have been in position mode.
//
// Now, your ship computer will also need to handle parameters in mode 1, immediate mode. In
// immediate mode, a parameter is interpreted as a value - if the parameter is 50, its value is
// simply 50.
//
// Parameter modes are stored in the same value as the instruction's opcode. The opcode is a
// two-digit number based only on the ones and tens digit of the value, that is, the opcode is the
// rightmost two digits of the first value in an instruction. Parameter modes are single digits,
// one per parameter, read right-to-left from the opcode: the first parameter's mode is in the
// hundreds digit, the second parameter's mode is in the thousands digit, the third parameter's
// mode is in the ten-thousands digit, and so on. Any missing modes are 0.
//
// For example, consider the program 1002,4,3,4,33.
//
// The first instruction, 1002,4,3,4, is a multiply instruction - the rightmost two digits of the
// first value, 02, indicate opcode 2, multiplication. Then, going right to left, the parameter
// modes are 0 (hundreds digit), 1 (thousands digit), and 0 (ten-thousands digit, not present and
// therefore zero):
//
// ABCDE
// 1002
//
// DE - two-digit opcode, 02 == opcode 2
// C - mode of 1st parameter, 0 == position mode
// B - mode of 2nd parameter, 1 == immediate mode
// A - mode of 3rd parameter, 0 == position mode,
// omitted due to being a leading zero
//
// This instruction multiplies its first two parameters. The first parameter, 4 in position mode,
// works like it did before - its value is the value stored at address 4 (33). The second parameter,
// 3 in immediate mode, simply has value 3. The result of this operation, 33 * 3 = 99, is written
// according to the third parameter, 4 in position mode, which also works like it did before - 99
// is written to address 4.
//
// Parameters that an instruction writes to will never be in immediate mode.
//
// Finally, some notes:
//
// It is important to remember that the instruction pointer should increase by the number of
// values in the instruction after the instruction finishes. Because of the new instructions,
// this amount is no longer always 4.
// Integers can be negative: 1101,100,-1,4,0 is a valid program (find 100 + -1, store the
// result in position 4).
//
// The TEST diagnostic program will start by requesting from the user the ID of the system to test
// by running an input instruction - provide it 1, the ID for the ship's air conditioner unit.
//
// It will then perform a series of diagnostic tests confirming that various parts of the Intcode
// computer, like parameter modes, function correctly. For each test, it will run an output
// instruction indicating how far the result of the test was from the expected value, where 0 means
// the test was successful. Non-zero outputs mean that a function is not working correctly; check
// the instructions that were run before the output instruction to see which one failed.
//
// Finally, the program will output a diagnostic code and immediately halt. This final output isn't
// an error; an output followed immediately by a halt means the program finished. If all outputs
// were zero except the diagnostic code, the diagnostic program ran successfully.
//
// After providing 1 to the only input instruction and passing all the tests, what diagnostic code
// does the program produce?
const std = @import("std");
const id = 1;
fn U(n: isize) usize {
return @intCast(usize, n);
}
fn P(mem: []const isize, m: isize, ip: usize) isize {
return switch (m) {
0 => mem[U(mem[ip])],
1 => mem[ip],
else => unreachable,
};
}
pub fn main() void {
var ip: usize = 0;
var mem = input;
program: while (true) {
const op = @mod(mem[ip], 100);
const m1 = @mod(@divFloor(mem[ip], 100), 10);
const m2 = @mod(@divFloor(mem[ip], 1000), 10);
const m3 = @mod(@divFloor(mem[ip], 10000), 10);
switch (op) {
1 => {
const p1 = P(mem[0..], m1, ip + 1);
const p2 = P(mem[0..], m2, ip + 2);
mem[U(mem[ip + 3])] = p1 + p2;
ip += 4;
},
2 => {
const p1 = P(mem[0..], m1, ip + 1);
const p2 = P(mem[0..], m2, ip + 2);
mem[U(mem[ip + 3])] = p1 * p2;
ip += 4;
},
3 => {
mem[U(mem[ip + 1])] = id;
ip += 2;
},
4 => {
const p1 = P(mem[0..], m1, ip + 1);
std.debug.warn("{}", p1);
ip += 2;
},
99 => {
break :program;
},
else => {
unreachable;
},
}
}
}
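// Hedged example (not part of the original solution): checks the ABCDE decoding
// described in the problem text above, using the instruction 1002 from its example.
test "parameter mode decoding of 1002" {
    const instr: isize = 1002;
    std.debug.assert(@mod(instr, 100) == 2); // DE: opcode 2 (multiply)
    std.debug.assert(@mod(@divFloor(instr, 100), 10) == 0); // C: 1st parameter, position mode
    std.debug.assert(@mod(@divFloor(instr, 1000), 10) == 1); // B: 2nd parameter, immediate mode
    std.debug.assert(@mod(@divFloor(instr, 10000), 10) == 0); // A: 3rd parameter, position mode
}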
const input = [_]isize{
3, 225, 1, 225, 6, 6, 1100, 1, 238, 225, 104, 0, 1102, 91,
92, 225, 1102, 85, 13, 225, 1, 47, 17, 224, 101, -176, 224, 224,
4, 224, 1002, 223, 8, 223, 1001, 224, 7, 224, 1, 223, 224, 223,
1102, 79, 43, 225, 1102, 91, 79, 225, 1101, 94, 61, 225, 1002, 99,
42, 224, 1001, 224, -1890, 224, 4, 224, 1002, 223, 8, 223, 1001, 224,
6, 224, 1, 224, 223, 223, 102, 77, 52, 224, 1001, 224, -4697, 224,
4, 224, 102, 8, 223, 223, 1001, 224, 7, 224, 1, 224, 223, 223,
1101, 45, 47, 225, 1001, 43, 93, 224, 1001, 224, -172, 224, 4, 224,
102, 8, 223, 223, 1001, 224, 1, 224, 1, 224, 223, 223, 1102, 53,
88, 225, 1101, 64, 75, 225, 2, 14, 129, 224, 101, -5888, 224, 224,
4, 224, 102, 8, 223, 223, 101, 6, 224, 224, 1, 223, 224, 223,
101, 60, 126, 224, 101, -148, 224, 224, 4, 224, 1002, 223, 8, 223,
1001, 224, 2, 224, 1, 224, 223, 223, 1102, 82, 56, 224, 1001, 224,
-4592, 224, 4, 224, 1002, 223, 8, 223, 101, 4, 224, 224, 1, 224,
223, 223, 1101, 22, 82, 224, 1001, 224, -104, 224, 4, 224, 1002, 223,
8, 223, 101, 4, 224, 224, 1, 223, 224, 223, 4, 223, 99, 0,
0, 0, 677, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1105, 0, 99999, 1105, 227, 247, 1105, 1, 99999, 1005, 227, 99999, 1005, 0,
256, 1105, 1, 99999, 1106, 227, 99999, 1106, 0, 265, 1105, 1, 99999, 1006,
0, 99999, 1006, 227, 274, 1105, 1, 99999, 1105, 1, 280, 1105, 1, 99999,
1, 225, 225, 225, 1101, 294, 0, 0, 105, 1, 0, 1105, 1, 99999,
1106, 0, 300, 1105, 1, 99999, 1, 225, 225, 225, 1101, 314, 0, 0,
106, 0, 0, 1105, 1, 99999, 8, 226, 677, 224, 102, 2, 223, 223,
1005, 224, 329, 1001, 223, 1, 223, 1007, 226, 226, 224, 1002, 223, 2,
223, 1006, 224, 344, 101, 1, 223, 223, 108, 226, 226, 224, 1002, 223,
2, 223, 1006, 224, 359, 1001, 223, 1, 223, 107, 226, 677, 224, 102,
2, 223, 223, 1006, 224, 374, 101, 1, 223, 223, 8, 677, 677, 224,
102, 2, 223, 223, 1006, 224, 389, 1001, 223, 1, 223, 1008, 226, 677,
224, 1002, 223, 2, 223, 1006, 224, 404, 101, 1, 223, 223, 7, 677,
677, 224, 1002, 223, 2, 223, 1005, 224, 419, 101, 1, 223, 223, 1108,
226, 677, 224, 1002, 223, 2, 223, 1005, 224, 434, 101, 1, 223, 223,
1108, 226, 226, 224, 102, 2, 223, 223, 1005, 224, 449, 1001, 223, 1,
223, 107, 226, 226, 224, 102, 2, 223, 223, 1005, 224, 464, 101, 1,
223, 223, 1007, 677, 677, 224, 102, 2, 223, 223, 1006, 224, 479, 101,
1, 223, 223, 1007, 226, 677, 224, 102, 2, 223, 223, 1005, 224, 494,
1001, 223, 1, 223, 1008, 226, 226, 224, 1002, 223, 2, 223, 1005, 224,
509, 1001, 223, 1, 223, 1108, 677, 226, 224, 1002, 223, 2, 223, 1006,
224, 524, 1001, 223, 1, 223, 108, 677, 677, 224, 1002, 223, 2, 223,
1005, 224, 539, 101, 1, 223, 223, 108, 226, 677, 224, 1002, 223, 2,
223, 1005, 224, 554, 101, 1, 223, 223, 1008, 677, 677, 224, 1002, 223,
2, 223, 1006, 224, 569, 1001, 223, 1, 223, 1107, 677, 677, 224, 102,
2, 223, 223, 1005, 224, 584, 1001, 223, 1, 223, 7, 677, 226, 224,
102, 2, 223, 223, 1005, 224, 599, 1001, 223, 1, 223, 8, 677, 226,
224, 1002, 223, 2, 223, 1005, 224, 614, 1001, 223, 1, 223, 7, 226,
677, 224, 1002, 223, 2, 223, 1006, 224, 629, 101, 1, 223, 223, 1107,
677, 226, 224, 1002, 223, 2, 223, 1005, 224, 644, 1001, 223, 1, 223,
1107, 226, 677, 224, 102, 2, 223, 223, 1006, 224, 659, 1001, 223, 1,
223, 107, 677, 677, 224, 1002, 223, 2, 223, 1005, 224, 674, 101, 1,
223, 223, 4, 223, 99, 226,
};
| https://raw.githubusercontent.com/tiehuis/advent-of-code-2019/07f48c42d0870a7030d21c08626fcc40a447e695/5_1.zig |
const std = @import("std");
const memory_size = 1 << 16;
const program_start = 0x3000;
const Reg = enum(u16) { R0 = 0, R1, R2, R3, R4, R5, R6, R7, PC, COND, COUNT };
const Op = enum(u16) { BR = 0, ADD, LD, ST, JSR, AND, LDR, STR, RTI, NOT, LDI, STI, JMP, RES, LEA, TRAP };
const Flags = enum(u16) {
POS = 1 << 0,
ZRO = 1 << 1,
NEG = 1 << 2,
};
const VM = struct {
memory: [memory_size]u16 = undefined,
reg: [@intFromEnum(Reg.COUNT)]u16 = undefined,
run: bool = false,
pub fn init() VM {
var vm = VM{};
vm.memory = [_]u16{0} ** memory_size;
vm.reg = [_]u16{0} ** @intFromEnum(Reg.COUNT);
vm.reg[@intFromEnum(Reg.PC)] = program_start;
vm.reg[@intFromEnum(Reg.COND)] = @intFromEnum(Flags.ZRO);
return vm;
}
pub fn start(self: *VM) void {
self.run = true;
// while (self.run) {
const program_counter = self.reg[@intFromEnum(Reg.PC)];
const instruction = self.memory[program_counter];
const op: Op = @enumFromInt(instruction >> 12);
switch (op) {
.ADD => self.op_add(instruction),
else => {},
}
self.reg[@intFromEnum(Reg.PC)] = self.reg[@intFromEnum(Reg.PC)] + 1;
// }
}
pub fn op_add(self: *VM, program_counter: u16) void {
_ = self;
_ = program_counter;
std.log.info("ADD", .{});
}
};
pub fn main() void {
var vm = VM.init();
vm.memory[program_start] = 0x1240;
vm.memory[program_start + 1] = 0;
std.log.info("PC: {}", .{vm.reg[@intFromEnum(Reg.PC)]});
vm.start();
std.log.info("PC: {}", .{vm.reg[@intFromEnum(Reg.PC)]});
vm.start();
}
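// Hedged example (not part of the original file): decoding the operand fields of the
// ADD instruction 0x1240 loaded in main(), assuming the standard LC-3 encoding
// (bits 11..9 = DR, bits 8..6 = SR1, bit 5 = immediate flag). It only illustrates
// what the op_add stub above would need to do.
test "decode ADD fields of 0x1240" {
    const instruction: u16 = 0x1240;
    const op: Op = @enumFromInt(instruction >> 12);
    try std.testing.expectEqual(Op.ADD, op);
    try std.testing.expectEqual(@as(u16, 1), (instruction >> 9) & 0x7); // DR = R1
    try std.testing.expectEqual(@as(u16, 1), (instruction >> 6) & 0x7); // SR1 = R1
    try std.testing.expectEqual(@as(u16, 0), (instruction >> 5) & 0x1); // register (not immediate) mode
}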
| https://raw.githubusercontent.com/unbalancedparentheses/lc3-vm.zig/9de025f029e546251d0611cb750f094c33706ce7/lc3.zig |
const std = @import("std");
const Card = struct {
nr: i9,
winning_nrs: [10]i8,
nrs: [25]i8,
};
fn parseNrStr(comptime T: type, buf: []const u8) !T {
var str = std.mem.trim(u8, buf, " ");
var it = std.mem.split(u8, str, " ");
var nrs: T = undefined;
var i: usize = 0;
while (it.next()) |nr_str| {
if (std.mem.eql(u8, nr_str, "")) continue;
nrs[i] = try std.fmt.parseInt(i8, nr_str, 10);
i += 1;
}
return nrs;
}
pub fn main() !void {
var input = try std.fs.cwd().openFile("input.txt", .{});
defer input.close();
var in_reader = std.io.bufferedReader(input.reader());
var in_stream = in_reader.reader();
var buf: [1024]u8 = undefined;
var total_points: usize = 0;
while (try in_stream.readUntilDelimiterOrEof(&buf, '\n')) |line| {
var it = std.mem.split(u8, line, ":");
var prefix = it.next().?;
var suffix = it.next().?;
prefix = std.mem.trimLeft(u8, prefix, "Card ");
var nr = try std.fmt.parseInt(i9, prefix, 10);
it = std.mem.split(u8, suffix, "|");
var winning_nrs = try parseNrStr([10]i8, it.next().?);
var nrs = try parseNrStr([25]i8, it.next().?);
var card = Card{
.nr = nr,
.winning_nrs = winning_nrs,
.nrs = nrs,
};
var found_nrs: usize = 0;
for (card.nrs) |card_nr| {
var is_found = false;
for (card.winning_nrs) |winning_nr| {
if (card_nr == winning_nr) is_found = true;
}
if (is_found) found_nrs += 1;
}
var points: usize = 0;
if (found_nrs > 0) {
points = std.math.pow(usize, 2, found_nrs - 1);
}
total_points += points;
}
std.debug.print("{}\n", .{total_points});
}
| https://raw.githubusercontent.com/froehlichA/aoc2023/2335f02043a267aed2d0e35c4fc6871b08a6dac5/04/1.zig |
const std = @import("std");
const data = @embedFile("input.txt");
fn readFirstNumber(
line: []const u8,
start_index: *usize,
end_index_exclusive: *usize,
) !?u32 {
var i = end_index_exclusive.*;
while (line.len > i and !std.ascii.isDigit(line[i])) {
i += 1;
}
if (line.len <= i) {
end_index_exclusive.* = i;
return null;
}
var j = i;
    while (line.len > j and std.ascii.isDigit(line[j])) {
j += 1;
}
start_index.* = i;
end_index_exclusive.* = j;
return try std.fmt.parseInt(u32, line[i..j], 10);
}
fn handleAround(around: [3]?[]const u8, sum: *u32) !void {
if (around[1]) |center| {
var i: usize = 0;
var j: usize = 0;
while (try readFirstNumber(center, &i, &j)) |num| {
const lower_bound: usize =
@intCast(@max(0, @as(isize, @intCast(i)) - 1));
const upper_bound = @min(center.len - 1, j + 1);
var success = false;
for (around) |single_arround| {
if (single_arround) |single_arround_not_null| {
for (lower_bound..upper_bound) |index| {
const c = single_arround_not_null[index];
if (!std.ascii.isDigit(c) and c != '.') {
success = true;
break;
}
}
}
if (success) {
break;
}
}
if (success) {
sum.* += num;
}
}
}
}
pub fn main() !void {
var lines = std.mem.tokenizeScalar(u8, data, '\n');
var sum: u64 = 0;
while (lines.next()) |line| {
var colon = false;
var pipe = false;
var num_start: ?usize = null;
const allocator = std.heap.page_allocator;
var winning_list = std.ArrayList(u8).init(allocator);
defer winning_list.deinit();
var winning_count: usize = 0;
for (line, 0..) |c, idx| {
if (!colon) {
if (c == ':') {
colon = true;
}
continue;
}
if (c == ' ') {
if (num_start) |num_start_nn| {
const num = try std.fmt.parseInt(
u8,
line[num_start_nn..idx],
10,
);
num_start = null;
if (!pipe) {
try winning_list.append(num);
} else {
for (winning_list.items) |winning| {
if (num == winning) {
winning_count += 1;
break;
}
}
}
}
continue;
}
if (c == '|') {
pipe = true;
continue;
}
if (std.ascii.isDigit(c) and num_start == null) {
num_start = idx;
}
}
if (num_start) |num_start_nn| {
const num = try std.fmt.parseInt(
u8,
line[num_start_nn .. line.len - 1],
10,
);
for (winning_list.items) |winning| {
if (num == winning) {
winning_count += 1;
break;
}
}
}
if (winning_count > 0) {
sum += std.math.pow(usize, 2, winning_count - 1);
}
}
std.debug.print("{}\n", .{sum});
}
| https://raw.githubusercontent.com/kkard2/aoc2023/6ec8e32bd8b1bc411c37748d1c2659a3b6ffec3d/04/a.zig |
const std = @import("std");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const VEC_SIZE_F32 = std.simd.suggestVectorSize(f32) orelse 4;
// XXX: Because of a limitation of the build system in zig v0.11, we cannot
// switch between `tracy_full` and `tracy_stub` by passing compilation flags,
// so we have to do this kind of "conditional import". See also the section
// "conditional compilation" in "docs/ISSUES.md".
const use_tracy = @import("build_options").use_tracy;
const ztracy = if (use_tracy) @import("ztracy");
const tracy_wrapper_stub = struct {
pub inline fn startZone(
_: std.builtin.SourceLocation,
_: [*:0]const u8,
_: u64,
) void {}
pub inline fn endZone(_: *const anyopaque) void {}
};
const tracy_wrapper_full = struct {
pub inline fn startZone(
src_loc: std.builtin.SourceLocation,
name: [*:0]const u8,
color: u64,
) ztracy.ZoneCtx {
const zone = if (use_tracy) ztracy.ZoneNC(src_loc, name, color);
return zone;
}
pub inline fn endZone(zone: *const anyopaque) void {
if (use_tracy) @as(*ztracy.ZoneCtx, @constCast(@alignCast(@ptrCast(zone)))).End();
}
};
const TracyWrapper = if (use_tracy) tracy_wrapper_full else tracy_wrapper_stub;
// Helper function for development
fn printStruct(s: anytype) void {
inline for (std.meta.fields(@TypeOf(s))) |f| {
std.debug.print(f.name ++ ": {any}\n", .{@as(f.type, @field(s, f.name))});
}
}
// For model exported by `legacy_export()` (v0)
// NOTE: We should use `extern struct` as it supports guaranteed layout.
// Otherwise, `std.io.Reader.readStruct()` would fail.
pub const Config = extern struct {
dim: i32, // transformer dimension (model.params.dim)
hidden_dim: i32,
n_layers: i32,
n_heads: i32,
n_kv_heads: i32,
vocab_size: i32,
seq_len: i32,
};
pub const TransformerWeights = struct {
token_embedding_table: [*]f32, // (vocab_size, dim)
rms_att_weight: [*]f32, // (layer, dim)
rms_ffn_weight: [*]f32, // (layer, dim)
// weights for matmuls. note dim == n_heads * head_size
wq: [*]f32, // (layer, dim, n_heads * head_size)
wk: [*]f32, // (layer, dim, n_kv_heads * head_size)
wv: [*]f32, // (layer, dim, n_kv_heads * head_size)
wo: [*]f32, // (layer, n_heads * head_size, dim)
// weights for ffn
w1: [*]f32, // (layer, hidden_dim, dim)
w2: [*]f32, // (layer, dim, hidden_dim)
w3: [*]f32, // (layer, hidden_dim, dim)
// final rmsnorm
rms_final_weight: [*]f32, // (dim,)
// (optional) classifier weights for the logits, on the last layer
wcls: [*]f32,
// NOTE: Here we follow the way to mmap weights in `llama2.c/runq.c` by
// taking `*anyopaque` without presuming all weights are f32.
pub fn init(p: *Config, weights_ptr: *anyopaque, shared_weights: bool) TransformerWeights {
var w: TransformerWeights = undefined;
// NOTE: cast i32 to usize to avoid overflow for 13B+ models.
const dim: usize = @intCast(p.dim);
const hidden_dim: usize = @intCast(p.hidden_dim);
const n_layers: usize = @intCast(p.n_layers);
const n_heads: usize = @intCast(p.n_heads);
const n_kv_heads: usize = @intCast(p.n_kv_heads);
const vocab_size: usize = @intCast(p.vocab_size);
const seq_len: usize = @intCast(p.seq_len);
const head_size: usize = dim / n_heads;
var ptr: [*]f32 = @alignCast(@ptrCast(weights_ptr));
w.token_embedding_table = ptr;
ptr += vocab_size * dim;
w.rms_att_weight = ptr;
ptr += n_layers * dim;
w.wq = ptr;
ptr += n_layers * dim * (n_heads * head_size);
w.wk = ptr;
ptr += n_layers * dim * (n_kv_heads * head_size);
w.wv = ptr;
ptr += n_layers * dim * (n_kv_heads * head_size);
w.wo = ptr;
ptr += n_layers * (n_heads * head_size) * dim;
w.rms_ffn_weight = ptr;
ptr += n_layers * dim;
w.w1 = ptr;
ptr += n_layers * dim * hidden_dim;
w.w2 = ptr;
ptr += n_layers * hidden_dim * dim;
w.w3 = ptr;
ptr += n_layers * dim * hidden_dim;
w.rms_final_weight = ptr;
ptr += dim;
ptr += seq_len * head_size / 2; // skip what used to be freq_cis_real (for RoPE)
ptr += seq_len * head_size / 2; // skip what used to be freq_cis_imag (for RoPE)
w.wcls = if (shared_weights) w.token_embedding_table else ptr;
return w;
}
};
const RunState = struct {
x: []f32, // activation at current time stamp (dim,)
xb: []f32,
xb2: []f32,
hb: []f32, // buffer for hidden dimension in the ffn (hidden_dim,)
hb2: []f32,
q: []f32, // query (dim,)
// NOTE: we don't need to allocate memory for k, v as we can point them to
// kv caches.
// https://github.com/karpathy/llama2.c/blob/b3c4b6c/run.c#L255-L257
// https://github.com/karpathy/llama2.c/pull/400
k: []f32 = undefined, // key (dim,)
v: []f32 = undefined, // value (dim,)
att: []f32, // buffer for scores/attention values (n_heads, seq_len)
logits: []f32, // output logits, distribution of vocabulary (vocab_size)
key_cache: []f32, // (layer, seq_len, dim)
value_cache: []f32, // (layer, seq_len, dim)
pub fn init(p: *const Config, allocator: Allocator) !RunState {
const dim: usize = @intCast(p.dim);
const hidden_dim: usize = @intCast(p.hidden_dim);
const n_layers: usize = @intCast(p.n_layers);
const n_heads: usize = @intCast(p.n_heads);
const n_kv_heads: usize = @intCast(p.n_kv_heads);
const vocab_size: usize = @intCast(p.vocab_size);
const seq_len: usize = @intCast(p.seq_len);
const kv_dim: usize = (dim * n_kv_heads) / n_heads;
// TODO: consider alignment for SIMD?
// https://github.com/cgbur/llama2.zig/blob/main/src/main.zig#L140C32-L152
return RunState{
.x = try allocator.alloc(f32, dim),
.xb = try allocator.alloc(f32, dim),
.xb2 = try allocator.alloc(f32, dim),
.hb = try allocator.alloc(f32, hidden_dim),
.hb2 = try allocator.alloc(f32, hidden_dim),
.q = try allocator.alloc(f32, dim),
.key_cache = try allocator.alloc(f32, n_layers * seq_len * kv_dim),
.value_cache = try allocator.alloc(f32, n_layers * seq_len * kv_dim),
.att = try allocator.alloc(f32, n_heads * seq_len),
.logits = try allocator.alloc(f32, vocab_size),
};
}
pub fn deinit(self: RunState, allocator: Allocator) void {
allocator.free(self.x);
allocator.free(self.xb);
allocator.free(self.xb2);
allocator.free(self.hb);
allocator.free(self.hb2);
allocator.free(self.q);
allocator.free(self.key_cache);
allocator.free(self.value_cache);
allocator.free(self.att);
allocator.free(self.logits);
}
};
// ----------------------------------------------------------------------
pub const Transformer = struct {
config: Config = undefined,
weights: TransformerWeights = undefined,
state: RunState = undefined,
    // XXX: In llama2.c, `fd` was kept open and closed manually only when the
    // program was about to exit, but we can actually close it right after mmap is done.
fd: std.fs.File = undefined,
data: *anyopaque = undefined,
file_size: u64 = undefined,
pub fn forward(self: *Transformer, token: u32, pos: u32) []f32 {
const p = self.config;
const w = self.weights;
var s = self.state;
var x = s.x;
const dim: usize = @intCast(p.dim);
const hidden_dim: usize = @intCast(p.hidden_dim);
const n_layers: usize = @intCast(p.n_layers);
const n_heads: usize = @intCast(p.n_heads);
const n_kv_heads: usize = @intCast(p.n_kv_heads);
const vocab_size: usize = @intCast(p.vocab_size);
const seq_len: usize = @intCast(p.seq_len);
const kv_dim: usize = (dim * n_kv_heads) / n_heads;
const kv_mul: usize = n_heads / n_kv_heads; // integer multiplier of the kv sharing in multiquery
const head_size: usize = dim / n_heads;
const content_row = w.token_embedding_table[(dim * token)..(dim * (token + 1))];
@memcpy(x, content_row);
// forward all the layers
for (0..n_layers) |l| {
// attention rmsnorm
rmsnorm(s.xb, x, w.rms_att_weight[l * dim .. (l + 1) * dim]);
// key and value point to the kv cache
const loff = l * seq_len * kv_dim;
s.k = s.key_cache[(loff + pos * kv_dim)..(loff + (pos + 1) * kv_dim)];
s.v = s.value_cache[(loff + pos * kv_dim)..(loff + (pos + 1) * kv_dim)];
// op: `xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)`
// src: Attention.forward()
matmul(s.q, s.xb, w.wq[l * dim * dim .. (l + 1) * dim * dim], dim, dim);
matmul(s.k, s.xb, w.wk[l * dim * kv_dim .. (l + 1) * dim * kv_dim], dim, kv_dim);
matmul(s.v, s.xb, w.wv[l * dim * kv_dim .. (l + 1) * dim * kv_dim], dim, kv_dim);
// RoPE relative positional encoding
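            // Each consecutive pair (v0, v1) is rotated by the angle pos * freq, where
            // freq = 1 / 10000^((j % head_size) / head_size); the key vector is only
            // rotated for j < kv_dim (rotn).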
var j: usize = 0;
while (j < dim) : (j += 2) {
const head_dim: f32 = @floatFromInt(j % head_size);
const freq: f32 = 1.0 / std.math.pow(f32, 10000.0, head_dim / @as(f32, @floatFromInt(head_size)));
const val: f32 = @as(f32, @floatFromInt(pos)) * freq;
const fcr = std.math.cos(val);
const fci = std.math.sin(val);
const rotn: usize = if (j < kv_dim) 2 else 1; // how many vectors? 2 = q & k, 1 = q only
for (0..rotn) |v| {
const vec = if (v == 0) s.q else s.k;
const v0 = vec[j];
const v1 = vec[j + 1];
vec[j] = v0 * fcr - v1 * fci;
vec[j + 1] = v0 * fci + v1 * fcr;
}
}
// multihead attention. iterate over all heads
for (0..n_heads) |h| {
// get the query vector for this head
const q = s.q[h * head_size .. (h + 1) * head_size];
// attention scores for this head
const att = s.att[h * seq_len .. (h + 1) * seq_len];
// iterate over all timesteps, including the current one
for (0..pos + 1) |t| {
const il: usize = loff + t * kv_dim + (h / kv_mul) * head_size;
const ir = il + head_size;
const k = s.key_cache[il..ir];
var score: f32 = 0.0;
for (0..head_size) |i| {
score += q[i] * k[i];
}
score /= std.math.sqrt(@as(f32, @floatFromInt(head_size)));
att[t] = score;
}
// softmax the scores to get attention weights, from 0..pos inclusively
                // NOTE: in `Attention.forward()` in model.py, this corresponds to an
                // upper-triangular mask filled with -inf.
softmax(att[0 .. pos + 1]);
// weighted sum of the values, store back into xb
var xb = s.xb[h * head_size .. (h + 1) * head_size];
@memset(xb, 0.0);
for (0..pos + 1) |t| {
const il: usize = loff + t * kv_dim + (h / kv_mul) * head_size;
const ir = il + head_size;
const v = s.value_cache[il..ir];
const a = att[t];
for (0..head_size) |i| {
xb[i] += a * v[i];
}
}
}
// final matmul to get the output of the attention
// op: `output = self.wo(output)`
// src: Attention.forward()
matmul(s.xb2, s.xb, w.wo[l * dim * dim .. (l + 1) * dim * dim], dim, dim);
// residual connection back into x
// op: `h = x + self.attention.forward(...)`
// src: TransformerBlock.forward()
for (0..dim) |i| {
x[i] += s.xb2[i];
}
// ffn rmsnorm
rmsnorm(s.xb, x, w.rms_ffn_weight[l * dim .. (l + 1) * dim]);
// Now for FFN in PyTorch we have: self.w2(F.silu(self.w1(x)) * self.w3(x))
matmul(s.hb, s.xb, w.w1[l * dim * hidden_dim .. (l + 1) * dim * hidden_dim], dim, hidden_dim);
matmul(s.hb2, s.xb, w.w3[l * dim * hidden_dim .. (l + 1) * dim * hidden_dim], dim, hidden_dim);
// SwiGLU non-linearity
for (0..hidden_dim) |i| {
var val: f32 = s.hb[i];
// silu(x)=x*σ(x), where σ(x) is the logistic sigmoid
val *= (1.0 / (1.0 + std.math.exp(-val)));
// elementwise multiply with w3(x)
val *= s.hb2[i];
s.hb[i] = val;
}
// final matmul to get the output of the ffn
matmul(s.xb, s.hb, w.w2[l * dim * hidden_dim .. (l + 1) * dim * hidden_dim], hidden_dim, dim);
// residual connection
for (0..dim) |i| {
x[i] += s.xb[i];
}
}
// final rmsnorm
rmsnorm(x, x, w.rms_final_weight[0..dim]);
// classifier into logits
matmul(s.logits, x, w.wcls[0 .. dim * vocab_size], dim, vocab_size);
return s.logits;
}
};
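/// RMSNorm: o[j] = weight[j] * x[j] / sqrt(mean(x.^2) + 1e-5).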
pub fn rmsnorm(o: []f32, x: []f32, weight: []f32) void {
assert(o.len == x.len);
assert(o.len == weight.len);
const size = o.len;
var ss: f32 = 0.0;
    // calculate sum of squares
for (0..size) |j| {
ss += x[j] * x[j];
}
ss /= @as(f32, @floatFromInt(size));
ss += 1e-5;
ss = 1.0 / std.math.sqrt(ss);
// normalize and scale
for (0..size) |j| {
o[j] = weight[j] * (ss * x[j]);
}
}
pub fn softmax(x: []f32) void {
const size = x.len;
// find max value (for numerical stability)
var max_val = x[0];
for (1..size) |i| {
if (x[i] > max_val) {
max_val = x[i];
}
}
// exp and sum
var sum: f32 = 0.0;
for (0..size) |i| {
x[i] = std.math.exp(x[i] - max_val);
sum += x[i];
}
// normalize
for (0..size) |i| {
x[i] /= sum;
}
}
/// Matrix multiplication: W (d,n) @ x (n,) -> xout (d,)
pub fn matmul(xout: []f32, x: []f32, w: []f32, n: usize, d: usize) void {
const zone = TracyWrapper.startZone(@src(), "matmul", 0x00_00_ff_00);
defer TracyWrapper.endZone(&zone);
// matmul_naive(xout, x, w, n, d);
matmul_simd(xout, x, w, n, d);
}
fn matmul_naive(xout: []f32, x: []f32, w: []f32, n: usize, d: usize) void {
for (0..d) |i| {
var val: f32 = 0.0;
for (0..n) |j| {
val += w[i * n + j] * x[j];
}
xout[i] = val;
}
}
fn matmul_simd(xout: []f32, x: []f32, w: []f32, n: usize, d: usize) void {
const vec_sz = VEC_SIZE_F32;
const n_vec: usize = n / vec_sz;
const n_rem: usize = n % vec_sz;
for (0..d) |i| {
var val: f32 = 0.0;
const offset: usize = i * n;
var vsum: @Vector(vec_sz, f32) = @splat(0.0);
for (0..n_vec) |nv| {
// NOTE: SIMD vector requires a known size at compile time, so we
// need to access slice like this.
const vx: @Vector(vec_sz, f32) = x[nv * vec_sz ..][0..vec_sz].*;
const vw: @Vector(vec_sz, f32) = w[offset + nv * vec_sz ..][0..vec_sz].*;
vsum += vx * vw;
}
val = @reduce(.Add, vsum);
// Process remaining elements
const offset2: usize = vec_sz * n_vec;
for (0..n_rem) |j| {
val += w[offset + offset2 + j] * x[offset2 + j];
}
xout[i] = val;
}
}
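// Hedged example (not part of the original source): a tiny check that the naive and
// SIMD matmul helpers agree. W is 2x3 row-major, x has length 3, so xout = W @ x has
// length 2. The private helpers are called directly so the Tracy zone wrapper is not involved.
test "matmul: naive and SIMD versions agree on a small example" {
    var w = [_]f32{ 1, 2, 3, 4, 5, 6 };
    var x = [_]f32{ 1, 1, 1 };
    var out_naive = [_]f32{ 0, 0 };
    var out_simd = [_]f32{ 0, 0 };
    matmul_naive(&out_naive, &x, &w, 3, 2);
    matmul_simd(&out_simd, &x, &w, 3, 2);
    try std.testing.expectApproxEqAbs(@as(f32, 6.0), out_naive[0], 1e-5);
    try std.testing.expectApproxEqAbs(@as(f32, 15.0), out_naive[1], 1e-5);
    try std.testing.expectApproxEqAbs(out_naive[0], out_simd[0], 1e-5);
    try std.testing.expectApproxEqAbs(out_naive[1], out_simd[1], 1e-5);
}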
/// Read checkpoint and initialize transformer. Note that user is responsible to
/// call `freeTransformer()` to delete the memory mapping.
pub fn readCheckpoint(
checkpoint: []const u8,
transformer: *Transformer,
use_mmap: bool,
allocator: Allocator,
) !void {
const file = try std.fs.cwd().openFile(checkpoint, .{ .mode = .read_only });
// NOTE: we can close file after `mmap()` call has returned
defer file.close();
var config: *Config = &transformer.config;
config.* = try file.reader().readStruct(Config);
// XXX: (llama2.c) negative vocab size -> unshared weights
const shared_weights: bool = config.vocab_size > 0;
config.vocab_size = try std.math.absInt(config.vocab_size);
transformer.file_size = (try file.stat()).size;
// Reposition to the head of file. Offset of `Config` will be handled later.
try file.seekTo(0);
var data: []align(std.mem.page_size) u8 = undefined;
if (use_mmap) {
data = try std.os.mmap(
null,
transformer.file_size,
std.os.PROT.READ,
std.os.MAP.PRIVATE,
file.handle,
0,
);
} else {
data = blk: {
const buffer = try allocator.alignedAlloc(u8, std.mem.page_size, transformer.file_size);
const read_len = try file.readAll(buffer);
if (read_len != transformer.file_size) {
std.debug.print("error: failed to read checkpoint file\n", .{});
return std.os.ReadError.OperationAborted;
}
break :blk buffer;
};
}
transformer.data = @ptrCast(data);
// View `data` as `void*` from C perspective (`*anyopaque` in zig)
var weights_ptr: *anyopaque = @ptrCast(data);
// View `weights_ptr` in byte (u8), and offset it with the size of `Config`.
// So that we don't need to assume all fields in `Config` are the same type.
weights_ptr = @as([*]u8, @ptrCast(weights_ptr)) + @sizeOf(Config);
transformer.weights = TransformerWeights.init(config, weights_ptr, shared_weights);
}
fn buildTransformer(
transformer: *Transformer,
checkpoint_path: []const u8,
use_mmap: bool,
allocator: Allocator,
) !void {
try readCheckpoint(checkpoint_path, transformer, use_mmap, allocator);
transformer.state = try RunState.init(&transformer.config, allocator);
}
fn freeTransformer(transformer: *Transformer, use_mmap: bool, allocator: Allocator) void {
// Cast pointer of mmap data from `*anyopaque` to the original output type
// `[]align(std.mem.page_size) u8`.
const data = @as(
[*]align(std.mem.page_size) u8,
@alignCast(@ptrCast(transformer.data)),
)[0..transformer.file_size];
if (use_mmap) {
// Delete memory mapping
std.os.munmap(data);
} else {
allocator.free(data);
}
transformer.state.deinit(allocator);
}
// ----------------------------------------------------------------------
pub const TokenIndex = struct {
str: []const u8,
id: u32,
/// Comparator. True: a < b.
pub fn desc(_: void, a: TokenIndex, b: TokenIndex) bool {
return strcmp(a.str, b.str) < 0;
}
};
pub const Tokenizer = struct {
vocab: [][]u8 = undefined,
vocab_scores: []f32 = undefined,
sorted_vocab: ?[]TokenIndex = null,
vocab_size: i32 = undefined,
max_token_length: u32 = undefined,
byte_pieces: [256]u8 = undefined, // stores all single-byte strings
pub fn init(tokenizer_path: []const u8, vocab_size: i32, allocator: Allocator) !Tokenizer {
var t = Tokenizer{};
// NOTE: vocab_size might be written into tokenizer file in the future,
// then we could change this accordingly.
t.vocab_size = vocab_size;
const n_vocab: usize = @intCast(vocab_size);
t.vocab = try allocator.alloc([]u8, n_vocab);
t.vocab_scores = try allocator.alloc(f32, n_vocab);
// NOTE: every element in `byte_pieces` will be used as a slice with
// length 1, so that we don't need to append a null terminator to it.
for (0..256) |i| {
t.byte_pieces[i] = @intCast(i);
}
const file = try std.fs.cwd().openFile(tokenizer_path, .{ .mode = .read_only });
defer file.close();
var buf_x32: [4]u8 = undefined;
var buffered_file = std.io.bufferedReader(file.reader());
// number of bytes read
var nb_read = try buffered_file.read(&buf_x32);
if (nb_read != 4) {
std.debug.print("failed read\n", .{});
return std.fs.File.ReadError.Unexpected;
}
t.max_token_length = std.mem.readIntSliceLittle(u32, &buf_x32);
// read tokens, lengths of tokens, and scores
var len: i32 = undefined;
for (0..n_vocab) |i| {
// score
nb_read = try buffered_file.read(&buf_x32);
if (nb_read != 4) {
std.debug.print("failed read\n", .{});
return std.fs.File.ReadError.Unexpected;
}
t.vocab_scores[i] = @bitCast(buf_x32);
// length of token
nb_read = try buffered_file.read(&buf_x32);
if (nb_read != 4) {
std.debug.print("failed read\n", .{});
return std.fs.File.ReadError.Unexpected;
}
len = @bitCast(buf_x32);
// token
// NOTE: here we make use of zig's slice since it contains length
// information of a sequence, so we don't need to append a sentinel
// ('\x00') to the end of a string. However, if we do need it, we
// can call `allocator.allocSentinel()` to allocate a buffer which
// ends with a sentinel while the sentinel char is not counted into
// `buffer.len` (this is useful for reading data in zig style since
// the number of bytes to read is determined by length of the buffer).
t.vocab[i] = try allocator.alloc(u8, @intCast(len));
nb_read = try buffered_file.read(t.vocab[i]);
if (nb_read != len) {
std.debug.print("failed read\n", .{});
return std.fs.File.ReadError.Unexpected;
}
}
return t;
}
pub fn deinit(self: Tokenizer, allocator: Allocator) void {
for (0..self.vocab.len) |i| {
allocator.free(self.vocab[i]);
}
allocator.free(self.vocab);
allocator.free(self.vocab_scores);
if (self.sorted_vocab != null) {
allocator.free(self.sorted_vocab.?);
}
}
pub fn strLookup(self: Tokenizer, str: []const u8) ?u32 {
const tok = TokenIndex{ .str = str, .id = undefined };
// NOTE: `bsearch` in C returns a pointer, this returns an index.
const res = std.sort.binarySearch(TokenIndex, tok, self.sorted_vocab.?, {}, compareToken);
const idx = res orelse return null;
const tok_id = self.sorted_vocab.?[idx].id;
return tok_id;
}
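    /// Encode `text` into the caller-allocated `tokens` buffer using BPE merges.
    /// `bos`/`eos` control whether the BOS (=1) / EOS (=2) tokens are added.
    /// Returns the number of tokens written.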
pub fn encode(
self: *Tokenizer,
text: []const u8,
bos: bool,
eos: bool,
tokens: []u32,
allocator: Allocator,
) !u32 {
        // NOTE: `encode` lazily initializes `sorted_vocab`, i.e. it mutates the
        // Tokenizer, which is why `self` is taken as a pointer here.
if (self.sorted_vocab == null) {
// lazily initialize the vocabulary
const n_vocab: usize = @intCast(self.vocab_size);
self.sorted_vocab = try allocator.alloc(TokenIndex, n_vocab);
for (0..n_vocab) |i| {
self.sorted_vocab.?[i] = TokenIndex{
.str = self.vocab[i],
.id = @intCast(i),
};
}
// sort vocab
            std.sort.pdq(TokenIndex, self.sorted_vocab.?, {}, TokenIndex.lessThan);
}
// (llama2.c) Temporary buffer to store merge candidates of always two
// consecutive tokens. *2 for concat, +1 for null terminator, +2 for
// UTF8 (in case max_token_length is 1).
var str_buffer = try allocator.alloc(u8, self.max_token_length * 2 + 1 + 2);
defer allocator.free(str_buffer);
var str_len: usize = 0;
var n_tokens: u32 = 0; // retval
if (bos) {
tokens[n_tokens] = 1;
n_tokens += 1;
}
// add dummy prefix
        // TODO: read through the sentencepiece source code to figure out how
        // the dummy prefix is handled exactly.
if (text.len != 0) {
const dummy_prefix = self.strLookup(" ").?;
tokens[n_tokens] = dummy_prefix;
n_tokens += 1;
}
// process the raw (UTF-8) byte sequence of the input string
for (0..text.len) |i| {
const c = text[i];
// Check whether the highest 2 bits are 10 (0b10xxxxxx)
// mask: 0xC0 (0b11000000)
if ((c & 0xC0) != 0x80) {
str_len = 0;
}
str_buffer[str_len] = c;
str_len += 1;
            // NOTE: we don't need to set the last byte to null every time here;
            // see the comment related to `strLookup` below.
// str_buffer[str_len] = '\x00';
// NOTE: we will peek the next byte in text, so we need to make
// sure the index won't exceed the length of it. (in llama2.c, this
// loop checks with null terminator, so it doesn't need to do so)
if ((i + 1) < text.len and (text[i + 1] & 0xC0) == 0x80 and str_len < 4) {
continue;
}
// NOTE: (IMPORTANT!) since our implementation of `strcmp` checks
// with length of string instead of the null terminator, we need to
// pass a `slice` instead of the whole buffer to search.
const lookup_result = self.strLookup(str_buffer[0..str_len]);
if (lookup_result != null) {
tokens[n_tokens] = lookup_result.?;
n_tokens += 1;
} else {
// fallback: encode each byte literally
for (0..str_len) |j| {
// +3: offset for the first 3 vocabs (<unk>, <s>, </s>)
                    tokens[n_tokens] = @as(u32, str_buffer[j]) + 3; // widen first so bytes >= 253 don't overflow u8
n_tokens += 1;
}
}
str_len = 0;
}
while (true) {
var best_score: f32 = -std.math.inf(f32);
var best_id: ?u32 = null;
var best_idx: ?usize = null;
for (0..(n_tokens - 1)) |i| {
const token1 = self.vocab[tokens[i]];
const token2 = self.vocab[tokens[i + 1]];
_ = try std.fmt.bufPrint(str_buffer, "{s}{s}", .{ token1, token2 });
var len = token1.len + token2.len;
const lookup_result = self.strLookup(str_buffer[0..len]);
if (lookup_result != null and self.vocab_scores[lookup_result.?] > best_score) {
const id = lookup_result.?;
best_score = self.vocab_scores[id];
best_id = id;
best_idx = i;
}
}
if (best_idx == null) {
break; // cannot find any more pairs to merge, so quit this loop
}
// merge the consecutive pair (best_idx, best_idx+1) into new token best_id
tokens[best_idx.?] = best_id.?;
// delete token at position best_idx+1, shift the entire sequence back 1
for ((best_idx.? + 1)..(n_tokens - 1)) |i| {
tokens[i] = tokens[i + 1];
}
n_tokens -= 1;
}
if (eos) {
tokens[n_tokens] = 2;
n_tokens += 1;
}
return n_tokens;
}
    // NOTE: `self` must be a pointer: the returned slice may point into
    // `self.byte_pieces`, and with a by-value `self` that slice would refer to
    // a temporary copy that no longer exists after this call (i.e. gibberish).
pub fn decode(self: *Tokenizer, prev_token: u32, token: u32) []u8 {
var piece: []u8 = self.vocab[token];
// NOTE: (llama2.c) following BOS token, sentencepiece decoder strips
// any leading whitespace.
if (prev_token == 1 and piece[0] == ' ') {
piece = piece[1..];
}
        // In llama2.c, `piece` is checked against the pattern "<0x%02hhX>",
        // which breaks down into:
        // - "<0x": literally match these characters
        // - "%02hhX": match a 2-digit value
        //   - "02": pad to 2 digits with leading zeros if necessary
        //   - "hh": the value is an unsigned char (1 byte)
        //   - "X": interpret the digits as hexadecimal
        // - ">": literally match it
if (piece.len == 6 and piece[0] == '<' and piece[5] == '>') {
const byte_val: u8 = std.fmt.parseUnsigned(u8, piece[1..5], 0) catch |err| switch (err) {
else => {
std.log.err("Failed to parse token, id: {d}\n", .{token});
return piece;
},
};
// NOTE: type coercion explanation (`...` denotes the former item)
// 1. `self.byte_pieces[byte_val]`: u8
// 2. `&...`: *u8 (a single-item pointer to u8)
// 3. `@as(*[1]u8, ...)`: *[1]u8 (a pointer to a u8 array with length 1)
// 4. `piece = ...`: []u8 (a slice of u8)
//
// In 3., if we try to directly cast type to `[]u8`, compiler will
// complain "error: expected type '[]u8', found '*u8'", because
// compiler doesn't know the length of it.
// In 4., it works because slice is a fat pointer (ptr + len), and
// `*[1]u8` is a pointer with length info, so type coercion is valid.
piece = @as(*[1]u8, &self.byte_pieces[byte_val]);
}
return piece;
}
};
/// Compare two strings like `strcmp` in C. This implementation does not rely
/// on a null terminator; it uses the length information carried by Zig slices.
pub fn strcmp(a: []const u8, b: []const u8) i32 {
var i: usize = 0;
while (i < a.len and i < b.len) {
if (a[i] != b[i]) {
return @as(i32, a[i]) - @as(i32, b[i]);
}
i += 1;
}
// Now, we ran out of characters from either a or b. So we just need to
// check with the lengths of them.
const len_a: i32 = @intCast(a.len);
const len_b: i32 = @intCast(b.len);
return len_a - len_b;
}
/// Compare 2 `TokenIndex`s and return `math.Order`.
pub fn compareToken(context: void, a: TokenIndex, b: TokenIndex) std.math.Order {
_ = context;
const res = strcmp(a.str, b.str);
if (res < 0) {
return std.math.Order.lt;
} else if (res == 0) {
return std.math.Order.eq;
} else {
return std.math.Order.gt;
}
}
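/// Print `piece` unless it is a single unprintable raw byte; raw-byte tokens can
/// contain arbitrary bytes, so only printable characters and whitespace pass.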
pub fn safePrint(piece: []const u8) void {
if (piece.len == 1) {
if (piece[0] == '\x00') return;
const byte_val: u8 = piece[0];
if (!(std.ascii.isPrint(byte_val) or std.ascii.isWhitespace(byte_val))) {
std.log.warn("Found non-printable input, len: {d}\n", .{piece.len});
return;
}
}
std.debug.print("{s}", .{piece});
}
pub fn buildTokenizer(
t: *Tokenizer,
tokenizer_path: []const u8,
vocab_size: i32,
allocator: Allocator,
) !void {
t.* = try Tokenizer.init(tokenizer_path, vocab_size, allocator);
}
pub fn freeTokenizer(tokenizer: *Tokenizer, allocator: Allocator) void {
tokenizer.deinit(allocator);
}
// ----------------------------------------------------------------------
pub const ProbIndex = struct {
prob: f32,
index: usize,
    /// Comparator for descending sort by probability: true when a.prob > b.prob.
    pub fn greaterThan(_: void, a: ProbIndex, b: ProbIndex) bool {
        return a.prob > b.prob;
    }
};
pub const Sampler = struct {
vocab_size: i32,
probindex: []ProbIndex,
temperature: f32,
topp: f32,
rng_state: u64,
pub fn init(
vocab_size: i32,
temperature: f32,
topp: f32,
rng_seed: u64,
allocator: Allocator,
) !Sampler {
const n_vocab: usize = @intCast(vocab_size);
return Sampler{
.vocab_size = vocab_size,
.temperature = temperature,
.topp = topp,
.rng_state = rng_seed,
.probindex = try allocator.alloc(ProbIndex, n_vocab),
};
}
pub fn deinit(self: Sampler, allocator: Allocator) void {
allocator.free(self.probindex);
}
pub fn sample(self: *Sampler, logits: []f32) u32 {
// sample the token given the logits and some hyperparameters
var next: usize = 0;
if (self.temperature == 0.0) {
// greedy argmax sampling: take the token with the highest probability
next = sampleArgmax(logits);
} else {
// apply the temperature to the logits
const n_vocab: usize = @intCast(self.vocab_size);
for (0..n_vocab) |q| {
logits[q] /= self.temperature;
}
// apply softmax to the logits to get the probabilities for next token
softmax(logits);
// flip a (float) coin (this is our source of entropy for sampling)
const coin = randomF32(&self.rng_state);
// we sample from this distribution to get the next token
if (self.topp <= 0 or self.topp >= 1) {
// simply sample from the predicted probability distribution
next = sampleMult(logits, coin);
} else {
// top-p (nucleus) sampling, clamping the least likely tokens to zero
next = sampleTopp(logits, self.topp, self.probindex, coin);
}
}
return @as(u32, @intCast(next));
}
};
// TODO: should we change the output type to u32? (other sampling functions
// below should be changed too)
pub fn sampleArgmax(probabilities: []f32) usize {
// return the index that has the highest probability
var max_i: usize = 0;
var max_p: f32 = probabilities[0];
for (1..probabilities.len) |i| {
if (probabilities[i] > max_p) {
max_i = i;
max_p = probabilities[i];
}
}
return max_i;
}
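/// Sample an index from `probabilities` (they must sum to 1); `coin` is a
/// random number in [0, 1).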
pub fn sampleMult(probabilities: []f32, coin: f32) usize {
var cdf: f32 = 0.0;
for (0..probabilities.len) |i| {
cdf += probabilities[i];
if (coin < cdf) {
return i;
}
}
return probabilities.len - 1; // in case of rounding errors
}
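/// Top-p (nucleus) sampling: sample only from the smallest set of tokens whose
/// cumulative probability exceeds `topp`; `coin` is a random number in [0, 1).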
pub fn sampleTopp(probabilities: []f32, topp: f32, probindex: []ProbIndex, coin: f32) usize {
var n0: usize = 0;
// filter out probs < (1 - topp) / (n - 1) before sorting
const cutoff: f32 = (1.0 - topp) / @as(f32, @floatFromInt(probabilities.len - 1));
for (0..probabilities.len) |i| {
if (probabilities[i] >= cutoff) {
probindex[n0].index = i;
probindex[n0].prob = probabilities[i];
n0 += 1;
}
}
    std.sort.pdq(ProbIndex, probindex[0..n0], {}, ProbIndex.greaterThan);
// truncate the list where cumulative probability exceeds topp
var cumulative_prob: f32 = 0.0;
var last_idx = n0 - 1;
for (0..n0) |i| {
cumulative_prob += probindex[i].prob;
if (cumulative_prob > topp) {
last_idx = i;
break; // note that last index is included now
}
}
// sample from the truncated list
const r = coin * cumulative_prob;
var cdf: f32 = 0.0;
for (0..(last_idx + 1)) |i| {
cdf += probindex[i].prob;
if (r < cdf) {
return probindex[i].index;
}
}
return probindex[last_idx].index;
}
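/// xorshift64* PRNG (the same generator used in llama2.c): xorshift the state,
/// then return the upper 32 bits of a 64-bit multiply.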
pub fn randomU32(state: *u64) u32 {
state.* ^= state.* >> 12;
state.* ^= state.* << 25;
state.* ^= state.* >> 27;
return @as(u32, @intCast((state.* *% @as(u64, 0x2545F4914F6CDD1D)) >> 32));
}
pub fn randomF32(state: *u64) f32 {
// 16777216 = 2^24 = "0 10010111 00000000000000000000000"
// sign: 0, exponent: 10010111 (-127 + 151 = 24), mantissa: 0
const magic: f32 = 16777216.0;
return @as(f32, @floatFromInt(randomU32(state) >> 8)) / magic;
}
// ----------------------------------------------------------------------
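/// Autoregressive generation: feed the prompt tokens first, then sample one new
/// token per position until `steps` positions are generated or BOS (=1) appears.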
pub fn generate(
transformer: *Transformer,
tokenizer: *Tokenizer,
sampler: *Sampler,
prompt: []const u8,
steps: u32,
allocator: Allocator,
) !void {
var prompt_tokens: []u32 = try allocator.alloc(u32, prompt.len + 3);
defer allocator.free(prompt_tokens);
const n_tokens = try tokenizer.encode(prompt, true, false, prompt_tokens, allocator);
var start: i64 = 0;
var next: u32 = undefined;
var token = prompt_tokens[0];
var pos: u32 = 0;
while (pos < steps) {
// forward the transformer to get logits for the next token
var logits: []f32 = transformer.forward(token, pos);
if (pos < n_tokens - 1) {
next = prompt_tokens[pos + 1];
} else {
next = sampler.sample(logits);
}
pos += 1;
// data-dependent terminating condition: the BOS (=1) token delimits sequences
if (next == 1) {
break;
}
const piece = tokenizer.decode(token, next);
safePrint(piece);
token = next;
// init the timer here because the first iteration can be slower
if (start == 0) {
start = std.time.milliTimestamp();
}
}
std.debug.print("\n", .{});
if (pos > 1) {
const end: i64 = std.time.milliTimestamp();
const tok_per_sec: f32 = @as(f32, @floatFromInt(pos - 1)) / @as(f32, @floatFromInt((end - start))) * 1000;
std.debug.print("achieved tok/s: {d}\n", .{tok_per_sec});
}
}
fn errorUsage() void {
const msg =
        \\ Usage: run <checkpoint> [options]
        \\ Example: run model.bin -n 256 -i "Once upon a time"
        \\ Options:
        \\   -t <float>  temperature in [0,inf], default 1.0
        \\   -p <float>  p value in top-p (nucleus) sampling in [0,1], default 0.9
        \\   -s <int>    random seed, default time(NULL)
        \\   -n <int>    number of steps to run for, default 256. 0 = max_seq_len
        \\   -i <string> input prompt
        \\   -z <string> optional path to custom tokenizer
        \\   -m <string> mode: generate|chat, default: generate
        \\   -y <string> (optional) system prompt in chat mode
        \\   -l <int>    (optional) use mmap for checkpoint (0: disable, 1: enable)
;
std.debug.print("{s}\n", .{msg});
std.process.exit(1);
}
pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
const allocator = arena.allocator();
const args = try std.process.argsAlloc(allocator);
defer std.process.argsFree(allocator, args);
if (args.len < 2) {
std.debug.print("No model checkpoint is specified\n", .{});
errorUsage();
}
const checkpoint_path = args[1];
var tokenizer_path: []const u8 = "tokenizer.bin";
var temperature: f32 = 1.0;
var topp: f32 = 0.9;
var steps: u32 = 256;
var prompt: []const u8 = "";
var rng_seed: u64 = 0;
var mode: []const u8 = "generate";
var system_prompt: []const u8 = "";
var use_mmap: bool = true;
var i: usize = 2;
while (i < args.len) : (i += 2) {
        if (i + 1 >= args.len) { // every option must be followed by a value
errorUsage();
}
const arg = args[i];
const val = args[i + 1];
if (arg[0] != '-' or arg.len != 2) {
errorUsage();
}
if (arg[1] == 't') {
temperature = try std.fmt.parseFloat(f32, val);
} else if (arg[1] == 'p') {
topp = try std.fmt.parseFloat(f32, val);
} else if (arg[1] == 's') {
rng_seed = try std.fmt.parseUnsigned(u64, val, 10);
} else if (arg[1] == 'n') {
steps = try std.fmt.parseUnsigned(u32, val, 10);
} else if (arg[1] == 'i') {
prompt = val;
} else if (arg[1] == 'z') {
tokenizer_path = val;
} else if (arg[1] == 'm') {
mode = val;
} else if (arg[1] == 'y') {
system_prompt = val;
} else if (arg[1] == 'l') {
const tmp = try std.fmt.parseInt(u1, val, 0);
            use_mmap = (tmp == 1);
} else {
errorUsage();
}
}
// parameter validation/overrides
if (rng_seed <= 0) {
rng_seed = @intCast(std.time.timestamp());
}
if (temperature < 0.0) {
temperature = 0.0;
}
if (topp < 0.0 or 1.0 < topp) {
topp = 0.9;
}
if (steps < 0) {
steps = 0;
}
if (!std.mem.eql(u8, mode, "generate")) {
std.debug.print("[ERROR] Currently only 'generate' mode is supported.\n", .{});
std.process.exit(1);
}
var transformer = Transformer{};
try buildTransformer(&transformer, checkpoint_path, use_mmap, allocator);
defer freeTransformer(&transformer, use_mmap, allocator);
if (steps == 0) {
steps = @intCast(transformer.config.seq_len);
} else if (steps > transformer.config.seq_len) {
        // XXX: Currently we clip `steps` if it exceeds the maximal sequence
        // length (see also llama2.c issue #348). Maybe RoPE scaling could be
        // applied to allow inference with a longer context?
std.debug.print("warning: clipping `steps` because it exceeds `max_seq_len`\n", .{});
steps = @intCast(transformer.config.seq_len);
}
// Build tokenizer
var tokenizer = Tokenizer{};
try buildTokenizer(&tokenizer, tokenizer_path, 32000, allocator);
defer freeTokenizer(&tokenizer, allocator);
// Build sampler
var sampler = try Sampler.init(32000, temperature, topp, rng_seed, allocator);
defer sampler.deinit(allocator);
try generate(&transformer, &tokenizer, &sampler, prompt, steps, allocator);
}
| https://raw.githubusercontent.com/NaleRaphael/llama2.zig/3ea6b14bf7db4ccec25a50db275655f5192cd0d2/run.zig |
const std = @import("std");
const fs = std.fs;
const print = std.debug.print;
const label = [_]u8{'J', '2', '3', '4', '5', '6', '7', '8', '9', 'T', 'Q', 'K', 'A'};
const Record = struct {
hand : []u8,
bid : usize,
};
fn cmp(_: void, a: u8, b: u8) bool {
    const ra = std.mem.indexOf(u8, &label, &[_]u8{a}).?;
    const rb = std.mem.indexOf(u8, &label, &[_]u8{b}).?;
    return ra < rb;
}
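// Hand rank with jokers ('J') wild: 6 = five of a kind, 5 = four of a kind,
// 4 = full house, 3 = three of a kind, 2 = two pair, 1 = one pair, 0 = high card.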
fn rank(hand: []u8) usize {
var sorted:[5]u8 = undefined;
var i:usize = 0;
var sorted_size:usize = 0;
var j_cnt:u8 = 0;
while (i < 5) : ( i += 1) {
if (hand[i] != 'J') {
sorted[sorted_size] = hand[i];
sorted_size += 1;
} else {
j_cnt += 1;
}
}
if (sorted_size == 0) {
return 6;
}
std.sort.block(u8, sorted[0..sorted_size], {}, cmp);
var r:[5]u8 = [_]u8{0} ** 5;
i = 1;
var j:usize = 0;
r[j] = 1;
while (i < sorted_size) : (i += 1) {
if (sorted[i] == sorted[i-1]) {
r[j] +=1;
} else {
j += 1;
r[j] = 1;
}
}
std.sort.block(u8, &r, {}, std.sort.desc(u8));
r[0] += j_cnt;
//print ("{d} {c}\n", .{r, hand});
if (j == 0)
return 6;
if (j == 1) {
if (r[0] == 4) {
return 5;
} else {
return 4;
}
}
if (j == 2) {
if (r[0] == 3 and r[1] == 1)
return 3;
if (r[0] == 2 and r[1] == 2)
return 2;
unreachable;
}
if (j == 3) {
return 1;
}
return 0;
}
fn handCmp(_: void, a: Record, b: Record) bool {
const ra = rank(a.hand);
const rb = rank(b.hand);
if (ra < rb) {
return true;
} else if (ra > rb) {
return false;
} else {
var i:usize = 0;
while (i < 5 ) : ( i += 1) {
const rla = std.mem.indexOf(u8, &label, &[_]u8{a.hand[i]}).?;
const rlb = std.mem.indexOf(u8, &label, &[_]u8{b.hand[i]}).?;
if (rla < rlb) {
return true;
} else if (rla > rlb) {
return false;
}
}
}
unreachable;
}
pub fn main() !void {
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
defer _ = gpa.deinit();
const allocator = gpa.allocator();
const file = try fs.cwd().openFile("input", .{});
defer file.close();
const rdr = file.reader();
var list = std.ArrayList(Record).init(allocator);
defer list.deinit();
while (try rdr.readUntilDelimiterOrEofAlloc(allocator, '\n', 4096)) |line| {
defer allocator.free(line);
var it = std.mem.split(u8, line, " ");
const hand_str = it.next().?;
const hand = try allocator.alloc(u8, hand_str.len);
@memcpy(hand, hand_str);
const bid = try std.fmt.parseInt(usize, it.next().?, 10);
try list.append(Record{.hand = hand, .bid = bid});
}
const s = try list.toOwnedSlice();
defer allocator.free(s);
defer {
for (s) |*r| {
allocator.free(r.hand);
}
}
std.sort.block(Record, s, {}, handCmp);
var bid:usize = 0;
for (s, 1 .. ) |*r, i| {
print ("{c} {}\n", .{ r.hand, r.bid});
bid += r.bid * i;
}
print ("{}\n", .{bid});
}
| https://raw.githubusercontent.com/xnhp0320/aoc2023/6e42c1e3791ba8e50c3ea0930572cade5caaef5b/7/s2.zig |
const std = @import("std");
pub fn getMimeType(path: []const u8) ?[]const u8 {
    const dot = std.mem.lastIndexOf(u8, path, ".") orelse return null;
    const ext = path[dot + 1 ..];
    if (std.mem.eql(u8, ext, "html")) {
        return "text/html";
    } else if (std.mem.eql(u8, ext, "css")) {
        return "text/css";
    } else if (std.mem.eql(u8, ext, "js")) {
        return "application/javascript";
    } else if (std.mem.eql(u8, ext, "png")) {
        return "image/png";
    } else if (std.mem.eql(u8, ext, "jpg") or std.mem.eql(u8, ext, "jpeg")) {
        return "image/jpeg";
    } else if (std.mem.eql(u8, ext, "gif")) {
        return "image/gif";
    } else if (std.mem.eql(u8, ext, "svg")) {
        return "image/svg+xml";
    } else if (std.mem.eql(u8, ext, "mp4")) {
        return "video/mp4";
    } else if (std.mem.eql(u8, ext, "webm")) {
        return "video/webm";
    } else if (std.mem.eql(u8, ext, "mp3")) {
        return "audio/mpeg";
    } else if (std.mem.eql(u8, ext, "wav")) {
        return "audio/wav";
    } else if (std.mem.eql(u8, ext, "pdf")) {
        return "application/pdf";
    } else if (std.mem.eql(u8, ext, "ico")) {
        return "image/x-icon";
    } else {
        return null;
    }
}
| https://raw.githubusercontent.com/hajsf/zig-server/22d3e3f821217f50e0dff9e13ee5450d221c5d30/mime.zig |
const std = @import("std");
const print = std.debug.print;
pub fn main() !void {
var file = try std.fs.cwd().openFile("data/input2.txt", .{});
defer file.close();
var reader = file.reader();
var buf: [4096]u8 = undefined;
var hpos: u32 = 0;
var ypos: u32 = 0;
var aim: u32 = 0;
while (try reader.readUntilDelimiterOrEof(&buf, '\n')) |line| {
var splitter = std.mem.tokenize(u8, line, " ");
var move = splitter.next().?;
var amount = try std.fmt.parseInt(u32, splitter.next().?, 0);
if (std.mem.eql(u8, move, "forward")) {
hpos += amount;
ypos += aim*amount;
} else if (std.mem.eql(u8, move, "backward")) {
hpos -= amount;
} else if (std.mem.eql(u8, move, "down")) {
aim += amount;
} else {
aim -= amount;
}
}
print("prod {}\n", .{hpos*ypos});
}
| https://raw.githubusercontent.com/angeris/advent-of-code-2021/fcd858e08f2da93ef53a8aee0698921c896df2ec/d2-2.zig
const std = @import("std");
const Nihilist = @import("nihilist.zig").Nihilist;
pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
var allocator = &arena.allocator;
var args = try std.process.argsAlloc(allocator);
defer std.process.argsFree(allocator, args);
    if (args.len < 5) { // program name + 4 required arguments
std.debug.warn("Usage: nihilist <encrypt|decrypt> <polybius_key> <nihilist_key> <plaintext|ciphertext>\n", .{});
return;
}
var enc = std.mem.eql(u8, args[1], "encrypt");
var dec = std.mem.eql(u8, args[1], "decrypt");
if (!(enc or dec)) {
std.debug.warn("Usage: nihilist <encrypt|decrypt> <polybius_key> <nihilist_key> <plaintext|ciphertext>\n", .{});
return;
}
var nihilist = try Nihilist.init(allocator, args[2], args[3]);
var output = if (dec) nihilist.decrypt(args[4]) else nihilist.encrypt(args[4]);
if (output) |out| {
std.debug.warn("{}\n", .{out});
} else |err| {
switch (err) {
error.InvalidKey => {
std.debug.warn("Invalid key!\n", .{});
},
error.InvalidCiphertext => {
std.debug.warn("Invalid ciphertext!\n", .{});
},
else => {
std.debug.warn("Error: {}\n", .{err});
}
}
}
}
| https://raw.githubusercontent.com/stripedpajamas/nihilist/578019241123857630858161e30832788fd92e3b/main.zig |
const std = @import("std");
const g = @cImport({
@cInclude("glib-object.h");
});
pub fn main() void {
const obj1: ?*g.GObject = @alignCast(@ptrCast(g.g_object_new(g.G_TYPE_OBJECT, null)));
const obj2: ?*g.GObject = @alignCast(@ptrCast(g.g_object_new(g.G_TYPE_OBJECT, null)));
const instance1: ?*g.GTypeInstance = @ptrCast(obj1);
const instance2: ?*g.GTypeInstance = @ptrCast(obj2);
const class1: ?*g.GTypeClass = instance1.?.*.g_class;
const class2: ?*g.GTypeClass = instance2.?.*.g_class;
std.debug.print("addr1 {*}\n", .{obj1});
std.debug.print("addr2 {*}\n", .{obj2});
std.debug.print("klass1 {*}\n", .{class1});
std.debug.print("klass2 {*}\n", .{class2});
g.g_object_unref(obj1);
g.g_object_unref(obj2);
}
| https://raw.githubusercontent.com/evaporei/zobject/23f6bd4c5d72ebda3da82dd189052aa1e70db985/main.zig |
pub const compare = @import("./compare.zig");
pub const meta = @import("./meta.zig");
pub const sentinel = @import("./sentinel.zig");
pub const limitslice = @import("./limitslice.zig");
// disabled for now, too many things to fix during a zig update
//pub const range = @import("./range.zig");
pub const mem = @import("./mem.zig");
pub const stringpool = @import("./stringpool.zig");
// Stuff taken from git-extra
pub const tuple = @import("./tuple.zig");
pub const appendlib = @import("./appendlib.zig");
pub const runutil = @import("./runutil.zig");
pub const cmdlinetool = @import("./cmdlinetool.zig");
const std = @import("std");
test {
std.testing.refAllDecls(@This());
}
| https://raw.githubusercontent.com/marler8997/zog/0f5f075792ba1a9c76901a9a766125386a33cce6/zog.zig |
const std = @import("std");
const libvirt = @import("libvirt");
pub fn main() !void {
std.log.info("All your codebase are belong to us.", .{});
std.log.info("libvirt version: {d}", .{libvirt.c.LIBVIR_VERSION_NUMBER});
}
| https://raw.githubusercontent.com/nektro/zig-libvirt/af019d9f5f5e51d5934b92ad539b3c50d67d0638/main.zig |
pub usingnamespace @import("src/main.zig");
pub const bun = @import("src/bun.zig");
pub const content = struct {
pub const error_js_path = "packages/bun-error/dist/index.js";
pub const error_js = @embedFile(error_js_path);
pub const error_css_path = "packages/bun-error/dist/bun-error.css";
pub const error_css_path_dev = "packages/bun-error/bun-error.css";
pub const error_css = @embedFile(error_css_path);
};
pub const completions = struct {
pub const bash = @embedFile("./completions/bun.bash");
pub const zsh = @embedFile("./completions/bun.zsh");
pub const fish = @embedFile("./completions/bun.fish");
};
pub const JavaScriptCore = @import("./src/jsc.zig");
pub const C = @import("./src/c.zig");
| https://raw.githubusercontent.com/txthinking/jb/d0e3ddce48be1ca0490904397bcd030c5aa7032e/root.zig |
//! This file is auto-generated by zpm-update and *should*
//! not be changed. This file can be checked into your VCS
//! and is able to work standalone.
const std = @import("std");
pub const pkgs = struct {
pub const ziggysynth = std.build.Pkg{
.name = "ziggysynth",
.source = .{ .path = "vendor/ziggysynth//src/ziggysynth.zig" },
};
pub const libgamestudio = std.build.Pkg{
.name = "libgamestudio",
.source = .{ .path = "vendor/libgamestudio/src/main.zig" },
};
pub const zlm = std.build.Pkg{
.name = "zlm",
.source = .{ .path = "vendor/zlm/zlm.zig" },
};
pub const maps = std.build.Pkg{
.name = "maps",
.source = .{ .path = "vendor/maps/src/maps.zig" },
};
pub const ode = std.build.Pkg{
.name = "ode",
.source = .{ .path = "vendor/ode/src/ode.zig" },
};
};
pub const sdks = struct {
pub const @"zero-graphics" = @import("vendor/zero-graphics/Sdk.zig");
pub const soundio = @import("vendor/soundio/Sdk.zig");
pub const ode = @import("vendor/ode/Sdk.zig");
};
| https://media.githubusercontent.com/media/ikskuh/zyclone/ae63975f672d9504b5cf1c9b7616454c67193500/zpm.zig |
const lua = @cImport({
@cInclude("lua.h");
@cInclude("lualib.h");
@cInclude("lauxlib.h");
});
export fn add(s: ?*lua.lua_State) c_int {
const a = lua.luaL_checkinteger(s, 1);
const b = lua.luaL_checkinteger(s, 2);
const c = a + b;
lua.lua_pushinteger(s, c);
return 1;
}
pub fn main() void {
var s = lua.luaL_newstate();
lua.luaL_openlibs(s);
lua.lua_register(s, "zig_add", add);
// TODO translate-c: luaL_dostring
_ = lua.luaL_loadstring(s, "print(zig_add(3, 5))");
// TODO translate-c: lua_pcall
_ = lua.lua_pcallk(s, 0, lua.LUA_MULTRET, 0, 0, null);
}
| https://raw.githubusercontent.com/tiehuis/zig-lua/bb4e2759304b4b38df10919a499528fadfe33632/main.zig |
pub extern fn add(a: i32, b: i32) i32;
pub extern fn sub(a: i32, b: i32) i32;
| https://raw.githubusercontent.com/hajsf/zig-and-go-simple-example/746c6a774a6f537a996c3d20effebc98d47ed47d/def.zig |
const expect = @import("std").testing.expect;
// 5. for loops are used to iterate over the arrays
//5. for loop
test "for loop" {
const rust_name = [_]u8{ 'r', 'u', 's', 't' }; // character literals are equivalent to integer literals
for (rust_name, 0..) |character, index| {
            _ = character; // we use _ because Zig does not allow unused capture values
_ = index;
}
for (rust_name) |character| {
_ = character;
}
for (rust_name, 0..) |_, index| {
_ = index;
}
for (rust_name) |_| {}
}
| https://raw.githubusercontent.com/muathendirangu/zig/9566d8190e6e980418fdd2bdcf72b418b91b8948/for.zig |
const expect = @import("std").testing.expect;
test "for" {
//character literals are equivalent to integer literals
const string = [_]u8{ 'a', 'b', 'c' };
for (string, 0..) |character, index| {
_ = character;
_ = index;
}
for (string) |character| {
_ = character;
}
for (string, 0..) |_, index| {
_ = index;
}
for (string) |_| {}
}
| https://raw.githubusercontent.com/wolffshots/ziglearn/5743d7a6748bf38207e5395ea0cc7b5dbdeaf230/for.zig |
Zig programming language code dataset, loaded using the GitHub Search REST API.
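For illustration only, the collection step against the GitHub Search REST API might be sketched as below. The actual queries and scripts used to build this dataset are not included here, so the search term, qualifiers, and paging parameters are assumptions; GitHub's code-search endpoint also requires an authenticated request (`Authorization: Bearer <token>`, `Accept: application/vnd.github+json`) and at least one plain search term alongside qualifiers such as `language:`.

```zig
const std = @import("std");

// Sketch: build a GitHub code-search URL for Zig sources. The term and
// qualifiers are illustrative, not the query actually used for this dataset.
pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const allocator = gpa.allocator();

    const search_term = "std"; // hypothetical search term
    const url = try std.fmt.allocPrint(
        allocator,
        "https://api.github.com/search/code?q={s}+language:Zig&per_page=100&page=1",
        .{search_term},
    );
    defer allocator.free(url);

    // A collection script would GET this URL (e.g. with curl or std.http.Client),
    // page through the returned `items`, and download each item's raw contents.
    std.debug.print("{s}\n", .{url});
}
```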