author    bonmas14 <bonmas14@gmail.com>  2025-08-04 13:33:47 +0000
committer bonmas14 <bonmas14@gmail.com>  2025-08-04 13:33:47 +0000
commit    ed1cab483e49e29396178a33dea51773aa770855 (patch)
tree      78102102f7f7d07be0e6bef4b10ef69576f0c23e
parent    471b539bdbf658ff7924b7500f89fd237df8be9b (diff)
Math, not tested
-rw-r--r--  README.md          15
-rwxr-xr-x  build.sh            2
-rw-r--r--  src/un_math.c      99
-rw-r--r--  src/un_splines.c   40
-rw-r--r--  src/un_vec.c      294
-rw-r--r--  src/ungrateful.c    3
-rw-r--r--  src/ungrateful.h  130
-rw-r--r--  tests/un/math.c     5
8 files changed, 585 insertions, 3 deletions
diff --git a/README.md b/README.md
index a4d8a6e..728a637 100644
--- a/README.md
+++ b/README.md
@@ -13,13 +13,26 @@ Only core features of C99 were used:
- long long;
- restrict keyword;
+# Compile flags
+
+```
+# clang / gcc
+-std=c99 -lm -ffast-math
+
+# cl (MSVC, untested for now)
+
+/std:c99 /fp:fast
+```
+
# Ungrateful
+Note: we need to manipulate the SSE control register to disable subnormals (flush to zero).
+
Types, Strings, basic memory allocators (platform specific), and so on. Features:
+ Memory allocation;
+ Strings;
-- easing;
++ easing;
- splines (cubic);
- vecs/ivec;
- matrix;
diff --git a/build.sh b/build.sh
index d54c6b9..a2f91ec 100755
--- a/build.sh
+++ b/build.sh
@@ -4,7 +4,7 @@ cc="gcc"
ld="gcc"
ar="ar"
-cflags="-std=c99 -fPIC -Wall -Wextra -g -Wno-error -pedantic"
+cflags="-std=c99 -fPIC -Wall -Wextra -g -Wno-error -pedantic -lm -ffast-math"
proc=$(nproc)
diff --git a/src/un_math.c b/src/un_math.c
new file mode 100644
index 0000000..772ef70
--- /dev/null
+++ b/src/un_math.c
@@ -0,0 +1,99 @@
+/* reals */
+
+real un_m_lerpr(real a, real b, real t) {
+ return (1.0 - t) * a + b * t;
+}
+
+real un_m_ease_isiner(real t) {
+#if defined(UN_DOUBLE_PRECISION)
+ return 1.0 - cos((t * PI) / 2.0);
+#else
+ return 1.0 - cosf((t * PI) / 2.0);
+#endif
+}
+
+real un_m_ease_iosiner(real t) {
+#if defined(UN_DOUBLE_PRECISION)
+ return -(cos(t * PI) - 1.0) / 2.0;
+#else
+ return -(cosf(t * PI) - 1.0) / 2.0;
+#endif
+}
+
+real un_m_ease_osiner(real t) {
+#if defined(UN_DOUBLE_PRECISION)
+ return sin((t * PI) / 2.0);
+#else
+ return sinf((t * PI) / 2.0f);
+#endif
+}
+
+real un_m_ease_iquadr(real t) {
+ return t * t;
+}
+
+real un_m_ease_ioquadr(real t) {
+ return t < 0.5 ? 2.0 * t * t : 1.0 - ((-2.0 * t + 2.0) * (-2.0 * t + 2.0)) / 2.0;
+}
+
+real un_m_ease_oquadr(real t) {
+ return 1.0 - (1.0 - t) * (1.0 - t);
+}
+
+/* floats */
+f32 un_m_lerpf(f32 a, f32 b, f32 t) {
+ return (1.0 - t) * a + b * t;
+}
+
+f32 un_m_ease_isinef(f32 t) {
+ return 1.0 - cosf((t * PI) / 2.0);
+}
+
+f32 un_m_ease_iosinef(f32 t) {
+ return -(cosf(t * PI) - 1.0) / 2.0;
+}
+
+f32 un_m_ease_osinef(f32 t) {
+ return sinf((t * PI) / 2.0f);
+}
+
+f32 un_m_ease_iquadf(f32 t) {
+ return t * t;
+}
+
+f32 un_m_ease_ioquadf(f32 t) {
+ return t < 0.5 ? 2.0 * t * t : 1.0 - ((-2.0 * t + 2.0) * (-2.0 * t + 2.0)) / 2.0;
+}
+
+f32 un_m_ease_oquadf(f32 t) {
+ return 1.0 - (1.0 - t) * (1.0 - t);
+}
+
+/* doubles */
+f64 un_m_lerpd(f64 a, f64 b, f64 t) {
+ return (1.0 - t) * a + b * t;
+}
+
+f64 un_m_ease_isined(f64 t) {
+ return 1.0 - cos((t * PI) / 2.0);
+}
+
+f64 un_m_ease_iosined(f64 t) {
+ return -(cos(t * PI) - 1.0) / 2.0;
+}
+
+f64 un_m_ease_osined(f64 t) {
+ return sin((t * PI) / 2.0);
+}
+
+f64 un_m_ease_iquadd(f64 t) {
+ return t * t;
+}
+
+f64 un_m_ease_ioquadd(f64 t) {
+ return t < 0.5 ? 2.0 * t * t : 1.0 - ((-2.0 * t + 2.0) * (-2.0 * t + 2.0)) / 2.0;
+}
+
+f64 un_m_ease_oquadd(f64 t) {
+ return 1.0 - (1.0 - t) * (1.0 - t);
+}
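A hypothetical usage sketch for the functions above — only the un_m_* names and signatures come from this commit; `animate` is illustrative:

```
/* Remap t through an in-out quad curve, then interpolate 0..100 along it. */
f32 animate(f32 t) {
    f32 eased = un_m_ease_ioquadf(t);
    return un_m_lerpf(0.0f, 100.0f, eased);
}
```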
diff --git a/src/un_splines.c b/src/un_splines.c
new file mode 100644
index 0000000..7b27ee0
--- /dev/null
+++ b/src/un_splines.c
@@ -0,0 +1,40 @@
+real un_m_bezierr(real a, real q0, real q1, real b, real t) {
+ real i0, i1, i2, p1, p2;
+
+ i0 = un_m_lerpr(a, q0, t);
+ i1 = un_m_lerpr(q0, q1, t);
+ i2 = un_m_lerpr(q1, b, t);
+
+ p1 = un_m_lerpr(i0, i1, t);
+ p2 = un_m_lerpr(i1, i2, t);
+
+ return un_m_lerpr(p1, p2, t);
+}
+
+f32 un_m_bezierf(f32 a, f32 q0, f32 q1, f32 b, f32 t) {
+ f32 i0, i1, i2, p1, p2;
+
+ i0 = un_m_lerpf(a, q0, t);
+ i1 = un_m_lerpf(q0, q1, t);
+ i2 = un_m_lerpf(q1, b, t);
+
+ p1 = un_m_lerpf(i0, i1, t);
+ p2 = un_m_lerpf(i1, i2, t);
+
+ return un_m_lerpf(p1, p2, t);
+}
+
+f64 un_m_bezierd(f64 a, f64 q0, f64 q1, f64 b, f64 t) {
+ f64 i0, i1, i2, p1, p2;
+
+ i0 = un_m_lerpd(a, q0, t);
+ i1 = un_m_lerpd(q0, q1, t);
+ i2 = un_m_lerpd(q1, b, t);
+
+ p1 = un_m_lerpd(i0, i1, t);
+ p2 = un_m_lerpd(i1, i2, t);
+
+ return un_m_lerpd(p1, p2, t);
+}
+
+// add v2-4 variants
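A hypothetical sketch of sampling the cubic Bézier above into a table — `sample_bezier` and its parameters are illustrative; only un_m_bezierf is from this commit:

```
/* Fill out[0..n-1] with a 1-D cubic Bezier evaluated at evenly spaced t.
 * Assumes n >= 2. */
void sample_bezier(f32 *out, int n, f32 a, f32 q0, f32 q1, f32 b) {
    for (int i = 0; i < n; i++) {
        f32 t = (f32)i / (f32)(n - 1);
        out[i] = un_m_bezierf(a, q0, q1, b, t);
    }
}
```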
diff --git a/src/un_vec.c b/src/un_vec.c
new file mode 100644
index 0000000..1e1ccda
--- /dev/null
+++ b/src/un_vec.c
@@ -0,0 +1,294 @@
+
+
+
+/*
+ *
+ * vector from two vectors
+ *
+ * add
+ * sub
+ *
+ * add scalar
+ * sub scalar
+ *
+ * mul scalar
+ * div scalar
+ *
+ * distance
+ * distance sqr
+ *
+ * magnitude
+ * magnitude sqr
+ *
+ * dot, cross, surface_cross, normalize, reflect, flatten, project
+ *
+ *
+ *
+ */
+
+void un_m_add2f(f32 *v, f32 *a, f32 *b) {
+ v[0] = a[0] + b[0];
+ v[1] = a[1] + b[1];
+}
+
+void un_m_add3f(f32 *v, f32 *a, f32 *b) {
+ v[0] = a[0] + b[0];
+ v[1] = a[1] + b[1];
+ v[2] = a[2] + b[2];
+}
+
+void un_m_add4f(f32 *v, f32 *a, f32 *b) {
+ v[0] = a[0] + b[0];
+ v[1] = a[1] + b[1];
+ v[2] = a[2] + b[2];
+ v[3] = a[3] + b[3];
+}
+
+void un_m_sub2f(f32 *v, f32 *a, f32 *b) {
+ v[0] = a[0] - b[0];
+ v[1] = a[1] - b[1];
+}
+
+void un_m_sub3f(f32 *v, f32 *a, f32 *b) {
+ v[0] = a[0] - b[0];
+ v[1] = a[1] - b[1];
+ v[2] = a[2] - b[2];
+}
+
+void un_m_sub4f(f32 *v, f32 *a, f32 *b) {
+ v[0] = a[0] - b[0];
+ v[1] = a[1] - b[1];
+ v[2] = a[2] - b[2];
+ v[3] = a[3] - b[3];
+}
+
+void un_m_add_scalar2f(f32 *v, f32 *a, f32 scalar) {
+ v[0] = a[0] + scalar;
+ v[1] = a[1] + scalar;
+}
+
+void un_m_add_scalar3f(f32 *v, f32 *a, f32 scalar) {
+ v[0] = a[0] + scalar;
+ v[1] = a[1] + scalar;
+ v[2] = a[2] + scalar;
+}
+
+void un_m_add_scalar4f(f32 *v, f32 *a, f32 scalar) {
+ v[0] = a[0] + scalar;
+ v[1] = a[1] + scalar;
+ v[2] = a[2] + scalar;
+ v[3] = a[3] + scalar;
+}
+
+void un_m_sub_scalar2f(f32 *v, f32 *a, f32 scalar) {
+ v[0] = a[0] - scalar;
+ v[1] = a[1] - scalar;
+}
+
+void un_m_sub_scalar3f(f32 *v, f32 *a, f32 scalar) {
+ v[0] = a[0] - scalar;
+ v[1] = a[1] - scalar;
+ v[2] = a[2] - scalar;
+}
+
+void un_m_sub_scalar4f(f32 *v, f32 *a, f32 scalar) {
+ v[0] = a[0] - scalar;
+ v[1] = a[1] - scalar;
+ v[2] = a[2] - scalar;
+ v[3] = a[3] - scalar;
+}
+
+void un_m_mul_scalar2f(f32 *v, f32 *a, f32 scalar) {
+ v[0] = a[0] * scalar;
+ v[1] = a[1] * scalar;
+}
+
+void un_m_mul_scalar3f(f32 *v, f32 *a, f32 scalar) {
+ v[0] = a[0] * scalar;
+ v[1] = a[1] * scalar;
+ v[2] = a[2] * scalar;
+}
+
+void un_m_mul_scalar4f(f32 *v, f32 *a, f32 scalar) {
+ v[0] = a[0] * scalar;
+ v[1] = a[1] * scalar;
+ v[2] = a[2] * scalar;
+ v[3] = a[3] * scalar;
+}
+
+void un_m_div_scalar2f(f32 *v, f32 *a, f32 scalar) {
+ v[0] = a[0] / scalar;
+ v[1] = a[1] / scalar;
+}
+
+void un_m_div_scalar3f(f32 *v, f32 *a, f32 scalar) {
+ v[0] = a[0] / scalar;
+ v[1] = a[1] / scalar;
+ v[2] = a[2] / scalar;
+}
+
+void un_m_div_scalar4f(f32 *v, f32 *a, f32 scalar) {
+ v[0] = a[0] / scalar;
+ v[1] = a[1] / scalar;
+ v[2] = a[2] / scalar;
+ v[3] = a[3] / scalar;
+}
+
+void un_m_dot2f(f32 *v, f32 *a, f32 *b) {
+ *v = a[0] * b[0] + a[1] * b[1];
+}
+
+void un_m_dot3f(f32 *v, f32 *a, f32 *b) {
+ *v = a[0] * b[0] + a[1] * b[1] + a[2] * b[2];
+}
+
+void un_m_dot4f(f32 *v, f32 *a, f32 *b) {
+ *v = a[0] * b[0] + a[1] * b[1] + a[2] * b[2] + a[3] * b[3];
+}
+
+void un_m_hadamard2f(f32 *v, f32 *a, f32 *b) {
+ v[0] = a[0] * b[0];
+ v[1] = a[1] * b[1];
+}
+
+void un_m_hadamard3f(f32 *v, f32 *a, f32 *b) {
+ v[0] = a[0] * b[0];
+ v[1] = a[1] * b[1];
+ v[2] = a[2] * b[2];
+}
+
+void un_m_hadamard4f(f32 *v, f32 *a, f32 *b) {
+ v[0] = a[0] * b[0];
+ v[1] = a[1] * b[1];
+ v[2] = a[2] * b[2];
+ v[3] = a[3] * b[3];
+}
+
+void un_m_cross2f(f32 *v, f32 *a) {
+ v[0] = a[1];
+ v[1] = -a[0];
+}
+
+void un_m_cross3f(f32 *v, f32 *a, f32 *b) {
+ v[0] = a[1] * b[2] - a[2] * b[1];
+ v[1] = a[2] * b[0] - a[0] * b[2];
+ v[2] = a[0] * b[1] - a[1] * b[0];
+}
+
+void un_m_magnitude2f(f32 *v, f32 *a) {
+ *v = sqrtf(a[0] * a[0] + a[1] * a[1]);
+}
+
+void un_m_magnitude3f(f32 *v, f32 *a) {
+ *v = sqrtf(a[0] * a[0] + a[1] * a[1] + a[2] * a[2]);
+}
+
+void un_m_magnitude4f(f32 *v, f32 *a) {
+ *v = sqrtf(a[0] * a[0] + a[1] * a[1] + a[2] * a[2] + a[3] * a[3]);
+}
+
+void un_m_magnitude_sqr2f(f32 *v, f32 *a) {
+ *v = a[0] * a[0] + a[1] * a[1];
+}
+
+void un_m_magnitude_sqr3f(f32 *v, f32 *a) {
+ *v = a[0] * a[0] + a[1] * a[1] + a[2] * a[2];
+}
+
+void un_m_magnitude_sqr4f(f32 *v, f32 *a) {
+ *v = a[0] * a[0] + a[1] * a[1] + a[2] * a[2] + a[3] * a[3];
+}
+
+
+void un_m_distance_sqr2f(f32 *v, f32 *a, f32 *b) {
+ f32 f[2];
+ un_m_sub2f(f, b, a);
+ *v = f[0] * f[0] + f[1] * f[1];
+}
+
+void un_m_distance_sqr3f(f32 *v, f32 *a, f32 *b) {
+ f32 f[3];
+ un_m_sub3f(f, b, a);
+ *v = f[0] * f[0] + f[1] * f[1] + f[2] * f[2];
+}
+
+void un_m_distance_sqr4f(f32 *v, f32 *a, f32 *b) {
+ f32 f[4];
+ un_m_sub4f(f, b, a);
+ *v = f[0] * f[0] + f[1] * f[1] + f[2] * f[2] + f[3] * f[3];
+}
+
+void un_m_normalize2f(f32 *v, f32 *a) {
+ f32 f;
+ un_m_magnitude2f(&f, a);
+ v[0] = a[0] / f;
+ v[1] = a[1] / f;
+}
+
+void un_m_normalize3f(f32 *v, f32 *a) {
+ f32 f;
+ un_m_magnitude3f(&f, a);
+ v[0] = a[0] / f;
+ v[1] = a[1] / f;
+ v[2] = a[2] / f;
+}
+
+void un_m_normalize4f(f32 *v, f32 *a) {
+ f32 f;
+ un_m_magnitude4f(&f, a);
+ v[0] = a[0] / f;
+ v[1] = a[1] / f;
+ v[2] = a[2] / f;
+ v[3] = a[3] / f;
+}
+
+void un_m_project2f(f32 *v, f32 *a, f32 *onto) {
+ f32 dot, magn, scale;
+
+ un_m_dot2f(&dot, a, onto);
+ un_m_magnitude_sqr2f(&magn, onto);
+
+ if (magn < EPSILON) {
+ un_memory_set((void*)v, 0, sizeof(*v) * 2);
+ return;
+ }
+
+ scale = dot / magn;
+ v[0] = onto[0] * scale;
+ v[1] = onto[1] * scale;
+}
+
+void un_m_project3f(f32 *v, f32 *a, f32 *onto) {
+ f32 dot, magn, scale;
+
+ un_m_dot3f(&dot, a, onto);
+ un_m_magnitude_sqr3f(&magn, onto);
+
+ if (magn < EPSILON) {
+ un_memory_set((void*)v, 0, sizeof(*v) * 3);
+ return;
+ }
+
+ scale = dot / magn;
+ v[0] = onto[0] * scale;
+ v[1] = onto[1] * scale;
+ v[2] = onto[2] * scale;
+}
+
+void un_m_project4f(f32 *v, f32 *a, f32 *onto) {
+ f32 dot, magn, scale;
+
+ un_m_dot4f(&dot, a, onto);
+ un_m_magnitude_sqr4f(&magn, onto);
+
+ if (magn < EPSILON) {
+ un_memory_set((void*)v, 0, sizeof(*v) * 4);
+ return;
+ }
+
+ scale = dot / magn;
+ v[0] = onto[0] * scale;
+ v[1] = onto[1] * scale;
+ v[2] = onto[2] * scale;
+ v[3] = onto[3] * scale;
+}
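A hypothetical sketch of composing the out-parameter vector API above — `direction_along_axis` is illustrative; the un_m_* calls are from this commit:

```
/* Normalized direction from p0 to p1, projected onto an axis. */
void direction_along_axis(f32 *out, f32 *p0, f32 *p1, f32 *axis) {
    f32 dir[3];
    un_m_sub3f(dir, p1, p0);        /* dir = p1 - p0 */
    un_m_normalize3f(dir, dir);     /* dir = dir / |dir| */
    un_m_project3f(out, dir, axis); /* component of dir along axis */
}
```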
diff --git a/src/ungrateful.c b/src/ungrateful.c
index f0c2346..6bfd381 100644
--- a/src/ungrateful.c
+++ b/src/ungrateful.c
@@ -3,3 +3,6 @@
#include "un_memory.c"
#include "un_strings.c"
#include "un_list.c"
+#include "un_math.c"
+#include "un_vec.c"
+#include "un_splines.c"
diff --git a/src/ungrateful.h b/src/ungrateful.h
index e3c52cf..ff8c24b 100644
--- a/src/ungrateful.h
+++ b/src/ungrateful.h
@@ -60,6 +60,7 @@
#include <stdarg.h>
#include <limits.h>
#include <assert.h>
+#include <math.h>
#define UNUSED(x) (void)(x)
@@ -80,7 +81,6 @@
extern "C" {
#endif
-
typedef uint64_t u64;
typedef uint32_t u32;
typedef uint16_t u16;
@@ -97,6 +97,46 @@ typedef int8_t b8;
typedef float f32;
typedef double f64;
+#if defined(UN_DOUBLE_PRECISION)
+typedef double real;
+
+#ifndef PI
+# define PI 3.14159265358979323846
+#endif
+
+#ifndef EPSILON
+# define EPSILON 0.000001
+#endif
+
+#ifndef DEG2RAD
+# define DEG2RAD (PI/180.0)
+#endif
+
+#ifndef RAD2DEG
+# define RAD2DEG (180.0/PI)
+#endif
+
+#else
+typedef float real;
+
+#ifndef PI
+# define PI 3.14159265358979323846f
+#endif
+
+#ifndef EPSILON
+# define EPSILON 0.000001f
+#endif
+
+#ifndef DEG2RAD
+# define DEG2RAD (PI/180.0f)
+#endif
+
+#ifndef RAD2DEG
+# define RAD2DEG (180.0f/PI)
+#endif
+
+#endif // UN_DOUBLE_PRECISION
+
/* ---- Memory Allocators API ---- */
typedef enum {
@@ -186,6 +226,94 @@ extern s64 un_string_index_of_last(String input, u8 value);
extern String un_string_format(Allocator alloc, String buffer, ...);
extern String un_string_vformat(Allocator alloc, String buffer, va_list args);
+/* ---- math, and vecmath ---- */
+
+real un_m_lerpr(real a, real b, real t);
+f32 un_m_lerpf(f32 a, f32 b, f32 t);
+f64 un_m_lerpd(f64 a, f64 b, f64 t);
+
+/* 2d */
+void un_m_add2f(f32 *v, f32 *a, f32 *b);
+void un_m_sub2f(f32 *v, f32 *a, f32 *b);
+void un_m_add_scalar2f(f32 *v, f32 *a, f32 scalar);
+void un_m_sub_scalar2f(f32 *v, f32 *a, f32 scalar);
+void un_m_mul_scalar2f(f32 *v, f32 *a, f32 scalar);
+void un_m_div_scalar2f(f32 *v, f32 *a, f32 scalar);
+
+void un_m_dot2f(f32 *v, f32 *a, f32 *b);
+void un_m_hadamard2f(f32 *v, f32 *a, f32 *b);
+void un_m_project2f(f32 *v, f32 *a, f32 *onto);
+void un_m_cross2f(f32 *v, f32 *a);
+
+void un_m_normalize2f(f32 *v, f32 *a);
+void un_m_magnitude2f(f32 *v, f32 *a);
+void un_m_magnitude_sqr2f(f32 *v, f32 *a);
+void un_m_distance_sqr2f(f32 *v, f32 *a, f32 *b);
+
+/* 3d */
+void un_m_add3f(f32 *v, f32 *a, f32 *b);
+void un_m_sub3f(f32 *v, f32 *a, f32 *b);
+void un_m_add_scalar3f(f32 *v, f32 *a, f32 scalar);
+void un_m_sub_scalar3f(f32 *v, f32 *a, f32 scalar);
+void un_m_mul_scalar3f(f32 *v, f32 *a, f32 scalar);
+void un_m_div_scalar3f(f32 *v, f32 *a, f32 scalar);
+
+void un_m_dot3f(f32 *v, f32 *a, f32 *b);
+void un_m_hadamard3f(f32 *v, f32 *a, f32 *b);
+void un_m_project3f(f32 *v, f32 *a, f32 *onto);
+void un_m_cross3f(f32 *v, f32 *a, f32 *b);
+
+void un_m_normalize3f(f32 *v, f32 *a);
+void un_m_magnitude3f(f32 *v, f32 *a);
+void un_m_magnitude_sqr3f(f32 *v, f32 *a);
+void un_m_distance_sqr3f(f32 *v, f32 *a, f32 *b);
+
+/* 4d */
+void un_m_add4f(f32 *v, f32 *a, f32 *b);
+void un_m_sub4f(f32 *v, f32 *a, f32 *b);
+void un_m_add_scalar4f(f32 *v, f32 *a, f32 scalar);
+void un_m_sub_scalar4f(f32 *v, f32 *a, f32 scalar);
+void un_m_mul_scalar4f(f32 *v, f32 *a, f32 scalar);
+void un_m_div_scalar4f(f32 *v, f32 *a, f32 scalar);
+
+void un_m_dot4f(f32 *v, f32 *a, f32 *b);
+void un_m_hadamard4f(f32 *v, f32 *a, f32 *b);
+void un_m_project4f(f32 *v, f32 *a, f32 *onto);
+
+void un_m_normalize4f(f32 *v, f32 *a);
+void un_m_magnitude4f(f32 *v, f32 *a);
+void un_m_magnitude_sqr4f(f32 *v, f32 *a);
+void un_m_distance_sqr4f(f32 *v, f32 *a, f32 *b);
+
+/* ---- splines ---- */
+
+real un_m_bezierr(real a, real q0, real q1, real b, real t);
+f32 un_m_bezierf(f32 a, f32 q0, f32 q1, f32 b, f32 t);
+f64 un_m_bezierd(f64 a, f64 q0, f64 q1, f64 b, f64 t);
+
+/* ---- easing ---- */
+
+real un_m_ease_isiner(real t);
+real un_m_ease_iosiner(real t);
+real un_m_ease_osiner(real t);
+real un_m_ease_iquadr(real t);
+real un_m_ease_ioquadr(real t);
+real un_m_ease_oquadr(real t);
+
+f32 un_m_ease_isinef(f32 t);
+f32 un_m_ease_iosinef(f32 t);
+f32 un_m_ease_osinef(f32 t);
+f32 un_m_ease_iquadf(f32 t);
+f32 un_m_ease_ioquadf(f32 t);
+f32 un_m_ease_oquadf(f32 t);
+
+f64 un_m_ease_isined(f64 t);
+f64 un_m_ease_iosined(f64 t);
+f64 un_m_ease_osined(f64 t);
+f64 un_m_ease_iquadd(f64 t);
+f64 un_m_ease_ioquadd(f64 t);
+f64 un_m_ease_oquadd(f64 t);
+
#if defined(__cplusplus)
}
#endif
diff --git a/tests/un/math.c b/tests/un/math.c
new file mode 100644
index 0000000..6cc7cd8
--- /dev/null
+++ b/tests/un/math.c
@@ -0,0 +1,5 @@
+#include <ungrateful.h>
+
+int main(void) {
+
+}
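Given the "Math, not tested" commit message, a minimal sketch of what this empty test might grow into — the assertions are assumptions, not part of this commit; EPSILON comes from ungrateful.h:

```
#include <ungrateful.h>
#include <assert.h>
#include <math.h>

int main(void) {
    /* lerp is pinned at its endpoints and midpoint */
    assert(fabsf(un_m_lerpf(0.0f, 10.0f, 0.0f) -  0.0f) < EPSILON);
    assert(fabsf(un_m_lerpf(0.0f, 10.0f, 1.0f) - 10.0f) < EPSILON);
    assert(fabsf(un_m_lerpf(0.0f, 10.0f, 0.5f) -  5.0f) < EPSILON);

    /* easing curves map 0 -> 0 and 1 -> 1 */
    assert(fabsf(un_m_ease_iquadf(0.0f) - 0.0f) < EPSILON);
    assert(fabsf(un_m_ease_iquadf(1.0f) - 1.0f) < EPSILON);

    return 0;
}
```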