Linearize execution for single job
From: Autumn!
2 files changed, 70 insertions(+), 65 deletions(-)
Copy & paste the following snippet into your terminal to import this patchset into git:
curl -s https://lists.sr.ht/~autumnull/haredo-devel/patches/38192/mbox | git am -3
Signed-off-by: Autumn! <autumnull@posteo.net>
---
 README.md   |   2 +-
 src/main.ha | 133 +++++++++++++++++++++++++++-------------------
 2 files changed, 70 insertions(+), 65 deletions(-)

diff --git a/README.md b/README.md
index b9b1a6a..30d9dc7 100644
--- a/README.md
+++ b/README.md
@@ -20,7 +20,7 @@ Problems with `redo`:
 `haredo` solves all of these problems:
 - Script syntax is plain shell script
 - Only one command with few extraneous rules
-- Source code is absurdly simple (~250 lines)
+- Source code is extremely simple (~400 lines)
 - .do files are short and modular like in `redo`
 - Builds its dependency tree on the fly, uses no database
 - Doesn't break the build state when interrupted

diff --git a/src/main.ha b/src/main.ha
index a44862f..31308d9 100644
--- a/src/main.ha
+++ b/src/main.ha
@@ -17,11 +17,14 @@ use uuid;
 type context = struct {
 	verbose: bool,
 	quiet: bool,
+	indent: str,
+	toplevel: bool,
 	parent_timestamp: i64,
 	jobs: uint,
 	rd: io::file,
 	wr: io::file,
 };
+let tmpdir: str = "";
 
 // default environment variables
 const envprogs: [_](str, str) = [
@@ -36,9 +39,9 @@ const envprogs: [_](str, str) = [
 	("SCDOC", "scdoc"),
 ];
 
-let tmpdir: str = "";
-
 export fn main() void = {
+	// job slots for the -j option are handled by reading a byte from a pipe when
+	// a slot is acquired, and writing a byte to the pipe when a slot is freed.
 	let (rd, wr) = match (os::getenv("HAREDO_PIPE")) {
 	case void =>
 		yield unix::pipe(unix::pipe_flag::NOCLOEXEC)!;
@@ -48,16 +51,17 @@ export fn main() void = {
 		let wr = io::fdopen(strconv::stoi(wr)!);
 		yield (rd, wr);
 	};
+	// free a job slot if called from a parent .do script
+	io::write(wr, [0])!;
+	defer io::read(rd, [0])!;
 
+	const parent = os::getenv("HAREDO_PARENT");
 	let ctx = context {
 		verbose = os::getenv("HAREDO_VERBOSE") is str,
 		quiet = os::getenv("HAREDO_QUIET") is str,
-		parent_timestamp = match (os::getenv("HAREDO_PARENT")) {
-		case void =>
-			yield types::I64_MIN;
-		case let s: str =>
-			yield strconv::stoi64(s)!;
-		},
+		indent = os::tryenv("HAREDO_INDENT", ""),
+		toplevel = parent is void,
+		parent_timestamp = if (parent is void) types::I64_MIN else strconv::stoi64(parent as str)!,
 		jobs = strconv::stou(os::tryenv("HAREDO_JOBS", "1"))!,
 		rd = rd,
 		wr = wr,
@@ -65,7 +69,7 @@ export fn main() void = {
 	tmpdir = temp::dir();
 
 	const cmd = getopt::parse(os::args,
-		"simple and idiomatic build automator.\nsee `man haredo` for detailed usage.",
+		"simple and unix-idiomatic build automator.\nsee `man haredo` for detailed usage.",
 		('v', "print verbose logs"),
 		('q', "(quiet) don't print 'redo' logs"),
 		('j', "jobs", "run <jobs> jobs in parallel"),
@@ -86,15 +90,6 @@ export fn main() void = {
 			abort();
 		};
 	};
-
-	for (let i = 0z; i < ctx.jobs; i += 1) {
-		io::write(ctx.wr, [0])!;
-	};
-
-	defer for (let i = 0z; i < ctx.jobs; i += 1) {
-		io::read(ctx.rd, [0])!;
-	};
-
 	if (len(cmd.args) == 0) {
 		cmd.args = ["all"];
 	};
@@ -103,62 +98,50 @@ export fn main() void = {
 	defer {
 		for (let i = 0z; i < len(children); i += 1) {
			free(children[i].1);
-			io::write(ctx.wr, [0])!;
		};
		free(children);
	};
-	// first run sub-builds
+	if (ctx.toplevel) {
+		// initialize job slots
+		for (let i = 0z; i < ctx.jobs - 1; i += 1) {
+			io::write(ctx.wr, [0])!;
+		};
+	};
+
+	// first start sub-builds...
 	for (let i = 0z; i < len(cmd.args); i += 1) {
 		if (cmd.args[i] == "++") break;
-		let child = match (try_do(ctx, cmd.args[i])) {
+		let child = match (try_do(&ctx, cmd.args[i])) {
 		case void =>
 			continue;
 		case let child: (exec::process, str) =>
 			yield child;
 		case let e: exec::error =>
-			const indent = os::tryenv("HAREDO_INDENT", "");
 			fmt::fatalf("\x1b[31mharedo {}{} (error: {})\x1b[0m",
-				indent, cmd.args[i], exec::strerror(e));
+				ctx.indent, cmd.args[i], exec::strerror(e));
+		};
+		if (ctx.jobs > 1) {
+			append(children, (child.0, child.1, cmd.args[i]));
+		} else {
+			// if there's only one job (or zero), wait immediately for the child
+			const status = exec::wait(&child.0)!;
+			cleanup_child(&ctx, &status, child.1, cmd.args[i]);
 		};
-		append(children, (child.0, child.1, cmd.args[i]));
 	};
 
-	// ...then wait for them to finish
+	// ...then wait for them to finish...
 	for (len(children) != 0) {
 		const (child, status) = exec::waitany()!;
-		defer io::write(ctx.wr, [0])!;
-		let tmpfile: (str | void) = void;
-		let target: (str | void) = void;
-		for (let i = 0z; i < len(children); i += 1) {
-			if (child == children[i].0) {
-				tmpfile = children[i].1;
-				target = children[i].2;
-				delete(children[i]);
-			};
+		let i = 0z;
+		for (i < len(children); i += 1) {
+			if (child == children[i].0) break;
 		};
-		const tmpfile = tmpfile as str;
-		const target = target as str;
-		const indent = os::tryenv("HAREDO_INDENT", "");
I would add an assert(children[i].0 == child) in order to avoid silently doing the wrong thing if that loop falls off the end without finding something.
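For illustration, here is a minimal sketch of that lookup with such a check bolted on, reusing the names from the patch (the assert message is only an example); asserting that the index stayed in bounds expresses the same condition without risking an out-of-range access after the loop:

	// find which child waitany() reaped; abort loudly if it is unknown
	let i = 0z;
	for (i < len(children); i += 1) {
		if (child == children[i].0) break;
	};
	assert(i < len(children), "waitany() returned an unknown child");
	const tmpfile = children[i].1;
	const target = children[i].2;
	delete(children[i]);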
+		const tmpfile = children[i].1;
+		const target = children[i].2;
+		delete(children[i]);
 		defer free(tmpfile);
-
-		match (exec::check(&status)) {
-		case void => void;
-		case let e: !exec::exit_status =>
-			fmt::fatalf("\x1b[31mharedo {}{} (error: {})\x1b[0m",
-				indent, target, exec::exitstr(e));
-		};
-		match (os::move(tmpfile, target)) {
-		case void => void;
-		case errors::noentry => void;
-		case let e: fs::error =>
-			fmt::fatalf("\x1b[31mharedo {}{} (error: {})\x1b[0m",
-				indent, target, fs::strerror(e));
-		};
-		if (!ctx.quiet) {
-			errorfln("\x1b[32mharedo {}{} (done)\x1b[0m",
-				indent, target)!;
-		};
+		cleanup_child(&ctx, &status, tmpfile, target);
 	};
 
 	// ...then check for updates
@@ -189,14 +172,12 @@ type do_paths = struct {
 };
 
 fn try_do(
-	ctx: context,
+	ctx: *context,
 	target: str,
 ) ((exec::process, str) | void | exec::error) = {
-	const indent = os::tryenv("HAREDO_INDENT", "");
-
 	const dopaths = match (find_do_file(target)) {
 	case void =>
-		if (!ctx.quiet) errorfln("\x1b[33mharedo {}{} (no dofile)\x1b[0m", indent, target)?;
+		if (!ctx.quiet) errorfln("\x1b[33mharedo {}{} (no dofile)\x1b[0m", ctx.indent, target)?;
 		return;
 	case let s: do_paths =>
 		yield s;
@@ -218,7 +199,7 @@ fn try_do(
 
 	if (!ctx.quiet) {
 		// indent subprocesses by 2 spaces
-		const new_indent = strings::concat("  ", indent);
+		const new_indent = strings::concat("  ", ctx.indent);
 		defer free(new_indent);
 		exec::setenv(&cmd, "HAREDO_INDENT", new_indent)?;
 	};
@@ -237,16 +218,19 @@ fn try_do(
 	if (ctx.verbose) exec::setenv(&cmd, "HAREDO_VERBOSE", "1")?;
 	if (ctx.quiet) exec::setenv(&cmd, "HAREDO_QUIET", "1")?;
 
-	// set default program variables in toplevel process
-	if (os::getenv("HAREDO_PARENT") is void) {
+	if (ctx.toplevel) {
+		// set default program variables in toplevel process
 		for (let i = 0z; i < len(envprogs); i += 1) {
 			const (var, value) = envprogs[i];
 			exec::setenv(&cmd, var, os::tryenv(var, value))?;
 		};
-		exec::unsetenv(&cmd, "HAREDO_JOBS")?;
+
+		// set pipe for job control
 		let pipe = fmt::asprintf("{},{}", ctx.rd: int, ctx.wr: int);
 		defer free(pipe);
 		exec::setenv(&cmd, "HAREDO_PIPE", pipe)?;
+
+		exec::setenv(&cmd, "HAREDO_JOBS", strconv::utos(ctx.jobs))?;
 	};
 
 	const proc = match (exec::fork()?) {
@@ -255,7 +239,7 @@ fn try_do(
 	case void =>
 		io::read(ctx.rd, [0])!;
 		if (!ctx.quiet) errorfln("\x1b[32mharedo {}{}\x1b[0m",
-			indent, target)?;
+			ctx.indent, target)?;
 		os::chdir(dopaths.execdir)!;
 		exec::exec(&cmd);
 	};
@@ -265,6 +249,27 @@ fn try_do(
 	return (proc, tmpfilepath);
 };
 
+fn cleanup_child(ctx: *context, status: *exec::status, tmpfile: str, target: str) void = {
+	match (exec::check(status)) {
+	case void => void;
+	case let e: !exec::exit_status =>
+		fmt::fatalf("\x1b[31mharedo {}{} (error: {})\x1b[0m",
+			ctx.indent, target, exec::exitstr(e));
+	};
+	match (os::move(tmpfile, target)) {
+	case void => void;
+	case errors::noentry => void;
+	case let e: fs::error =>
+		fmt::fatalf("\x1b[31mharedo {}{} (error: {})\x1b[0m",
+			ctx.indent, target, fs::strerror(e));
+	};
+	if (!ctx.quiet) {
+		errorfln("\x1b[32mharedo {}{} (done)\x1b[0m",
+			ctx.indent, target)!;
+	};
+	io::write(ctx.wr, [0])!;
+};
+
 // finds the do file for a given target.
 // all strings in do_paths must be freed by the caller.
 fn find_do_file(target: str) (do_paths | void) = {
-- 
2.39.0
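For reference, the job-control scheme this patch settles on is a small counting semaphore over a pipe, much like make's jobserver: a byte sitting in the pipe is a free job slot, a forked child reads a byte before exec'ing its .do script to claim a slot, and a byte is written back once that child has been reaped. Distilled from the patch above, the three moving parts are roughly:

	// toplevel only: make jobs - 1 extra slots available
	// (the current process already occupies one slot)
	for (let i = 0z; i < ctx.jobs - 1; i += 1) {
		io::write(ctx.wr, [0])!;
	};

	// in the forked child, before exec: block until a slot is free
	io::read(ctx.rd, [0])!;

	// after the child has been reaped: return the slot to the pool
	io::write(ctx.wr, [0])!;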
Overall this looks good, but there are a few minor issues:
This needs to be kept in order to avoid a deadlock if something fails between when we start spawning children and when we're done waiting for them. In addition, for that defer to actually be triggered, cleanup_child also needs to return (void | whatever error types are appropriate) rather than exiting (either with ! or with fmt::fatal).
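A rough sketch of what that could look like; the exact error union is only indicative (it mirrors the !exec::exit_status and fs::error cases the patch already handles), and whether the job-slot write stays here behind a defer or moves back into main's defer is equally a design choice for the author:

	fn cleanup_child(
		ctx: *context,
		status: *exec::status,
		tmpfile: str,
		target: str,
	) (void | !exec::exit_status | fs::error) = {
		// hand the job slot back however we leave, so parents can't deadlock
		defer io::write(ctx.wr, [0])!;
		match (exec::check(status)) {
		case void => void;
		case let e: !exec::exit_status =>
			return e; // caller reports it, and main's defers still run
		};
		match (os::move(tmpfile, target)) {
		case void => void;
		case errors::noentry => void;
		case let e: fs::error =>
			return e;
		};
		if (!ctx.quiet) {
			errorfln("\x1b[32mharedo {}{} (done)\x1b[0m",
				ctx.indent, target)!;
		};
	};

The caller would then match on the result, print the red error line itself, and fall out of main normally so the defer block can release the remaining children's slots.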