From bc9f38439065914577f3c848fb42ae7c747477be Mon Sep 17 00:00:00 2001 From: Cesar Douady Date: Sat, 30 Mar 2024 23:12:06 +0100 Subject: [PATCH] fixed target clash fuzzy detection + implemented dep compression --- Makefile | 2 +- TO_DO | 11 +- src/autodep/gather.cc | 4 +- src/autodep/record.cc | 4 + src/ldump_job.cc | 24 ++-- src/lmakeserver/backend.cc | 41 +++---- src/lmakeserver/caches/dir_cache.cc | 70 ++++++----- src/lmakeserver/cmd.cc | 69 ++++++----- src/lmakeserver/codec.cc | 23 ++-- src/lmakeserver/job.cc | 166 ++++++++++++--------------- src/lmakeserver/job.x.hh | 30 ++--- src/lmakeserver/node.cc | 161 ++++++++++++++++++++------ src/lmakeserver/node.x.hh | 172 +++++++++++++++++++--------- src/lmakeserver/req.cc | 31 +++-- src/lmakeserver/store.cc | 39 +++---- src/lmakeserver/store.x.hh | 22 ++-- src/rpc_job.cc | 19 ++- src/rpc_job.hh | 51 ++++++--- src/store/alloc.hh | 141 ++++++++++++++--------- src/store/vector.hh | 10 +- unit_tests/conflict.py | 8 +- unit_tests/critical.py | 15 +-- 22 files changed, 643 insertions(+), 470 deletions(-) diff --git a/Makefile b/Makefile index ffcbb4db..a1d42f87 100644 --- a/Makefile +++ b/Makefile @@ -805,7 +805,7 @@ $(LMAKE_ENV)/stamp : $(LMAKE_ALL_FILES) $(LMAKE_ENV)/Manifest $(patsubst %,$(LMA @touch $@ @echo init $(LMAKE_ENV)-cache $(LMAKE_ENV)/tok : $(LMAKE_ENV)/stamp $(LMAKE_ENV)/Lmakefile.py - @set -e ; cd $(LMAKE_ENV) ; export CC=$(CC) ; $(ROOT_DIR)/bin/lmake lmake.tar.gz -Vn & sleep 1 ; $(ROOT_DIR)/bin/lmake lmake.tar.gz >$(@F) ; wait $$! ; touch $(@F) + @set -e ; cd $(LMAKE_ENV) ; export CC=$(CC) ; $(ROOT_DIR)/bin/lmake lmake.tar.gz -Vn & sleep 1 ; $(ROOT_DIR)/bin/lmake lmake.tar.gz >$(@F) || rm -f $(@F) ; wait $$! || rm -f $(@F) # # archive diff --git a/TO_DO b/TO_DO index d38d5001..ec001604 100644 --- a/TO_DO +++ b/TO_DO @@ -29,14 +29,12 @@ items : * improve lshow -i - generate info on nodes -* use Pdate (end of job) rather than Ddate (target date) to detect clash - - Ddate are not reliable - - ok to detect manual, nothing else * manage 32 bits executables - compile ld_audit.so (and co) in both 32 & 64 bits - put adequate $PLATFORM in LD_PRELOAD * generate meaningful message in case of I/O error such as disk full ! before erasing a dir, check for phony targets, not only files on disk + - then dir markers for gcc include dirs could be phony, which is more elegant ? mimic slurm killing procedure - use PR_SET_CHILD_SUBREAPER (cf man 2 prctl) to ensure we get all sub-processes - follow process hierarchy @@ -108,10 +106,9 @@ items : - much like faketree : https://github.com/enfabrica/enkit/tree/master/faketree - generalize tmp mapping * implement cache v2 (copy & link) : - - put typeid(StartInfo,EndInfo,...) as version tag to ensure no inter-version clashes - 2 levels : disk level, global level - use link instead of copy - - warning : only when mtime is used instead of ctime + - for disk level ! support direct rebuild of deps - specify a target flag 'direct' for use by pattern - specify a dep flag 'direct' for dynamic use (through ldepend) @@ -184,10 +181,6 @@ items : - most importantly sufixes ? maybe a pre-pass searching for infix is advisable * no req_info for non-buildable nodes -* make deps be a sequence of non-buildable nodes followed by a buildable node - - invent a new concept - - deps chain : Job -> NodeSeq -> Node -> Job - - store in a prefix file for best sharing and easy retieval * there can be 1 backend thread per backend - with one master socket - and everything replicated per backend (including mutexes, tables, etc.) 
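The dep compression implemented below (src/lmakeserver/node.cc and node.x.hh hunks) packs a run of trivial deps (plain accesses, crc==Crc::None, no dflags, not parallel) into a single 16-byte header Dep carrying the count (sz) and the shared accesses (chunk_accesses), followed by the sz node ids packed 4 per 16-byte slot ; DepsIter then re-expands them on the fly through a mutable template Dep. What follows is a minimal standalone C++ sketch of that layout, not lmake code : Slot, append_trivial and the size_t(-1) "hole" convention are invented stand-ins for Dep, _append_dep and Npos, and the chunk-closing step (_fill_hole folding the last packed node back into the header) is omitted.

    #include <cstdint>
    #include <iostream>
    #include <vector>

    using NodeIdx = uint32_t ;

    struct Slot {                                                        // stand-in for a 16-byte Dep slot
        NodeIdx node     = 0 ;                                           // plain dep : node id
        uint8_t sz       = 0 ;                                           // header    : number of deps packed behind it
        uint8_t accesses = 0 ;                                           // header    : accesses shared by the whole chunk
        uint8_t pad[10]  = {} ;
    } ;
    static_assert(sizeof(Slot)==16) ;
    constexpr size_t NodesPerSlot = sizeof(Slot)/sizeof(NodeIdx) ;       // 4, as Dep::NodesPerDep in the patch

    // append a trivial dep, extending the open chunk whose header sits at deps[hole], if any
    void append_trivial( std::vector<Slot>& deps , NodeIdx n , uint8_t accesses , size_t& hole ) {
        if ( hole==size_t(-1) || deps[hole].accesses!=accesses ) {       // open a new chunk
            hole = deps.size() ;
            Slot hdr ; hdr.accesses = accesses ;
            deps.push_back(hdr) ;
        }
        if (deps[hole].sz%NodesPerSlot==0) deps.emplace_back() ;         // last body slot is full, add one (may reallocate)
        reinterpret_cast<NodeIdx*>(&deps[hole+1])[deps[hole].sz++] = n ; // pack the raw id behind the header
    }

    int main() {
        std::vector<Slot> deps ; size_t hole = size_t(-1) ;
        for( NodeIdx n : {1,2,3,4,5} ) append_trivial(deps,n,1/*accesses*/,hole) ;
        std::cout << deps.size() << " slots for 5 deps\n" ;              // 3 slots instead of 5
        // decode : walk chunk by chunk, re-expanding packed ids with the shared accesses
        for( size_t i=0 ; i<deps.size() ; i+=1+(deps[i].sz+NodesPerSlot-1)/NodesPerSlot )
            for( uint8_t j=0 ; j<deps[i].sz ; j++ )
                std::cout << "node " << reinterpret_cast<NodeIdx const*>(&deps[i+1])[j]
                          << " accesses " << unsigned(deps[i].accesses) << '\n' ;
        return 0 ;
    }

In the real patch the header of a closed chunk additionally doubles as the last dep of the chunk, which is why DepsIter::operator* returns *hdr once i_chunk reaches hdr->sz.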
diff --git a/src/autodep/gather.cc b/src/autodep/gather.cc index 9dbf1310..e7cb943f 100644 --- a/src/autodep/gather.cc +++ b/src/autodep/gather.cc @@ -299,8 +299,8 @@ Status Gather::exec_child( ::vector_s const& args , Fd cstdin , Fd cstdout , Fd return false ; } ; auto set_status = [&]( Status status_ , ::string const& msg_={} )->void { - if (status==Status::New) status = status_ ; // else there is already another reason - if (+msg_ ) { set_nl(msg) ; msg += msg_ ; } + if ( status==Status::New || status==Status::Ok ) status = status_ ; // else there is already another reason + if ( +msg_ ) append_line_to_string(msg,msg_) ; } ; // SWEAR(!slaves) ; diff --git a/src/autodep/record.cc b/src/autodep/record.cc index 4243c763..589da022 100644 --- a/src/autodep/record.cc +++ b/src/autodep/record.cc @@ -173,6 +173,10 @@ Record::Mkdir::Mkdir( Record& r , Path&& path , ::string&& c ) : Solve{r,::move( if (file_loc==FileLoc::Repo) r._report_guard( ::move(real) , ::move(c) ) ; } +// note : in case the file is open WR_ONLY w/o O_TRUNC, it is true that the final content depends on the initial content. +// However : +// - if it is an official target, it is not a dep, whether you declare reading it or not +// - else, we do not compute a CRC on it and its actual content is not guaranteed. What is important in this case is that the execution of the job does not see the content. static bool _do_stat (int flags) { return flags&O_PATH ; } static bool _do_read (int flags) { return !_do_stat(flags) && (flags&O_ACCMODE)!=O_WRONLY && !(flags&O_TRUNC) ; } static bool _do_write(int flags) { return !_do_stat(flags) && (flags&O_ACCMODE)!=O_RDONLY ; } diff --git a/src/ldump_job.cc b/src/ldump_job.cc index 3b14928f..a1c8f317 100644 --- a/src/ldump_job.cc +++ b/src/ldump_job.cc @@ -96,19 +96,15 @@ int main( int argc , char* argv[] ) { if (argc!=2) exit(Rc::Usage,"usage : ldump_job file") ; app_init() ; // - IFStream job_stream{argv[1]} ; - try { - auto report_start = deserialize(job_stream) ; - ::cout << "eta : " << report_start.eta <<'\n' ; - ::cout << "host : " << SockFd::s_host(report_start.host) <<'\n' ; - print_submit_attrs(report_start.submit_attrs) ; - ::cout << "rsrcs :\n" ; _print_map(report_start.rsrcs) ; - print_pre_start (report_start.pre_start ) ; - print_start (report_start.start ) ; - } catch(...) {} - try { - auto report_end = deserialize(job_stream) ; - print_end(report_end.end) ; - } catch(...) 
{} + JobInfo job_info { argv[1] } ; + // + ::cout << "eta : " << job_info.start.eta <<'\n' ; + ::cout << "host : " << SockFd::s_host(job_info.start.host) <<'\n' ; + print_submit_attrs(job_info.start.submit_attrs) ; + ::cout << "rsrcs :\n" ; _print_map(job_info.start.rsrcs) ; + print_pre_start (job_info.start.pre_start ) ; + print_start (job_info.start.start ) ; + // + print_end(job_info.end.end) ; return 0 ; } diff --git a/src/lmakeserver/backend.cc b/src/lmakeserver/backend.cc index b36e71b4..91ea1975 100644 --- a/src/lmakeserver/backend.cc +++ b/src/lmakeserver/backend.cc @@ -336,8 +336,8 @@ namespace Backends { , .stderr = start_msg_err.second } ; trace("early",digest) ; - { OFStream ofs { dir_guard(jaf) } ; - serialize( ofs , JobInfoStart({ + JobInfo ji { + { .eta = eta , .submit_attrs = ::move(submit_attrs) , .rsrcs = rsrcs @@ -345,9 +345,10 @@ namespace Backends { , .pre_start = jrr , .start = ::move(reply) , .stderr = start_msg_err.second - }) ) ; - serialize( ofs , JobInfoEnd{ JobRpcReq{JobProc::End,jrr.seq_id,jrr.job,JobDigest(digest)} } ) ; - } + } + , { { JobProc::End , jrr.seq_id , jrr.job , ::copy(digest) } } + } ; + ji.write(jaf) ; job_exec = { job , reply.addr , file_date(jaf) , New } ; // job starts and ends //vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv g_engine_queue.emplace( JobProc::Start , ::copy(job_exec) , false/*report_now*/ , ::move(pre_actions.second) , ""s , ::move(jrr.msg ) ) ; @@ -462,7 +463,6 @@ namespace Backends { dep.acquire_crc() ; dd.crc_date(dep) ; } - for( auto& [tn,td] : jrr.digest.targets ) if (td.extra_tflags[ExtraTflag::Wash]) td.date = je.start_date.d ; // adjust wash date as start_date was not available in job ::string jaf = job->ancillary_file() ; serialize( OFStream(jaf,::ios::app) , JobInfoEnd{jrr} ) ; // /!\ _s_starting_job ensures ancillary file is written by _s_handle_job_start before we append to it job->end_exec() ; @@ -560,25 +560,22 @@ namespace Backends { status = _s_release_start_entry(it,hbs) ; trace("handle_job",job,entry,status) ; } - { ::string jaf = Job(job)->ancillary_file() ; - JobDigest jd { .status=status } ; + { Job j { job } ; + JobDigest jd { .status=status } ; if (status==Status::EarlyLostErr) { // if we do not retry, record run info - JobInfoStart jis { - .eta = eta - , .submit_attrs = submit_attrs - , .rsrcs = rsrcs - , .host = conn.host - , .pre_start { JobProc::None , conn.seq_id , job } - , .start { JobProc::None } - } ; - JobInfoEnd jie { - .end { JobProc::End , conn.seq_id , job , ::copy(jd) , ::copy(lost_report.first) } + JobInfo ji { + { .eta = eta + , .submit_attrs = submit_attrs + , .rsrcs = rsrcs + , .host = conn.host + , .pre_start { JobProc::None , conn.seq_id , job } + , .start { JobProc::None } + } + , { .end { JobProc::End , conn.seq_id , job , ::copy(jd) , ::copy(lost_report.first) } } } ; - OFStream os { dir_guard(jaf) } ; - serialize( os , jis ) ; - serialize( os , jie ) ; + j->write_job_info(ji) ; } - JobExec je { job , file_date(jaf) , New } ; // job starts and ends, no host + JobExec je { j , file_date(j->ancillary_file()) , New } ; // job starts and ends, no host //vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv g_engine_queue.emplace( JobProc::Start , ::copy(je) , false/*report_now*/ ) ; g_engine_queue.emplace( JobProc::End , ::move(je) , ::move(rsrcs) , ::move(jd) , ::move(lost_report.first) ) ; diff --git 
a/src/lmakeserver/caches/dir_cache.cc b/src/lmakeserver/caches/dir_cache.cc index b6f35f92..cfbedef2 100644 --- a/src/lmakeserver/caches/dir_cache.cc +++ b/src/lmakeserver/caches/dir_cache.cc @@ -272,28 +272,24 @@ namespace Caches { ::vector_s copied ; Trace trace("DirCache::download",job,id,jn) ; try { - JobInfoStart report_start ; - JobInfoEnd report_end ; - { LockedFd lock { dfd , false/*exclusive*/ } ; // because we read the data , shared is ok - IFStream is { to_string(dir,'/',jn,"/data") } ; - deserialize(is,report_start) ; - deserialize(is,report_end ) ; + JobInfo job_info ; + { LockedFd lock { dfd , false/*exclusive*/ } ; // because we read the data , shared is ok + job_info = { to_string(dir,'/',jn,"/data") } ; // update some info - report_start.pre_start.job = +job ; // id is not stored in cache - report_start.submit_attrs.reason = reason ; + job_info.start.pre_start.job = +job ; // id is not stored in cache + job_info.start.submit_attrs.reason = reason ; // - for( NodeIdx ti=0 ; tiancillary_file()) ; - OFStream os { dir_guard(copied.back()) } ; - serialize(os,report_start) ; - serialize(os,report_end ) ; + job_info.write(dir_guard(copied.back())) ; } // ensure we take a single lock at a time to avoid deadlocks // upload is the only one to take several locks @@ -302,7 +298,7 @@ namespace Caches { _lru_first(jn,sz_) ; trace("done",sz_) ; } - return report_end.end.digest ; + return job_info.end.end.digest ; } catch(::string const& e) { for( ::string const& f : copied ) unlnk(f) ; // clean up partial job trace("failed") ; @@ -314,38 +310,34 @@ namespace Caches { ::string jn = _unique_name(job,repo) ; Trace trace("DirCache::upload",job,jn) ; // - JobInfoStart report_start ; - JobInfoEnd report_end ; - try { - IFStream is { job->ancillary_file() } ; - deserialize(is,report_start) ; - deserialize(is,report_end ) ; - } catch (::string const& e) { - trace("no_ancillary_file",e) ; + JobInfo job_info = job->job_info() ; + if (!job_info.end.end.proc) { // we need a full report to cache job + trace("no_ancillary_file") ; return false/*ok*/ ; } // remove useless info - report_start.pre_start.seq_id = 0 ; // no seq_id since no execution - report_start.start .small_id = 0 ; // no small_id since no execution - report_start.pre_start.job = 0 ; // job_id may not be the same in the destination repo - report_start.eta = {} ; // dont care about timing info in cache - report_start.submit_attrs.reason = {} ; // cache does not care about original reason - report_start.rsrcs.clear() ; // caching resources is meaningless as they have no impact on content - for( auto& [tn,td] : report_end.end.digest.targets ) { - SWEAR(!td.polluted) ; // cannot be a candidate for upload as this must have failed + job_info.start.pre_start.seq_id = 0 ; // no seq_id since no execution + job_info.start.start .small_id = 0 ; // no small_id since no execution + job_info.start.pre_start.job = 0 ; // job_id may not be the same in the destination repo + job_info.start.eta = {} ; // dont care about timing info in cache + job_info.start.submit_attrs.reason = {} ; // cache does not care about original reason + job_info.start.rsrcs.clear() ; // caching resources is meaningless as they have no impact on content + for( auto& [tn,td] : job_info.end.end.digest.targets ) { + SWEAR(!td.polluted) ; // cannot be a candidate for upload as this must have failed td.date.clear() ; td.extra_tflags = {} ; } + job_info.end.end.digest.end_date = {} ; // check deps - for( auto const& [dn,dd] : report_end.end.digest.deps ) if (dd.is_date) return 
false/*ok*/ ; + for( auto const& [dn,dd] : job_info.end.end.digest.deps ) if (dd.is_date) return false/*ok*/ ; // mkdir(dir_fd,jn) ; AutoCloseFd dfd = open_read(dir_fd,jn) ; // // upload is the only one to take several locks and it starts with the global lock // this way, we are sure to avoid deadlocks - LockedFd lock2{ dir_fd , true/*exclusive*/ } ; // because we manipulate LRU and because we take several locks, need exclusive - LockedFd lock { dfd , true/*exclusive*/ } ; // because we write the data , need exclusive + LockedFd lock2{ dir_fd , true/*exclusive*/ } ; // because we manipulate LRU and because we take several locks, need exclusive + LockedFd lock { dfd , true/*exclusive*/ } ; // because we write the data , need exclusive // Sz old_sz = _lru_remove(jn) ; Sz new_sz = 0 ; @@ -356,8 +348,10 @@ namespace Caches { // store meta-data ::string data_file = to_string(dir,'/',jn,"/data") ; ::string deps_file = to_string(dir,'/',jn,"/deps") ; - { OFStream os { data_file } ; serialize(os,report_start) ; serialize(os,report_end) ; } - { OFStream os { deps_file } ; serialize(os,report_end.end.digest.deps) ; } // store deps in a compact format so that matching is fast + // + job_info.write(data_file) ; + serialize(OFStream(deps_file),job_info.end.end.digest.deps) ; // store deps in a compact format so that matching is fast + // /**/ new_sz += FileInfo(data_file ).sz ; /**/ new_sz += FileInfo(deps_file ).sz ; for( auto const& [tn,_] : digest.targets ) new_sz += FileInfo(nfs_guard.access(tn)).sz ; @@ -367,8 +361,8 @@ _copy( digest.targets[ti].first , dfd , to_string(ti) , false/*unlnk_dst*/ , true/*mk_read_only*/ ) ; } catch (::string const& e) { trace("failed",e) ; - unlnk_inside(dfd) ; // clean up in case of partial execution - _mk_room( made_room?new_sz:old_sz , 0 ) ; // finally, we did not populate the entry + unlnk_inside(dfd) ; // clean up in case of partial execution + _mk_room( made_room?new_sz:old_sz , 0 ) ; // finally, we did not populate the entry return false/*ok*/ ; } _lru_first(jn,new_sz) ; diff --git a/src/lmakeserver/cmd.cc b/src/lmakeserver/cmd.cc index aa1ad0cb..1ea2dad4 100644 --- a/src/lmakeserver/cmd.cc +++ b/src/lmakeserver/cmd.cc @@ -157,10 +157,11 @@ namespace Engine { w = ::max( w , k.size() ) ; rev_map[d.first] = k ; } - for( NodeIdx d=0 ; d<job->deps.size() ; d++ ) { - Dep const& dep = job->deps[d] ; - bool cdp = d >0 && dep .parallel ; - bool ndp = d+1<job->deps.size() && job->deps[d+1].parallel ; + ::vector<bool> parallel ; for( Dep const& d : job->deps ) parallel.push_back(d.parallel) ; // first pass to count deps as they are compressed and size is not known upfront + NodeIdx d = 0 ; + for( Dep const& dep : job->deps ) { + bool cdp = d >0 && parallel[d ] ; + bool ndp = d+1<parallel.size() && parallel[d+1] ; ::string dep_key = +dep ? rev_map.at(dep->name()) : ""s ; ::string pfx = to_string( dep.dflags_str() ,' ', dep.accesses_str() , ' ' , ::setw(w) , dep_key ,' ') ; if ( !cdp && !ndp ) pfx.push_back(' ' ) ; @@ -168,6 +169,7 @@ namespace Engine { else if ( cdp && ndp ) pfx.push_back('|' ) ; else pfx.push_back('\\') ; _send_node( fd , ro , show_deps==Yes , (Maybe&!dep.dflags[Dflag::Required])|hide , pfx , dep , lvl+1 ) ; + d++ ; } } @@ -438,13 +440,13 @@ R"({ if (!job ) throw "no job found"s ; if (job->rule->is_special()) throw to_string("cannot debug ",job->rule->name," jobs") ; // - IFStream job_stream { job->ancillary_file() } ; - JobInfoStart report_start ; - JobInfoEnd report_end ; - try { deserialize(job_stream,report_start) ; } catch (...)
{ audit( fd , ro , Color::Err , "no info available" ) ; return false ; } - try { deserialize(job_stream,report_end ) ; } catch (...) { } // we can debug w/o report_end + JobInfo job_info = job->job_info() ; + if (!job_info.start.start.proc) { + audit( fd , ro , Color::Err , "no info available" ) ; + return false ; + } // - JobRpcReply const& start = report_start.start ; + JobRpcReply const& start = job_info.start.start ; bool redirected = +start.stdin || +start.stdout ; ::string dbg_dir = job->ancillary_file(AncillaryTag::Dbg) ; ::string script_file = dbg_dir+"/script" ; @@ -457,12 +459,12 @@ R"({ , "coolchyni.beyond-debug" } ; // - ::string script = _mk_script( job , ro.flags , report_start , report_end , dbg_dir , true/*with_cmd*/ , vs_ext ) ; - ::string cmd = _mk_cmd ( job , ro.flags , start , dbg_dir , redirected ) ; - ::string vscode = _mk_vscode( job , report_start , report_end , dbg_dir , vs_ext ) ; + ::string script = _mk_script( job , ro.flags , job_info.start , job_info.end , dbg_dir , true/*with_cmd*/ , vs_ext ) ; + ::string cmd = _mk_cmd ( job , ro.flags , start , dbg_dir , redirected ) ; + ::string vscode = _mk_vscode( job , job_info.start , job_info.end , dbg_dir , vs_ext ) ; // - OFStream(dir_guard(script_file)) << script ; ::chmod(script_file.c_str(),0755) ; // make executable - OFStream(dir_guard(cmd_file )) << cmd ; ::chmod(cmd_file .c_str(),0755) ; // . + OFStream(dir_guard(script_file)) << script ; ::chmod(script_file.c_str(),0755) ; // make executable + OFStream(dir_guard(cmd_file )) << cmd ; ::chmod(cmd_file .c_str(),0755) ; // . OFStream(dir_guard(vscode_file)) << vscode ; // audit( fd , ro , script_file , true/*as_is*/ ) ; @@ -506,16 +508,11 @@ R"({ static void _show_job( Fd fd , ReqOptions const& ro , Job job , DepDepth lvl=0 ) { Trace trace("show_job",ro.key,job) ; Rule rule = job->rule ; - IFStream job_stream { job->ancillary_file() } ; - JobInfoStart report_start ; - JobInfoEnd report_end ; + JobInfo job_info = job->job_info() ; bool has_start = false ; bool has_end = false ; bool verbose = ro.flags[ReqFlag::Verbose] ; - JobDigest const& digest = report_end.end.digest ; - try { deserialize(job_stream,report_start) ; has_start = true ; } catch (...) { goto Go ; } - try { deserialize(job_stream,report_end ) ; has_end = true ; } catch (...) { goto Go ; } - Go : + JobDigest const& digest = job_info.end.end.digest ; switch (ro.key) { case ReqKey::Cmd : case ReqKey::Env : @@ -538,8 +535,8 @@ R"({ break ; DF} } else { - JobRpcReq const& pre_start = report_start.pre_start ; - JobRpcReply const& start = report_start.start ; + JobRpcReq const& pre_start = job_info.start.pre_start ; + JobRpcReply const& start = job_info.start.start ; bool redirected = +start.stdin || +start.stdout ; // if (pre_start.job) SWEAR(pre_start.job==+job,pre_start.job,+job) ; @@ -547,14 +544,14 @@ R"({ switch (ro.key) { case ReqKey::Env : { if (!has_start) { audit( fd , ro , Color::Err , "no info available" , true/*as_is*/ , lvl ) ; break ; } - ::vmap_ss env = _mk_env(start.env,report_end.end.dynamic_env) ; + ::vmap_ss env = _mk_env(start.env,job_info.end.end.dynamic_env) ; size_t w = 0 ; for( auto const& [k,v] : env ) w = ::max(w,k.size()) ; for( auto const& [k,v] : env ) audit( fd , ro , to_string(::setw(w),k," : ",v) , true/*as_is*/ , lvl ) ; } break ; - case ReqKey::ExecScript : //! 
as_is - if (!has_start) audit( fd , ro , Color::Err , "no info available" , true , lvl ) ; - else audit( fd , ro , _mk_script(job,ro.flags,report_start,report_end,ro.flag_args[+ReqFlag::Debug],false/*with_cmd*/) , true , lvl ) ; + case ReqKey::ExecScript : //! as_is + if (!has_start) audit( fd , ro , Color::Err , "no info available" , true , lvl ) ; + else audit( fd , ro , _mk_script(job,ro.flags,job_info.start,job_info.end,ro.flag_args[+ReqFlag::Debug],false/*with_cmd*/) , true , lvl ) ; break ; case ReqKey::Cmd : { //! as_is if (!has_start) audit( fd , ro , Color::Err , "no info available" , true , lvl ) ; @@ -571,9 +568,9 @@ R"({ if (has_start) { if (verbose) audit( fd , ro , Color::Note , pre_start.msg , false/*as_is*/ , lvl+1 ) ; } - if (has_end) { //! as_is - if (verbose) audit( fd , ro , Color::Note , report_end.end.msg , false , lvl+1 ) ; - /**/ audit( fd , ro , digest.stderr , true , lvl+1 ) ; + if (has_end) { //! as_is + if (verbose) audit( fd , ro , Color::Note , job_info.end.end.msg , false , lvl+1 ) ; + /**/ audit( fd , ro , digest.stderr , true , lvl+1 ) ; } break ; case ReqKey::Info : { @@ -611,7 +608,7 @@ R"({ else push_entry("required by",localize(mk_file( n ->name()),ro.startup_dir_s)) ; } if (has_start) { - JobInfoStart const& rs = report_start ; + JobInfoStart const& rs = job_info.start ; SubmitAttrs const& sa = rs.submit_attrs ; ::string cwd = rs.start.cwd_s.substr(0,rs.start.cwd_s.size()-1) ; ::string tmp_dir = rs.start.autodep_env.tmp_dir ; @@ -641,7 +638,7 @@ R"({ if (sa.tag!=BackendTag::Local ) push_entry("backend" ,snake_str(sa.tag) ) ; } // - ::map_ss allocated_rsrcs = mk_map(report_start.rsrcs ) ; + ::map_ss allocated_rsrcs = mk_map(job_info.start.rsrcs) ; ::map_ss required_rsrcs ; try { Rule::SimpleMatch match ; @@ -669,9 +666,9 @@ R"({ } } // - if (+report_start.pre_start.msg) push_entry("start message",localize(report_start.pre_start.msg,ro.startup_dir_s) ) ; - if (+report_start.stderr ) push_entry("start stderr" ,report_start.stderr ,Color::Warning) ; - if (+report_end.end.msg ) push_entry("message" ,localize(report_end.end .msg,ro.startup_dir_s) ) ; + if (+job_info.start.pre_start.msg) push_entry("start message",localize(job_info.start.pre_start.msg,ro.startup_dir_s) ) ; + if (+job_info.start.stderr ) push_entry("start stderr" ,job_info.start.stderr ,Color::Warning) ; + if (+job_info.end.end.msg ) push_entry("message" ,localize(job_info.end.end .msg,ro.startup_dir_s) ) ; // generate output if (porcelaine) { auto audit_rsrcs = [&]( ::string const& k , ::map_ss const& rsrcs , bool allocated )->void { diff --git a/src/lmakeserver/codec.cc b/src/lmakeserver/codec.cc index 3f2861f4..81813607 100644 --- a/src/lmakeserver/codec.cc +++ b/src/lmakeserver/codec.cc @@ -72,9 +72,9 @@ namespace Codec { static bool _buildable_ok( ::string const& file , Node node ) { switch (node->buildable) { case Buildable::No : - case Buildable::Unknown : return false ; + case Buildable::Unknown : return false ; case Buildable::Decode : - case Buildable::Encode : return node->date==Closure::s_tab.at(file).log_date ; + case Buildable::Encode : return node->date.d==Closure::s_tab.at(file).log_date ; DF} } @@ -173,7 +173,7 @@ namespace Codec { for( auto const& [val,code] : e_entry ) process_node(ctx,code,val) ; } // wrap up - Ddate log_date = s_tab.at(file).log_date ; + FullDate log_date = s_tab.at(file).log_date ; for( Node n : nodes ) n->date = log_date ; trace("done",nodes.size()/2) ; } @@ -182,7 +182,7 @@ namespace Codec { auto [it,inserted] = s_tab.try_emplace(file,Entry()) ; Entry& 
entry = it->second ; if (!inserted) { - for( ReqIdx r : reqs ) if ( entry.sample_date < Req(r)->start_date.p ) goto Refresh ; // we sample disk once per Req + for( ReqIdx r : reqs ) if ( entry.sample_date < Req(r)->start_date.p ) goto Refresh ; // we sample disk once per Req return true/*ok*/ ; } Refresh : @@ -202,9 +202,9 @@ namespace Codec { entry.sample_date = New ; if (inserted) { Node node{ni} ; - if ( inserted && node->buildable==Buildable::Decode ) entry.phys_date = entry.log_date = node->date ; // initialize from known info + if ( inserted && node->buildable==Buildable::Decode ) entry.phys_date = entry.log_date = node->date.d ; // initialize from known info } - if ( phys_date==entry.phys_date ) return true/*ok*/ ; // file has not changed, nothing to do + if ( phys_date==entry.phys_date ) return true/*ok*/ ; // file has not changed, nothing to do entry.log_date = phys_date ; // _s_canonicalize(file,reqs) ; @@ -255,10 +255,11 @@ namespace Codec { trace("new_code",code) ; OFStream(file,::ios::app) << _codec_line(ctx,code,txt,true/*with_nl*/) ; Entry& entry = s_tab.at(file) ; + Pdate now { New } ; _create_pair( file , decode_node , txt , encode_node , code ) ; - decode_node->date = entry.log_date ; - encode_node->date = entry.log_date ; - entry.phys_date = file_date(file) ; // we have touched the file but not the semantic, update phys_date but not log_date + decode_node->date = {entry.log_date,now} ; + encode_node->date = {entry.log_date,now} ; + entry.phys_date = file_date(file) ; // we have touched the file but not the semantic, update phys_date but not log_date // trace("found",code) ; return JobRpcReply( JobProc::Encode , code , encode_node->crc , Yes ) ; @@ -268,10 +269,10 @@ namespace Codec { Node node { ni } ; SWEAR( node->is_decode() || node->is_encode() ) ; ::string file = mk_file(node->name()) ; if ( !Closure::s_refresh( file , ni , {r} ) ) { - node->refresh( Crc::None , Closure::s_tab.at(file).log_date ) ; + node->refresh(Crc::None) ; return false/*ok*/ ; } - return node->date!=Closure::s_tab.at(file).log_date ; + return node->crc!=Crc::None && node->date.d!=Closure::s_tab.at(file).log_date ; } void codec_thread_func(Closure const& cc) { diff --git a/src/lmakeserver/job.cc b/src/lmakeserver/job.cc index b971900f..a6583752 100644 --- a/src/lmakeserver/job.cc +++ b/src/lmakeserver/job.cc @@ -40,7 +40,7 @@ namespace Engine { else if (!t.tflags[Tflag::Incremental]) fat = FileActionTag::Unlnk ; else if ( t.tflags[Tflag::NoUniquify ]) fat = FileActionTag::None ; else fat = FileActionTag::Uniquify ; - FileAction fa { fat , t->crc , t->crc==Crc::None?Ddate():t->date } ; + FileAction fa { fat , t->crc , t->date.d } ; // trace("wash_target",t,fa) ; switch (fat) { @@ -125,7 +125,7 @@ namespace Engine { // ::ostream& operator<<( ::ostream& os , JobReqInfo const& ri ) { - return os<<"JRI(" << ri.req <<','<< (ri.full?"full":"makable") <<','<< ri.speculate <<','<< ri.step()<<':'<Status::Early ) { // if early, we have not touched the targets, not even washed them, if lost, old targets are better than new ones // - ::uset old_targets ; - for( Node t : (*this)->targets ) if (t->has_actual_job(*this)) { - t->actual_job().clear() ; // ensure targets we no more generate do not keep pointing to us - old_targets.insert(t) ; - } + for( Node t : (*this)->targets ) if (t->has_actual_job(*this)) t->actual_job() = {} ; // ensure targets we no more generate do not keep pointing to us // ::vector targets ; targets.reserve(digest.targets.size()) ; for( auto const& [tn,td] : digest.targets ) { @@ -366,25 
+362,28 @@ namespace Engine { Crc crc = td.crc ; bool target_modified = false ; // - SWEAR( !( tflags[Tflag::Target] && crc==Crc::None && !static_phony ) , tn , td ) ; // else job_exec should have suppressed the Target flag + SWEAR( !( tflags[Tflag::Target] && crc==Crc::None && !static_phony ) , tn , td ) ; // else job_exec should have suppressed the Target flag // target->set_buildable() ; // if (+crc) { // file dates are very fuzzy and unreliable, at least, filter out targets we generated ourselves - if ( +start_date.d && target->date>start_date.d && !old_targets.contains(target) ) { // if no start_date.d, job did not execute, it cannot generate a clash + if ( +start_date.p && target->date.p>start_date.p ) { // if no start_date.p, job did not execute, it cannot generate a clash // /!\ This may be very annoying ! // A job was running in parallel with us and there was a clash on this target. // There are 2 problems : for us and for them. // For us, it's ok, we will rerun. - // But for them, they are already done, possibly some dependent jobs are done, possibly even Req's are already done and we may have reported ok to the user, all that is wrong + // But for them, they are already done, possibly some dependent jobs are done, possibly even Req's are already done and we may have reported ok to the user, + // and all that is wrong. // This is too complex and too rare to detect (and ideally handle). // Putting target in clash_nodes will generate a frightening message to user asking to relaunch all commands that were running in parallel. - if (crc.valid()) - target_reason |= {JobReasonTag::ClashTarget,+target} ; // crc is actually unreliable, rerun - if ( target->crc.valid() && !target->is_src_anti() ) { // existing crc was believed to be reliable but actually was not (if no execution, there is no problem) - trace("critical_clash",start_date.d,target->date) ; - for( Req r : target->reqs() ) { + if ( crc.valid() && td.tflags[Tflag::Target] ) { // official targets should have a valid crc, but if not, we dont care + trace("clash",start_date.p,target->date.p) ; + target_reason |= {JobReasonTag::ClashTarget,+target} ; // crc is actually unreliable, rerun + } + if ( target->crc.valid() && target->has_actual_job() && target->actual_tflags()[Tflag::Target] && !target->is_src_anti() ) { // existing crc was believed to be reliable ... + trace("critical_clash",start_date.p,target->date.p) ; // ... but actually was not (if no execution, ... + for( Req r : target->reqs() ) { // ...
there is no problem) r->clash_nodes.emplace(target,r->clash_nodes.size()) ; target->req_info(r).reset() ; // best effort to trigger re-analysis but this cannot be guaranteed (fooled req may be gone) } @@ -403,8 +402,8 @@ namespace Engine { SourceOk : ; } // - target_modified = target->refresh( crc , crc==Crc::None?end_date.d:td.date ) ; - modified |= target_modified && tflags[Tflag::Target] ; + target_modified = target->refresh( crc , { td.date , td.extra_tflags[ExtraTflag::Wash]?start_date.p:end_date.p } ) ; + modified |= target_modified && tflags[Tflag::Target] ; } if ( crc==Crc::None && !static_phony ) { target->actual_job () = {} ; @@ -419,6 +418,8 @@ namespace Engine { targets.emplace_back( target , tflags ) ; if (td.polluted) target_reason |= {JobReasonTag::PrevTarget,+target} ; trace("target",target,td,STR(target_modified)) ; + } else { + trace("not_target",target,td) ; } } ::sort(targets) ; // ease search in targets @@ -521,33 +522,26 @@ namespace Engine { bool update_deps = seen_dep_date && full_ok ; // if full_ok, all deps have been resolved and we can update the record for a more reliable info bool update_msg = all_done && (+err_reason||+local_msg||+severe_msg) ; // if recorded local_msg was incomplete, update it if ( update_deps || update_msg ) { - ::string jaf = (*this)->ancillary_file() ; - try { - IFStream is{jaf} ; - auto report_start = deserialize<JobInfoStart>(is) ; - auto report_end = deserialize<JobInfoEnd>(is) ; - bool updated = false ; - if (update_msg) { - append_line_to_string( report_end.end.msg , +err_reason ? reason_str(err_reason)+'\n' : local_msg , severe_msg ) ; - updated = true ; - } - if (update_deps) { - ::vmap_s<DepDigest>& dds =report_end.end.digest.deps ; - SWEAR(dds.size()==(*this)->deps.size()) ; - for( NodeIdx di=0 ; di<dds.size() ; di++ ) if (dds[di].second.is_date) dds[di].second.crc_date((*this)->deps[di]) ; + JobInfo ji = (*this)->job_info() ; + bool updated = false ; + if (update_msg) { + append_line_to_string( ji.end.end.msg , +err_reason ? reason_str(err_reason)+'\n' : local_msg , severe_msg ) ; + updated = true ; + } + if (update_deps) { + ::vmap_s<DepDigest>& dds = ji.end.end.digest.deps ; + NodeIdx di = 0 ; + for( Dep const& d : (*this)->deps ) { + DepDigest& dd = dds[di].second ; + if (dd.is_date) { + dd.crc_date(d) ; updated |= !dd.is_date ; // in case of ^C, dep.make may not transform date into crc } + di++ ; } - if (updated) { - OFStream os{jaf} ; - serialize(os,report_start) ; - serialize(os,report_end ) ; - } + SWEAR(di==dds.size()) ; // deps must be coherent between ancillary file and internal info } - catch (...) {} // in case ancillary file cannot be read, dont record and ignore + if (updated) (*this)->write_job_info(ji) ; } // as soon as job is done for a req, it is meaningful and justifies to be cached, in practice all reqs agree most of the time if ( full_ok && +cache_none_attrs.key ) { // cache only successful results @@ -620,10 +614,10 @@ namespace Engine { Trace trace("set_pressure",idx(),ri,pressure) ; Req req = ri.req ; CoarseDelay dep_pressure = ri.pressure + best_exec_time().first ; switch (ri.step()) { //!
vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv + case JobStep::Dep : for( DepsIter it{deps,ri.iter }; it!=deps.end() ; it++ ) (*it)-> set_pressure( (*it)->req_info(req) , dep_pressure ) ; break ; + case JobStep::Queued : Backend::s_set_pressure( ri.backend , +idx() , +req , {.pressure=dep_pressure} ) ; break ; + default : ; //! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ } } @@ -692,7 +686,6 @@ namespace Engine { bool need_run = r.need_run() || ri.reason().need_run() ; return ri.full && care && (need_run||archive) ; } ; - Idx n_deps = deps.size() ; RestartAnalysis : // restart analysis here when it is discovered we need deps to run the job bool stamped_seen_waiting = false ; bool proto_seen_waiting = false ; @@ -703,30 +696,26 @@ namespace Engine { ri.speculative_deps = false ; // initially, we are not speculatively waiting ri.stamped_modif = ri.proto_modif = ri.new_cmd ; // - for ( auto [cur_i_dep,cur_reason] = ::pair(ri.i_dep,ri.reasons.first) ; SWEAR(cur_i_dep<=n_deps,cur_i_dep,n_deps),true ; cur_i_dep++ ) { + JobReason cur_reason = ri.reasons.first ; + for ( DepsIter iter {deps,ri.iter} ; true ; iter++ ) { if (!proto_seen_waiting) { - ri.i_dep = cur_i_dep ; // fast path : info is recorded in ri, next time, restart analysis here - ri.reasons.first = cur_reason ; + ri.iter = iter.digest(deps) ; // fast path : info is recorded in ri, next time, restart analysis here + ri.reasons.first = cur_reason ; } // - bool seen_all = cur_i_dep==n_deps ; - Dep const& dep = seen_all ? Sentinel : deps[cur_i_dep] ; // use empty dep as sentinel + bool seen_all = iter==deps.end() ; + Dep const& dep = seen_all ? Sentinel : *iter ; // use empty dep as sentinel if ( !dep.parallel || seen_all ) { ri.stamped_err = ri.proto_err ; // proto become stamped upon sequential dep ri.stamped_modif = ri.proto_modif ; // . - if ( critical_modif && !seen_all ) { - NodeIdx j = cur_i_dep ; - for( NodeIdx i=cur_i_dep ; i static_deps ; + for( DepsIter it=iter ; it!=deps.end() ; it++ ) if (it->dflags[Dflag::Static]) { + static_deps.push_back(*it) ; + static_deps.back().accesses = {} ; } + deps.replace_tail(iter,static_deps) ; + seen_all = !static_deps ; } if ( seen_all || critical_waiting ) break ; stamped_seen_waiting = proto_seen_waiting ; @@ -864,9 +853,8 @@ namespace Engine { if ( auto it = req->missing_audits.find(idx()) ; it!=req->missing_audits.end() && !req.zombie() ) { JobAudit const& ja = it->second ; trace("report_missing",ja) ; - IFStream job_stream { ancillary_file() } ; - /**/ deserialize(job_stream) ; - ::string stderr = deserialize(job_stream).end.digest.stderr ; + JobInfo ji = job_info() ; + ::string const& stderr = ji.end.end.digest.stderr ; // if (ja.report!=JobReport::Hit) { // if not Hit, then job was rerun and ja.report is the report that would have been done w/o rerun SWEAR(req->stats.ended(JobReport::Rerun)>0) ; @@ -935,8 +923,7 @@ namespace Engine { else return "frozen file does not exist while not phony\n" ; case Special::Infinite : { ::string res ; - for( Dep const& d : ::c_vector_view(deps.items(),g_config.n_errs(deps.size())) ) append_to_string( res , d->name() , '\n' ) ; - if ( g_config.errs_overflow(deps.size()) ) append_to_string( res , "..." 
, '\n' ) ; + for( Dep const& d : deps ) append_to_string( res , d->name() , '\n' ) ; return res ; } default : @@ -954,31 +941,28 @@ namespace Engine { // switch (special) { case Special::Plain : { - SWEAR(frozen_) ; // only case where we are here without special rule + SWEAR(frozen_) ; // only case where we are here without special rule SpecialStep special_step = SpecialStep::Idle ; Node worst_target ; Bool3 modified = No ; NfsGuard nfs_guard { g_config.reliable_dirs } ; for( Target t : targets ) { - ::string tn = t->name() ; - FileInfo fi { nfs_guard.access(tn) } ; - bool plain = t->crc.valid() && t->crc.exists() ; - SpecialStep ss = {}/*garbage*/ ; - if ( plain && +fi && fi.date==t->date ) { + ::string tn = t->name() ; + FileInfo fi { nfs_guard.access(tn) } ; + SpecialStep ss = {}/*garbage*/ ; + if ( t->crc.valid() && fi.date==t->date.d ) { ss = SpecialStep::Idle ; } else { - Trace trace("src",fi.date,t->date) ; - Crc crc { tn , g_config.hash_algo } ; - Ddate date = fi.date ; // we cant do much if !fi as we have no date to put here - modified |= crc.match(t->crc) ? No : !plain ? Maybe : Yes ; - //vvvvvvvvvvvvvvvvvvvvvv - t->refresh( crc , date ) ; - //^^^^^^^^^^^^^^^^^^^^^^ - // if file disappeared, there is not way to know at which date, we are optimistic here as being pessimistic implies false overwrites - if (!crc.valid() ) ss = SpecialStep::Err ; - else if (+fi ) ss = SpecialStep::Ok ; - else if ( t.tflags[Tflag::Target] && t.tflags[Tflag::Static] ) ss = SpecialStep::Err ; - else { t->actual_job().clear() ; ss = SpecialStep::Idle ; } // unlink of a star target is nothing + Ddate dd ; + Crc crc { dd , tn , g_config.hash_algo } ; + modified |= crc.match(t->crc) ? No : t->crc.valid() ? Yes : Maybe ; + Trace trace( "frozen" , t->crc ,"->", crc , t->date ,"->", dd ) ; + //vvvvvvvvvvvvvvvvvvvvvvvvv + t->refresh( crc , {dd,{}} ) ; // if file disappeared, there is no way to know at which date, we are optimistic here as being pessimistic implies false overwrites + //^^^^^^^^^^^^^^^^^^^^^^^^^ + if ( crc!=Crc::None || t.tflags[Tflag::Phony] ) ss = SpecialStep::Ok ; + else if ( t.tflags[Tflag::Target] && t.tflags[Tflag::Static] ) ss = SpecialStep::Err ; + else { t->actual_job() = {} ; ss = SpecialStep::Idle ; } // unlink of a star or side target is nothing } if (ss>special_step) { special_step = ss ; worst_target = t ; } } @@ -1157,15 +1141,15 @@ namespace Engine { } bool/*ok*/ JobData::forget( bool targets_ , bool deps_ ) { - Trace trace("Jforget",idx(),STR(targets_),STR(deps_),deps,deps.size()) ; - for( Req r : running_reqs() ) { (void)r ; return false ; } // ensure job is not running + Trace trace("Jforget",idx(),STR(targets_),STR(deps_),mk_vector(deps)) ; + for( Req r : running_reqs() ) { (void)r ; return false ; } // ensure job is not running status = Status::New ; - fence() ; // once status is New, we are sure target is not up to date, we can safely modify it + fence() ; // once status is New, we are sure target is not up to date, we can safely modify it run_status = RunStatus::Ok ; if (deps_) { - NodeIdx j = 0 ; - for( Dep const& d : deps ) if (d.dflags[Dflag::Static]) deps[j++] = d ; - if (j!=deps.size()) deps.shorten_by(deps.size()-j) ; + ::vector<Dep> static_deps ; + for( Dep const& d : deps ) if (d.dflags[Dflag::Static]) static_deps.push_back(d) ; + deps.assign(static_deps) ; } if (!rule->is_special()) { exec_gen = 0 ; diff --git a/src/lmakeserver/job.x.hh b/src/lmakeserver/job.x.hh index 694314b2..69c67682 100644 --- a/src/lmakeserver/job.x.hh +++ b/src/lmakeserver/job.x.hh @@ -194,7
+194,7 @@ namespace Engine { void reset() { if (step()>Step::Dep) step(Step::Dep) ; missing_dsk = false ; - i_dep = 0 ; + iter = {} ; reasons.first = {} ; stamped_err = proto_err = {} ; // no errors in dep as no dep analyzed yet stamped_modif = proto_modif = false ; @@ -215,24 +215,24 @@ namespace Engine { // data // req independent (identical for all Req's) : these fields are there as there is no Req-independent non-persistent table ::pair<JobReason,JobReason> reasons ; // 36+36<=128 bits, reasons to run job when deps are ready - NodeIdx i_dep = 0 ; // ~20 <= 32 bits, deps up to this one statisfy required action + DepsIter::Digest iter ; // ~28 <= 64 bits, deps up to this one satisfy required action uint8_t n_submits = 0 ; // 8 bits, number of times job has been submitted to avoid infinite loop bool new_cmd :1 = false ; // 1 bit , if true <=> cmd has been modified bool full :1 = false ; // 1 bit , if true <=>, job result is asked, else only makable bool missing_dsk :1 = false ; // 1 bit , if true <=>, a dep has been checked but not on disk - RunStatus stamped_err :2 = {} ; // 2 bits, errors seen in dep until i_dep before last parallel chunk, Maybe means missing static - RunStatus proto_err :2 = {} ; // 2 bits, errors seen in dep until i_dep including last parallel chunk, Maybe means missing static - bool stamped_modif :1 = false ; // 1 bit , modifs seen in dep until i_dep before last parallel chunk - bool proto_modif :1 = false ; // 1 bit , modifs seen in dep until i_dep including last parallel chunk + RunStatus stamped_err :2 = {} ; // 2 bits, errors seen in dep until iter before last parallel chunk, Maybe means missing static + RunStatus proto_err :2 = {} ; // 2 bits, errors seen in dep until iter including last parallel chunk, Maybe means missing static + bool stamped_modif :1 = false ; // 1 bit , modifs seen in dep until iter before last parallel chunk + bool proto_modif :1 = false ; // 1 bit , modifs seen in dep until iter including last parallel chunk bool start_reported :1 = false ; // 1 bit , if true <=> start message has been reported to user bool speculative_deps:1 = false ; // 1 bit , if true <=> job is waiting for speculative deps only Bool3 speculate :2 = Yes ; // 2 bits, Yes : prev dep not ready, Maybe : prev dep in error (percolated) bool reported :1 = false ; // 1 bit , used for delayed report when speculating BackendTag backend :2 = {} ; // 2 bits private : - Step _step :3 = {} ; // 3 bits + Step _step :3 = {} ; // 3 bits } ; - static_assert(sizeof(JobReqInfo)==40) ; // check expected size + static_assert(sizeof(JobReqInfo)==48) ; // check expected size } @@ -311,10 +311,12 @@ namespace Engine { // Tflags tflags(Node target) const ; // - void end_exec ( ) const ; // thread-safe + void end_exec ( ) const ; // thread-safe ::string ancillary_file(AncillaryTag=AncillaryTag::Data) const ; + JobInfo job_info ( ) const { return { ancillary_file() } ; } + void write_job_info(JobInfo const& ji ) const { ji.write(Disk::dir_guard(ancillary_file())) ; } ::string special_stderr(Node ) const ; - ::string special_stderr( ) const ; // cannot declare a default value for incomplete type Node + ::string special_stderr( ) const ; // cannot declare a default value for incomplete type Node // void invalidate_old() ; Rule::SimpleMatch simple_match () const ; // thread-safe @@ -348,6 +350,7 @@ namespace Engine { bool/*maybe_new_deps*/ _submit_plain ( ReqInfo& , JobReason , CoarseDelay pressure ) ; void _set_pressure_raw( ReqInfo& , CoarseDelay ) const ; // data + // START_OF_VERSIONING public : //Name name ; // 32
bits, inherited Node asking ; // 32 bits, last target needing this job @@ -355,13 +358,14 @@ namespace Engine { Deps deps ; // 31<=32 bits, owned Rule rule ; // 16 bits, can be retrieved from full_name, but would be slower CoarseDelay exec_time ; // 16 bits, for plain jobs - ExecGen exec_gen :NExecGenBits = 0 ; // <= 8 bits, for plain jobs, cmd generation of rule - mutable MatchGen match_gen :NMatchGenBits = 0 ; // <= 8 bits, if deemed !sure - Tokens1 tokens1 = 0 ; // <= 8 bits, for plain jobs, number of tokens - 1 for eta computation + ExecGen exec_gen :NExecGenBits = 0 ; // 8 bits, for plain jobs, cmd generation of rule + mutable MatchGen match_gen :NMatchGenBits = 0 ; // 8 bits, if deemed !sure + Tokens1 tokens1 = 0 ; // 8 bits, for plain jobs, number of tokens - 1 for eta computation RunStatus run_status:3 = {} ; // 3 bits Status status :4 = {} ; // 4 bits private : mutable bool _sure :1 = false ; // 1 bit + // END_OF_VERSIONING } ; static_assert(sizeof(JobData)==24) ; // check expected size diff --git a/src/lmakeserver/node.cc b/src/lmakeserver/node.cc index 08cc32b6..25998f03 100644 --- a/src/lmakeserver/node.cc +++ b/src/lmakeserver/node.cc @@ -38,13 +38,11 @@ namespace Engine { // ::ostream& operator<<( ::ostream& os , NodeData const& nd ) { - /**/ os << '(' << nd.crc ; - if (nd.crc!=Crc::None) os << ',' << nd.date ; - /**/ os << ',' ; - if (!nd.match_ok() ) os << '~' ; - /**/ os << "job:" ; - if (nd.is_plain() ) os << +Job(nd.actual_job()) ; - return os << ")" ; + /**/ os <<'('<< nd.crc <<','<< nd.date ; + if (!nd.match_ok()) os << ",~job:" ; + else os << ",job:" ; + if (nd.is_plain() ) os << +Job(nd.actual_job()) ; + return os << ")" ; } Manual NodeData::manual_wash( ReqInfo& ri , bool lazy ) { @@ -94,7 +92,7 @@ namespace Engine { bool frozen = idx().frozen() ; const char* msg = frozen ?
"frozen" : "src" ; NfsGuard nfs_guard { g_config.reliable_dirs } ; - FileInfo fi { nfs_guard.access(name_) } ; + FileInfo fi { nfs_guard.access(name_) } ; Trace trace("refresh_src_anti",STR(report_no_file),reqs_,fi.date) ; if (frozen) for( Req r : reqs_ ) r->frozen_nodes.emplace(idx(),r->frozen_nodes.size()) ; if (!fi) { @@ -104,7 +102,7 @@ namespace Engine { refresh(Crc::None) ; //^^^^^^^^^^^^^^^^ } else { - if ( crc.valid() && crc.exists() && fi.date==date ) return false/*updated*/ ; + if ( crc.valid() && fi.date==date.d ) return false/*updated*/ ; Crc crc_ = Crc::Reg ; Ddate date_ ; while ( crc_==Crc::Reg || crc_==Crc::Lnk ) crc_ = Crc(date_,name_,g_config.hash_algo) ; // ensure file is stable when computing crc @@ -401,7 +399,7 @@ namespace Engine { Codec : { SWEAR(crc.valid()) ; if (!Codec::refresh(+idx(),+ri.req)) status(NodeStatus::None) ; - if (date>req->start_date.d) ri.overwritten = Access::Reg ; // date is only updated when actual content is modified and codec cannot be links + if (newer(req->start_date) ) ri.overwritten = Access::Reg ; // date is only updated when actual content is modified and codec cannot be links trace("codec",ri.overwritten) ; goto Done ; } @@ -417,7 +415,7 @@ namespace Engine { goto ActuallyDone ; } ActuallyDone : - actual_job().clear() ; + actual_job() = {} ; Done : ri.done_ = ri.goal ; // disk is de facto updated NotDone : @@ -605,7 +603,7 @@ namespace Engine { void NodeData::mk_old() { Trace trace("mk_old",idx()) ; - if ( +actual_job() && actual_job()->rule.old() ) actual_job().clear() ; // old jobs may be collected, do not refer to them anymore + if ( +actual_job() && actual_job()->rule.old() ) actual_job() = {} ; // old jobs may be collected, do not refer to them anymore _set_match_gen(false/*ok*/) ; } @@ -638,19 +636,14 @@ namespace Engine { if (+crc_) refresh(crc_) ; } - bool/*modified*/ NodeData::refresh( Crc crc_ , Ddate date_ ) { - if (crc.match(crc_)) { - Trace trace("refresh_idle",idx(),date,"->",date_) ; - date = date_ ; - return false ; - } else { - Trace trace("refresh",idx(),reqs(),crc,"->",crc_,date,"->",date_) ; - crc = {} ; fence() ; - date = date_ ; fence() ; - crc = crc_ ; // ensure crc is never associated with a wrong date, even in case of crash - for( Req r : reqs() ) req_info(r).reset(NodeGoal::Status) ; // target is not conform on disk any more - return true ; - } + bool/*modified*/ NodeData::refresh( Crc crc_ , FullDate const& fd ) { + bool modified = !crc.match(crc_) ; + // + Trace trace( "refresh" , STR(modified) , idx() , reqs() , crc ,"->", crc_ , date ,"->", fd ) ; + // + if (modified) { crc_date(crc_,fd) ; for( Req r : reqs() ) req_info(r).reset(NodeGoal::Status) ; } // target is not conform on disk any more + else date = fd ; + return modified ; } static ::pair _manual_refresh( NodeData& nd , FileInfo const& fi ) { @@ -662,10 +655,10 @@ namespace Engine { if ( m==Manual::Empty && nd.crc==Crc::Empty ) { // fast path : no need to open file nd.date = file_date(ndn) ; } else { - Ddate ndd ; - Crc crc { ndd , ndn , g_config.hash_algo } ; + Ddate dd ; + Crc crc { dd , ndn , g_config.hash_algo } ; if (!nd.crc.match(crc)) return {m,false/*refreshed*/} ; // real modif - nd.date = ndd ; + nd.date = dd ; } return {Manual::Ok,true/*refreshed*/} ; // file is steady } @@ -692,14 +685,6 @@ namespace Engine { } - // - // Deps - // - - ::ostream& operator<<( ::ostream& os , Deps const& ds ) { - return os << c_vector_view(ds) ; - } - // // Dep // @@ -720,4 +705,108 @@ namespace Engine { return res ; } + // + // Deps + // + + ::ostream& 
operator<<( ::ostream& os , DepsIter::Digest const& did ) { + return os <<'('<< did.hdr <<','<< did.i_chunk <<')' ; + } + + ::ostream& operator<<( ::ostream& os , Deps const& ds ) { + return os << c_vector_view(ds) ; + } + + static void _append_dep( ::vector<Dep>& deps , Dep const& dep , size_t& hole ) { + bool can_compress = !dep.is_date && +dep.accesses && dep.crc()==Crc::None && !dep.dflags && !dep.parallel ; + if (hole==Npos) { + if (can_compress) { // create new open chunk + /**/ hole = deps.size() ; + Dep& hdr = deps.emplace_back() ; + /**/ hdr.sz = 1 ; + /**/ hdr.chunk_accesses = dep.accesses ; + /**/ static_cast<Node*>(&deps.emplace_back())[0] = dep ; + } else { // create a chunk just for dep + deps.push_back(dep) ; + } + } else { + Dep& hdr = deps[hole] ; + if ( can_compress && dep.accesses==hdr.chunk_accesses && hdr.sz<255 ) { // append dep to open chunk + uint8_t i = hdr.sz%Dep::NodesPerDep ; + if (i==0) deps.emplace_back() ; + static_cast<Node*>(&deps.back())[i] = dep ; + hdr.sz++ ; + } else { // close chunk : copy dep to hdr, except sz and chunk_accesses fields + uint8_t sz = hdr.sz ; + Accesses chunk_accesses = hdr.chunk_accesses ; + /**/ hdr = dep ; + /**/ hdr.sz = sz ; + /**/ hdr.chunk_accesses = chunk_accesses ; + /**/ hole = Npos ; + } + } + } + static void _fill_hole(Dep& hdr) { + SWEAR(hdr.sz!=0) ; + uint8_t sz = hdr.sz-1 ; + Accesses chunk_accesses = hdr.chunk_accesses ; + /**/ hdr = { static_cast<Node const*>(&hdr+1)[sz] , hdr.chunk_accesses , Crc::None } ; + /**/ hdr.sz = sz ; + /**/ hdr.chunk_accesses = chunk_accesses ; + } + static void _fill_hole( ::vector<Dep>& deps , size_t hole ) { + if (hole==Npos) return ; + Dep& d = deps[hole] ; + _fill_hole(d) ; + if (d.sz%Dep::NodesPerDep==0) deps.pop_back() ; + } + + Deps::Deps(::vmap<Node,Dflags> const& deps , Accesses accesses , bool parallel ) { + ::vector<Dep> ds ; ds.reserve(deps.size()) ; // reserving deps.size() is comfortable and guarantees no reallocation + size_t hole = Npos ; + for( auto const& [d,df] : deps ) _append_dep( ds , {d,accesses,df,parallel} , hole ) ; + _fill_hole(ds,hole) ; + *this = {ds} ; + } + + Deps::Deps( ::vector<Node> const& deps , Accesses accesses , Dflags dflags , bool parallel ) { + ::vector<Dep> ds ; ds.reserve(deps.size()) ; // reserving deps.size() is comfortable and guarantees no reallocation + size_t hole = Npos ; + for( auto const& d : deps ) _append_dep( ds , {d,accesses,dflags,parallel} , hole ) ; + _fill_hole(ds,hole) ; + *this = {ds} ; + } + + void Deps::assign(::vector<Dep> const& deps) { + ::vector<Dep> ds ; ds.reserve(deps.size()) ; // reserving deps.size() is comfortable and guarantees no reallocation + size_t hole = Npos ; + for( auto const& d : deps ) _append_dep( ds , d , hole ) ; + _fill_hole(ds,hole) ; + DepsBase::assign(ds) ; + } + + void Deps::replace_tail( DepsIter it , ::vector<Dep> const& deps ) { + // close current chunk + Dep* cur_dep = const_cast<Dep*>(it.hdr) ; + if (it.i_chunk!=0) { + cur_dep->sz = it.i_chunk ; + _fill_hole(*cur_dep) ; + cur_dep = &cur_dep->next() ; + } + // create new tail + ::vector<Dep> ds ; + size_t hole = Npos ; + for( auto const& d : deps ) _append_dep( ds , d , hole ) ; + _fill_hole(ds,hole) ; + // splice it + NodeIdx tail_sz = items()+DepsBase::size()-cur_dep ; + if (ds.size()<=tail_sz) { + for( Dep const& d : ds ) *cur_dep++ = d ; // copy all + shorten_by(tail_sz-ds.size()) ; // and shorten + } else { + for( Dep const& d : ::vector_view(ds.data(),tail_sz) ) *cur_dep++ = d ; // copy what can be fitted + append(::vector_view( &ds[tail_sz] , ds.size()-tail_sz ) ) ; // and append for the remaining + } + } + } diff --git a/src/lmakeserver/node.x.hh b/src/lmakeserver/node.x.hh index 8d096124..ef8cd179 100644 --- a/src/lmakeserver/node.x.hh +++
b/src/lmakeserver/node.x.hh @@ -151,9 +151,12 @@ namespace Engine { struct Dep : DepDigestBase<Node> { friend ::ostream& operator<<( ::ostream& , Dep const& ) ; using Base = DepDigestBase<Node> ; + static const uint8_t NodesPerDep ; // cxtors & casts using Base::Base ; // accesses + Dep const& next() const { return *(this+1+div_up(sz,NodesPerDep)) ; } + Dep & next() { return *(this+1+div_up(sz,NodesPerDep)) ; } ::string accesses_str() const ; ::string dflags_str () const ; // services @@ -161,17 +164,78 @@ namespace Engine { void acquire_crc() ; } ; static_assert(sizeof(Dep)==16) ; + inline constexpr uint8_t Dep::NodesPerDep = sizeof(Dep)/sizeof(Node) ; static_assert(sizeof(Dep)%sizeof(Node)==0) ; // // Deps // + struct DepsIter { + struct Digest { + friend ::ostream& operator<<( ::ostream& , Digest const& ) ; + NodeIdx hdr = 0 ; + uint8_t i_chunk = 0 ; + } ; + // cxtors & casts + DepsIter( ) = default ; + DepsIter( DepsIter const& dit ) : hdr{dit.hdr} , i_chunk{dit.i_chunk} {} + DepsIter( Dep const* d ) : hdr{d } {} + DepsIter( Deps , Digest ) ; + // + DepsIter& operator=(DepsIter const& dit) { + hdr = dit.hdr ; + i_chunk = dit.i_chunk ; + return *this ; + } + // accesses + bool operator==(DepsIter const& dit) const { return hdr==dit.hdr && i_chunk==dit.i_chunk ; } + Digest digest (Deps ) const ; + // services + Dep const* operator->() const { return &**this ; } + Dep const& operator* () const { + if (i_chunk==hdr->sz) return *hdr ; + static_cast<Node&>(tmpl) = static_cast<Node const*>(hdr+1)[i_chunk] ; + tmpl.accesses = hdr->chunk_accesses ; + return tmpl ; + } + DepsIter& operator++(int) { return ++*this ; } + DepsIter& operator++( ) { + if (i_chunk==hdr->sz) { // go to next chunk + /**/ i_chunk = 0 ; // Node's in chunk are semantically located before header + /**/ hdr = &hdr->next() ; + if (hdr->sz) tmpl = { hdr->chunk_accesses , Crc::None , {} } ; // prepare tmpl when first accessing it (assumes sequential access) + } else { // go to next item in chunk + i_chunk++ ; + } + return *this ; + } + // data + Dep const* hdr = nullptr ; // pointer to current chunk header + uint8_t i_chunk = 0 ; // current index in chunk + mutable Dep tmpl = {{},Crc::None} ; // template to store uncompressed Dep's + } ; + struct Deps : DepsBase { friend ::ostream& operator<<( ::ostream& , Deps const& ) ; // cxtors & casts using DepsBase::DepsBase ; Deps( ::vmap<Node,Dflags> const& , Accesses , bool parallel ) ; Deps( ::vector<Node> const& , Accesses , Dflags , bool parallel ) ; + // accesses + Dep const& operator[](size_t i) const = delete ; // deps are compressed, cannot do random accesses + Dep & operator[](size_t i) = delete ; // . + NodeIdx size ( ) const = delete ; // .
+ // services + DepsIter begin() const { + Dep const* first = items() ; + return {first} ; + } + DepsIter end() const { + Dep const* last1 = items()+DepsBase::size() ; + return {last1} ; + } + void assign ( ::vector<Dep> const& ) ; + void replace_tail( DepsIter , ::vector<Dep> const& ) ; } ; } @@ -218,12 +282,12 @@ namespace Engine { using Idx = NodeIdx ; using ReqInfo = NodeReqInfo ; using MakeAction = NodeMakeAction ; - using LvlIdx = RuleIdx ; // lvl may indicate the number of rules tried + using LvlIdx = RuleIdx ; // lvl may indicate the number of rules tried // static constexpr RuleIdx MaxRuleIdx = Node::MaxRuleIdx ; static constexpr RuleIdx NoIdx = Node::NoIdx ; // cxtors & casts - NodeData( ) = delete ; // if necessary, we must take care of the union + NodeData( ) = delete ; // if necessary, we must take care of the union NodeData( Name n , bool no_dir , bool locked=false ) : DataBase{n} { if (!no_dir) dir() = Node(_dir_name(),false/*no_dir*/,locked) ; } @@ -254,10 +318,17 @@ namespace Engine { Codec::Code & codec_code () { SWEAR( is_encode() , buildable ) ; return _if_encode.code ; } Codec::Code const& codec_code () const { SWEAR( is_encode() , buildable ) ; return _if_encode.code ; } // + bool newer ( FullDate const& fd ) const { return crc==Crc::None ? date.p>fd.p : date.d>fd.d ; } + void crc_date( Crc crc_ , FullDate const& fd ) { + crc = {} ; fence() ; // ensure crc is never associated with a wrong date, even in case of crash + date = fd ; fence() ; // . + crc = crc_ ; + } + // bool has_req ( Req ) const ; ReqInfo const& c_req_info( Req ) const ; ReqInfo & req_info ( Req ) const ; - ReqInfo & req_info ( ReqInfo const& ) const ; // make R/W while avoiding look up (unless allocation) + ReqInfo & req_info ( ReqInfo const& ) const ; // make R/W while avoiding look up (unless allocation) ::vector<Req> reqs ( ) const ; bool waiting ( ) const ; bool done ( ReqInfo const& , NodeGoal ) const ; @@ -370,8 +441,8 @@ namespace Engine { // template<class RI> void add_watcher( ReqInfo& ri , Watcher watcher , RI& wri , CoarseDelay pressure ) ; // - bool/*modified*/ refresh( Crc , Ddate={} ) ; - void refresh( ) ; + bool/*modified*/ refresh( Crc , FullDate const& ={} ) ; + void refresh( ) ; private : void _set_buildable_raw( Req , DepDepth ) ; // req is for error reporting only bool/*done*/ _make_pre ( ReqInfo& ) ; @@ -388,38 +459,40 @@ namespace Engine { // void _set_match_gen(bool ok) ; // data + // START_OF_VERSIONING public : struct IfPlain { - Node dir ; // 31<=32 bits, shared - JobTgts job_tgts ; // 32 bits, owned , ordered by prio, valid if match_ok - RuleTgts rule_tgts ; // ~20<=32 bits, shared, matching rule_tgts issued from suffix on top of job_tgts, valid if match_ok - Job actual_job ; // 31<=32 bits, shared, job that generated node + Node dir ; // 31 < 32 bits, shared + JobTgts job_tgts ; // 32 bits, owned , ordered by prio, valid if match_ok + RuleTgts rule_tgts ; // ~20 < 32 bits, shared, matching rule_tgts issued from suffix on top of job_tgts, valid if match_ok + Job actual_job ; // 31 < 32 bits, shared, job that generated node } ; struct IfDecode { - Codec::Val val ; // 32 bits, offset in association file where the association line can be found + Codec::Val val ; // 32 bits, offset in association file where the association line can be found } ; struct IfEncode { - Codec::Code code ; // 32 bits, offset in association file where the association line can be found + Codec::Code code ; // 32 bits, offset in association file where the association line can be found } ; - //Name name ; // 32 bits, inherited -
@@ -370,8 +441,8 @@ namespace Engine { // template void add_watcher( ReqInfo& ri , Watcher watcher , RI& wri , CoarseDelay pressure ) ; // - bool/*modified*/ refresh( Crc , Ddate={} ) ; - void refresh( ) ; + bool/*modified*/ refresh( Crc , FullDate const& ={} ) ; + void refresh( ) ; private : void _set_buildable_raw( Req , DepDepth ) ; // req is for error reporting only bool/*done*/ _make_pre ( ReqInfo& ) ; @@ -388,38 +459,40 @@ namespace Engine { // void _set_match_gen(bool ok) ; // data + // START_OF_VERSIONING public : struct IfPlain { - Node dir ; // 31<=32 bits, shared - JobTgts job_tgts ; // 32 bits, owned , ordered by prio, valid if match_ok - RuleTgts rule_tgts ; // ~20<=32 bits, shared, matching rule_tgts issued from suffix on top of job_tgts, valid if match_ok - Job actual_job ; // 31<=32 bits, shared, job that generated node + Node dir ; // 31 < 32 bits, shared + JobTgts job_tgts ; // 32 bits, owned , ordered by prio, valid if match_ok + RuleTgts rule_tgts ; // ~20 < 32 bits, shared, matching rule_tgts issued from suffix on top of job_tgts, valid if match_ok + Job actual_job ; // 31 < 32 bits, shared, job that generated node } ; struct IfDecode { - Codec::Val val ; // 32 bits, offset in association file where the association line can be found + Codec::Val val ; // 32 bits, offset in association file where the association line can be found } ; struct IfEncode { - Codec::Code code ; // 32 bits, offset in association file where the association line can be found + Codec::Code code ; // 32 bits, offset in association file where the association line can be found } ; - //Name name ; // 32 bits, inherited - Watcher asking ; // 32 bits, last watcher needing this node - Crc crc = Crc::None ; // ~45<=64 bits, disk file CRC when file's mtime was date. 45 bits : MTBF=1000 years @ 1000 files generated per second. - Ddate date ; // ~40<=64 bits, deemed mtime (in ns) or when it was known non-existent. 40 bits : lifetime=30 years @ 1ms resolution + //Name name ; // 32 bits, inherited + Watcher asking ; // 32 bits, last watcher needing this node + Crc crc = Crc::None ; // ~45 < 64 bits, disk file CRC when file's mtime was date.d. 45 bits : MTBF=1000 years @ 1000 files generated per second. + FullDate date ; // ~40+40<128 bits, p : production date, d : if file mtime is d, crc is valid, 40 bits : 30 years @ms resolution private : union { - IfPlain _if_plain = {} ; // 128 bits - IfDecode _if_decode ; // 32 bits - IfEncode _if_encode ; // 32 bits + IfPlain _if_plain = {} ; // 128 bits + IfDecode _if_decode ; // 32 bits + IfEncode _if_encode ; // 32 bits } ; public : - MatchGen match_gen:NMatchGenBits = 0 ; // 8 bits, if deem !job_tgts.size() && !rule_tgts && !sure - Buildable buildable:4 = Buildable::Unknown ; // 4 bits, data independent, if Maybe => buildability is data dependent, if Plain => not yet computed - bool polluted :1 = false ; // 1 bit , if true <= node was polluted when produced by actual_job + MatchGen match_gen:NMatchGenBits = 0 ; // 8 bits, if deem !job_tgts.size() && !rule_tgts && !sure + Buildable buildable:4 = Buildable::Unknown ; // 4 bits, data independent, if Maybe => buildability is data dependent, if Plain => not yet computed + bool polluted :1 = false ; // 1 bit , if true <= node was polluted when produced by actual_job private : - RuleIdx _conform_idx = -+NodeStatus::Unknown ; // 16 bits, index to job_tgts to first job with execut.ing.ed prio level, if NoIdx <=> uphill or no job found - Tflags _actual_tflags ; // 8 bits, tflags associated with actual_job + RuleIdx _conform_idx = -+NodeStatus::Unknown ; // 16 bits, index to job_tgts to first job with execut.ing.ed prio level, if NoIdx <=> uphill or no job found + Tflags _actual_tflags ; // 8 bits, tflags associated with actual_job + // END_OF_VERSIONING } ; - static_assert(sizeof(NodeData)==48) ; // check expected size + static_assert(sizeof(NodeData)==56) ; // check expected size } @@ -468,20 +541,20 @@ namespace Engine { inline bool NodeData::done( Req r , NodeGoal ng ) const { return done(c_req_info(r),ng ) ; } inline bool NodeData::done( Req r ) const { return done(c_req_info(r) ) ; } - inline Manual NodeData::manual(Ddate d,bool empty) const { + inline Manual NodeData::manual(Ddate dd,bool empty) const { Manual res = {}/*garbage*/ ; if (crc==Crc::None) { - if (!d ) return Manual::Ok ; - else if (empty ) res = Manual::Empty ; - else res = Manual::Modif ; + if (!dd ) return Manual::Ok ; + else if (empty ) res = Manual::Empty ; + else res = Manual::Modif ; } else { - if (!d ) res = Manual::Unlnked ; - else if (d==date) return Manual::Ok ; - else if (empty ) res = Manual::Empty ; - else res = Manual::Modif ; + if (!dd ) res = Manual::Unlnked ; + else if (dd==date.d) return Manual::Ok ; + else if (empty ) res = Manual::Empty ; + else res = Manual::Modif ; } // - Trace("manual",idx(),d,crc,date,res,STR(empty)) ; + Trace("manual",idx(),dd,crc,date,res,STR(empty)) ; return res ; } @@ -524,41 +597,34 @@ namespace Engine { inline void NodeData::refresh() { FileInfo fi = Disk::FileInfo{name()} ; switch (manual(fi)) { - case Manual::Ok : break ; - case Manual::Unlnked : refresh( Crc::None ) ; break ; - case Manual::Empty : - case 
Manual::Modif : refresh( {} , fi.date ) ; break ; + case Manual::Ok : break ; + case Manual::Unlnked : refresh( Crc::None , Pdate(New) ) ; break ; + case Manual::Empty : refresh( Crc::Empty , fi.date ) ; break ; + case Manual::Modif : refresh( {} , fi.date ) ; break ; DF} } // - // Deps + // Dep // - inline Deps::Deps(::vmap const& deps , Accesses accesses , bool parallel ) { - ::vector ds ; ds.reserve(deps.size()) ; - for( auto const& [d,df] : deps ) { ds.emplace_back( d , accesses , df , parallel ) ; } - *this = Deps(ds) ; + inline bool Dep::up_to_date() const { + return !is_date && crc().match((*this)->crc,accesses) ; } - inline Deps::Deps( ::vector const& deps , Accesses accesses , Dflags dflags , bool parallel ) { - ::vector ds ; ds.reserve(deps.size()) ; - for( auto const& d : deps ) ds.emplace_back( d , accesses , dflags , parallel ) ; - *this = Deps(ds) ; + inline void Dep::acquire_crc() { + if ( is_date && (*this)->crc.valid() && (*this)->crc!=Crc::None && date()==(*this)->date.d ) crc((*this)->crc) ; } // - // Dep + // Deps // - inline bool Dep::up_to_date() const { - return !is_date && crc().match((*this)->crc,accesses) ; - } + inline DepsIter::DepsIter( Deps ds , Digest d ) : hdr{+ds?ds.items()+d.hdr:nullptr} , i_chunk{d.i_chunk} {} - inline void Dep::acquire_crc() { - if ( is_date && (*this)->crc.valid() && (*this)->crc.exists() && date()==(*this)->date ) crc((*this)->crc) ; + inline DepsIter::Digest DepsIter::digest(Deps ds) const { + return { hdr?NodeIdx(hdr-ds.items()):0 , i_chunk } ; } - } #endif diff --git a/src/lmakeserver/req.cc b/src/lmakeserver/req.cc index 7b14db5a..ce201efe 100644 --- a/src/lmakeserver/req.cc +++ b/src/lmakeserver/req.cc @@ -248,18 +248,13 @@ namespace Engine { bool overflow = (*this)->_send_err( intermediate , job->rule->name , +target?target->name():job->name() , n_err , lvl ) ; if (overflow) return true ; // - if ( !seen_stderr && job->run_status==RunStatus::Ok && !job->rule->is_special() ) { - try { - // show first stderr - Rule::SimpleMatch match ; - IFStream job_stream { job->ancillary_file() } ; - auto report_start = deserialize(job_stream) ; - auto report_end = deserialize(job_stream) ; - EndNoneAttrs end_none_attrs = job->rule->end_none_attrs.eval(job,match,report_start.rsrcs) ; - seen_stderr = (*this)->audit_stderr( report_end.end.msg , report_end.end.digest.stderr , end_none_attrs.max_stderr_len , lvl+1 ) ; - } catch(...) 
{ - (*this)->audit_info( Color::Note , "no stderr available" , lvl+1 ) ; - } + if ( !seen_stderr && job->run_status==RunStatus::Ok && !job->rule->is_special() ) { // show first stderr + Rule::SimpleMatch match ; + JobInfo job_info = job->job_info() ; + EndNoneAttrs end_none_attrs = job->rule->end_none_attrs.eval(job,match,job_info.start.rsrcs) ; + // + if (!job_info.end.end.proc) (*this)->audit_info( Color::Note , "no stderr available" , lvl+1 ) ; + else seen_stderr = (*this)->audit_stderr( job_info.end.end.msg , job_info.end.end.digest.stderr , end_none_attrs.max_stderr_len , lvl+1 ) ; } if (intermediate) for( Dep const& d : job->deps ) @@ -437,11 +432,15 @@ namespace Engine { if (+clash_nodes) { ::vmap clash_nodes_ = mk_vmap(clash_nodes) ; ::sort( clash_nodes_ , []( ::pair const& a , ::pair b ) { return a.secondrule->special!=Special::Req) { + audit_info( Color::Warning , to_string("consider : lmake -R ",mk_shell_str(job->rule->name)," -J ",mk_shell_str(job->name())) ) ; + } else { + ::string dl ; + for( Dep const& d : job->deps ) append_to_string(dl,' ',mk_shell_str(d->name())) ; + audit_info( Color::Warning , to_string("consider : lmake",dl) ) ; + } } } diff --git a/src/lmakeserver/store.cc b/src/lmakeserver/store.cc index 8536d940..20d2b905 100644 --- a/src/lmakeserver/store.cc +++ b/src/lmakeserver/store.cc @@ -218,47 +218,42 @@ namespace Engine::Persistent { ::vector rules = rule_lst() ; ::umap rule_tab ; for( Rule r : Rule::s_lst() ) rule_tab[r->cmd_crc] = r ; SWEAR(rule_tab.size()==rules.size()) ; for( ::string const& jd : walk(from_dir,from_dir) ) { - { IFStream job_stream { jd } ; - JobInfoStart report_start ; - JobInfoEnd report_end ; - try { - deserialize(job_stream,report_start) ; - deserialize(job_stream,report_end ) ; - } catch (...) 
{ goto NextJob ; } + { JobInfo job_info { jd } ; + if (!job_info.end.end.proc) goto NextJob ; // qualify report - if (report_start.pre_start.proc!=JobProc::Start) goto NextJob ; - if (report_start.start .proc!=JobProc::Start) goto NextJob ; - if (report_end .end .proc!=JobProc::End ) goto NextJob ; - if (report_end .end.digest.status!=Status::Ok ) goto NextJob ; // repairing jobs in error is useless + if (job_info.start.pre_start.proc!=JobProc::Start) goto NextJob ; + if (job_info.start.start .proc!=JobProc::Start) goto NextJob ; + if (job_info.end .end .proc!=JobProc::End ) goto NextJob ; + if (job_info.end .end.digest.status!=Status::Ok ) goto NextJob ; // repairing jobs in error is useless // find rule - auto it = rule_tab.find(report_start.rule_cmd_crc) ; + auto it = rule_tab.find(job_info.start.rule_cmd_crc) ; if (it==rule_tab.end()) goto NextJob ; // no rule Rule rule = it->second ; // find targets - ::vector targets ; targets.reserve(report_end.end.digest.targets.size()) ; - for( auto const& [tn,td] : report_end.end.digest.targets ) { + ::vector targets ; targets.reserve(job_info.end.end.digest.targets.size()) ; + for( auto const& [tn,td] : job_info.end.end.digest.targets ) { + if ( td.crc==Crc::None && !static_phony(td.tflags) ) continue ; // this is not a target if ( !td.crc.valid() ) goto NextJob ; // XXX : handle this case if ( td.date!=file_date(tn) ) goto NextJob ; // if dates do not match, we will rerun the job anyway, no interest to repair - if ( td.crc==Crc::None && !static_phony(td.tflags) ) continue ; // this is not a target // Node t{tn} ; - t->refresh(td.crc,td.date) ; + t->refresh( td.crc , {td.date,{}} ) ; // if file does not exist, the Epoch as a date is fine targets.emplace_back( t , td.tflags ) ; } ::sort(targets) ; // ease search in targets // find deps - ::vector deps ; deps.reserve(report_end.end.digest.deps.size()) ; - for( auto const& [dn,dd] : report_end.end.digest.deps ) { + ::vector deps ; deps.reserve(job_info.end.end.digest.deps.size()) ; + for( auto const& [dn,dd] : job_info.end.end.digest.deps ) { Dep dep { Node(dn) , dd } ; if ( dep.is_date ) goto NextJob ; // dep could not be identified when job ran, hum, better not to repair that if ( +dep.accesses && !dep.crc().valid() ) goto NextJob ; // no valid crc, no interest to repair as job will rerun anyway deps.emplace_back(dep) ; } // set job - Job job { {rule,::move(report_start.stems)} } ; + Job job { {rule,::move(job_info.start.stems)} } ; job->targets.assign(targets) ; job->deps .assign(deps ) ; - job->status = report_end.end.digest.status ; + job->status = job_info.end.end.digest.status ; job->exec_ok(true) ; // pretend job just ran // set target actual_job's for( Target t : targets ) { @@ -266,9 +261,7 @@ namespace Engine::Persistent { t->actual_tflags() = t.tflags ; } // restore job_data - OFStream job_data_stream {dir_guard(job->ancillary_file()) } ; - serialize(job_data_stream,report_start) ; - serialize(job_data_stream,report_end ) ; + job->write_job_info(job_info) ; } NextJob : ; } diff --git a/src/lmakeserver/store.x.hh b/src/lmakeserver/store.x.hh index cd93693c..f4872a94 100644 --- a/src/lmakeserver/store.x.hh +++ b/src/lmakeserver/store.x.hh @@ -263,20 +263,20 @@ namespace Engine::Persistent { // autolock header index key data misc // jobs - using JobFile = Store::AllocFile < false , JobHdr , Job , JobData > ; - using DepsFile = Store::VectorFile < false , void , Deps , Dep , NodeIdx , 4 > ; - using TargetsFile = Store::VectorFile < false , void , Targets , Target > ; + using JobFile = 
Store::AllocFile < false , JobHdr , Job , JobData > ; + using DepsFile = Store::VectorFile < false , void , Deps , Dep , NodeIdx , 4 > ; // Deps are compressed when Crc==None + using TargetsFile = Store::VectorFile < false , void , Targets , Target > ; // nodes - using NodeFile = Store::AllocFile < false , NodeHdr , Node , NodeData > ; - using JobTgtsFile = Store::VectorFile < false , void , JobTgts::Vector , JobTgt , RuleIdx > ; + using NodeFile = Store::AllocFile < false , NodeHdr , Node , NodeData > ; + using JobTgtsFile = Store::VectorFile < false , void , JobTgts::Vector , JobTgt , RuleIdx > ; // rules - using RuleStrFile = Store::VectorFile < false , void , RuleStr , char , uint32_t > ; - using RuleFile = Store::AllocFile < false , MatchGen , Rule , RuleStr > ; - using RuleTgtsFile = Store::SinglePrefixFile< false , void , RuleTgts , RuleTgt , void , true /*Reverse*/ > ; - using SfxFile = Store::SinglePrefixFile< false , void , PsfxIdx , char , PsfxIdx , true /*Reverse*/ > ; // map sfxes to root of pfxes, no lock : static - using PfxFile = Store::MultiPrefixFile < false , void , PsfxIdx , char , RuleTgts , false/*Reverse*/ > ; + using RuleStrFile = Store::VectorFile < false , void , RuleStr , char , uint32_t > ; + using RuleFile = Store::AllocFile < false , MatchGen , Rule , RuleStr > ; + using RuleTgtsFile = Store::SinglePrefixFile< false , void , RuleTgts , RuleTgt , void , true /*Reverse*/ > ; + using SfxFile = Store::SinglePrefixFile< false , void , PsfxIdx , char , PsfxIdx , true /*Reverse*/ > ; // map sfxes to root of pfxes, no lock : static + using PfxFile = Store::MultiPrefixFile < false , void , PsfxIdx , char , RuleTgts , false/*Reverse*/ > ; // commons - using NameFile = Store::SinglePrefixFile< true , void , Name , char , JobNode > ; // for Job's & Node's + using NameFile = Store::SinglePrefixFile< true , void , Name , char , JobNode > ; // for Job's & Node's static constexpr char StartMrkr = 0x0 ; // used to indicate a single match suffix (i.e. a suffix which actually is an entire file name) diff --git a/src/rpc_job.cc b/src/rpc_job.cc index 6e20b379..dfe14785 100644 --- a/src/rpc_job.cc +++ b/src/rpc_job.cc @@ -262,21 +262,30 @@ ::ostream& operator<<( ::ostream& os , JobServerRpcReq const& jsrr ) { } // -// JobInfoStart +// JobInfo // ::ostream& operator<<( ::ostream& os , JobInfoStart const& jis ) { return os << "JobInfoStart(" << jis.submit_attrs <<','<< jis.rsrcs <<','<< jis.pre_start <<','<< jis.start <<')' ; } -// -// JobInfoEnd -// - ::ostream& operator<<( ::ostream& os , JobInfoEnd const& jie ) { return os << "JobInfoEnd(" << jie.end <<')' ; } +JobInfo::JobInfo(::string const& filename) { + try { + IFStream job_stream { filename } ; + deserialize(job_stream,start) ; + deserialize(job_stream,end ) ; + } catch (...) 
{} // we get what we get +} + +void JobInfo::write(::string const& filename) const { + OFStream os{filename} ; + serialize(os,start) ; + serialize(os,end ) ; +}
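The new JobInfo aggregate bundles both records of the ancillary file behind one tolerant reader : a missing or truncated file simply leaves default-constructed fields, and callers test end.end.proc instead of catching exceptions (as req.cc and store.cc now do). A self-contained sketch of the pattern, with simplified records and a byte-wise serdes standing in for lmake's serialize/deserialize :

#include <cstdio>
#include <fstream>
#include <stdexcept>
#include <string>

struct StartRec { int proc = 0 ; int eta    = 0 ; } ;            // stand-ins for JobInfoStart/JobInfoEnd
struct EndRec   { int proc = 0 ; int status = 0 ; } ;

template<class T> void serialize( std::ostream& os , T const& x ) {
	os.write( reinterpret_cast<char const*>(&x) , sizeof(x) ) ;
}
template<class T> void deserialize( std::istream& is , T& x ) {
	if (!is.read( reinterpret_cast<char*>(&x) , sizeof(x) )) throw std::runtime_error("truncated") ;
}

struct JobInfoSketch {
	StartRec start ;
	EndRec   end   ;
	JobInfoSketch() = default ;
	JobInfoSketch(std::string const& filename) {
		try {
			std::ifstream is { filename } ;
			deserialize(is,start) ;
			deserialize(is,end  ) ;
		} catch (...) {}                                         // we get what we get : keep whatever was read
	}
	void write(std::string const& filename) const {
		std::ofstream os { filename } ;
		serialize(os,start) ;
		serialize(os,end  ) ;
	}
} ;

int main() {
	JobInfoSketch ji ; ji.start = {1,42} ; ji.end = {2,0} ;
	ji.write("job_info.bin") ;
	JobInfoSketch back { "job_info.bin" } ;
	std::printf( "end info present : %d\n" , int(back.end.proc!=0) ) ; // 0 if the file stopped after the start record
	return 0 ;
}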
// // codec // diff --git a/src/rpc_job.hh b/src/rpc_job.hh index 53cbb006..fda2ae32 100644 --- a/src/rpc_job.hh +++ b/src/rpc_job.hh @@ -405,24 +405,24 @@ template ::ostream& operator<<( ::ostream& , DepDigestBase const& ) template struct DepDigestBase : NoVoid { friend ::ostream& operator<< <>( ::ostream& , DepDigestBase const& ) ; using Base = NoVoid ; - static constexpr bool HasBase = !::is_same_v ; + static constexpr bool HasBase = !::is_same_v ; // using Tag = FileTag ; using Crc = Hash::Crc ; using Ddate = Time::Ddate ; //cxtors & casts constexpr DepDigestBase( bool p=false ) : parallel{p} { crc ({}) ; } - constexpr DepDigestBase( Accesses a , Dflags dfs={} , bool p=false ) : dflags(dfs) , accesses{a} , parallel{p} { crc ({}) ; } - constexpr DepDigestBase( Accesses a , Crc c , Dflags dfs={} , bool p=false ) : dflags(dfs) , accesses{a} , parallel{p} { crc (c ) ; } - constexpr DepDigestBase( Accesses a , Ddate d , Dflags dfs={} , bool p=false ) : dflags(dfs) , accesses{a} , parallel{p} { date (d ) ; } - constexpr DepDigestBase( Accesses a , CrcDate const& cd , Dflags dfs={} , bool p=false ) : dflags(dfs) , accesses{a} , parallel{p} { crc_date(cd) ; } - constexpr DepDigestBase( Base b , Accesses a , Dflags dfs={} , bool p=false ) : Base{b} , dflags(dfs) , accesses{a} , parallel{p} { crc ({}) ; } - constexpr DepDigestBase( Base b , Accesses a , Crc c , Dflags dfs={} , bool p=false ) : Base{b} , dflags(dfs) , accesses{a} , parallel{p} { crc (c ) ; } - constexpr DepDigestBase( Base b , Accesses a , Ddate d , Dflags dfs={} , bool p=false ) : Base{b} , dflags(dfs) , accesses{a} , parallel{p} { date (d ) ; } - constexpr DepDigestBase( Base b , Accesses a , CrcDate const& cd , Dflags dfs={} , bool p=false ) : Base{b} , dflags(dfs) , accesses{a} , parallel{p} { crc_date(cd) ; } + constexpr DepDigestBase( Accesses a , Dflags dfs={} , bool p=false ) : accesses{a} , dflags(dfs) , parallel{p} { crc ({}) ; } + constexpr DepDigestBase( Accesses a , Crc c , Dflags dfs={} , bool p=false ) : accesses{a} , dflags(dfs) , parallel{p} { crc (c ) ; } + constexpr DepDigestBase( Accesses a , Ddate d , Dflags dfs={} , bool p=false ) : accesses{a} , dflags(dfs) , parallel{p} { date (d ) ; } + constexpr DepDigestBase( Accesses a , CrcDate const& cd , Dflags dfs={} , bool p=false ) : accesses{a} , dflags(dfs) , parallel{p} { crc_date(cd) ; } + constexpr DepDigestBase( Base b , Accesses a , Dflags dfs={} , bool p=false ) : Base{b} , accesses{a} , dflags(dfs) , parallel{p} { crc ({}) ; } + constexpr DepDigestBase( Base b , Accesses a , Crc c , Dflags dfs={} , bool p=false ) : Base{b} , accesses{a} , dflags(dfs) , parallel{p} { crc (c ) ; } + constexpr DepDigestBase( Base b , Accesses a , Ddate d , Dflags dfs={} , bool p=false ) : Base{b} , accesses{a} , dflags(dfs) , parallel{p} { date (d ) ; } + constexpr DepDigestBase( Base b , Accesses a , CrcDate const& cd , Dflags dfs={} , bool p=false ) : Base{b} , accesses{a} , dflags(dfs) , parallel{p} { crc_date(cd) ; } // initializing _crc in all cases (which crc_date does not do) is important to please compiler (gcc-11 -O3) - template constexpr DepDigestBase( DepDigestBase const& dd ) : dflags(dd.dflags) , accesses{dd.accesses} , parallel{dd.parallel} , _crc{} { crc_date(dd) ; } - template constexpr DepDigestBase( Base b , DepDigestBase const& dd ) : Base{b} , dflags(dd.dflags) , accesses{dd.accesses} , parallel{dd.parallel} , _crc{} { crc_date(dd) ; } + template constexpr DepDigestBase( DepDigestBase const& dd ) : accesses{dd.accesses} , dflags(dd.dflags) , parallel{dd.parallel} , _crc{} { crc_date(dd) ; } + template constexpr DepDigestBase( Base b , DepDigestBase const& dd ) : Base{b} , accesses{dd.accesses} , dflags(dd.dflags) , parallel{dd.parallel} , _crc{} { crc_date(dd) ; } // constexpr bool operator==(DepDigestBase const& other) const { if constexpr (HasBase) if (Base::operator!=(other) ) return false ; @@ -478,14 +478,17 @@ template struct DepDigestBase : NoVoid { } } // data - Dflags dflags ; // 6< 8 bits - Accesses accesses ; // 3< 8 bits - bool parallel:1 = false ; // 1 bit - bool is_date :1 = false ; // 1 bit + static constexpr uint8_t NSzBits = 6 ; + Accesses accesses ; // 3< 8 bits + Dflags dflags ; // 6< 8 bits + bool parallel:1 = false ; // 1 bit + bool is_date :1 = false ; // 1 bit + uint8_t sz :NSzBits = 0 ; // 6 bits, number of items in chunk following header (semantically before) + Accesses chunk_accesses ; // 3< 8 bits private : union { - Crc _crc = {} ; // ~46<64 bits - Ddate _date ; // ~45<64 bits + Crc _crc = {} ; // ~45<64 bits + Ddate _date ; // ~40<64 bits } ; } ; // END_OF_VERSIONING @@ -591,7 +594,7 @@ struct JobRpcReq { ::string msg ; // if proc == Start | LiveOut | Decode | Encode | End ::string file ; // if proc == Decode | Encode ::string ctx ; // if proc == Decode | Encode - uint8_t min_len ; // if proc == Encode + uint8_t min_len = 0 ; // if proc == Encode } ; struct MatchFlags { @@ -961,6 +964,18 @@ struct JobInfoEnd { JobRpcReq end = {} ; } ; +struct JobInfo { + // cxtors & casts + JobInfo( ) = default ; + JobInfo( ::string const& ancillary_file ) ; + JobInfo( JobInfoStart&& jis , JobInfoEnd&& jie ) : start{::move(jis)} , end{::move(jie)} {} + // services + void write(::string const& filename) const ; + // data + JobInfoStart start ; + JobInfoEnd end ; +} ; + // // codec // diff --git a/src/store/alloc.hh b/src/store/alloc.hh index 4b785644..835183a1 100644 --- a/src/store/alloc.hh +++ b/src/store/alloc.hh @@ -12,30 +12,82 @@ namespace Store { // free list sizes are linear until LinearSz, then logarithmic // single allocation is LinearSz==0 namespace Alloc { - template struct Hdr { - static constexpr uint8_t NFree = LinearSz ? LinearSz+NBits-n_bits(LinearSz+1) : 1 ; - NoVoid hdr ; - uint8_t n_free = 0 ; // starting at n_free, there is no element in free list - ::array free ; + + // sz to bucket mapping + // bucket and sz functions must be such that : + // - bucket(1 )==0 i.e. first bucket is for size 1 + // - bucket(sz+1)>=bucket(sz) i.e. buckets are sorted + // - bucket(sz+1)<=bucket(sz)+1 i.e. there are fewer buckets than sizes + // - bucket(sz(b) )==b i.e. sz is the inverse function of bucket + // - bucket(sz(b)+1)==b+1 i.e. sz returns the largest size that fits in a bucket + // This implementation chooses an integer-to-float conversion. + // The linear area, in which there is a bucket for each size, extends up to 1<<Mantissa. + // For example, with Mantissa==2 (linear up to size 4), bucket sizes are : 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, ...
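The bucket()/sz() functions below did not survive transport intact here, so as a cross-check, this is one self-contained mapping satisfying the listed contract (an illustrative reconstruction, not necessarily the patch's exact arithmetic) : sizes up to 1<<Mantissa each get their own bucket, and beyond that the bucket sizes are the integers representable with Mantissa significant bits.

#include <cstddef>
#include <cstdio>

template<unsigned Mantissa> constexpr std::size_t bucket(std::size_t sz) {
	static_assert(Mantissa>=1) ;
	if ( sz <= (std::size_t(1)<<Mantissa) ) return sz-1 ;           // linear area : one bucket per size
	std::size_t w = 0 ; for( std::size_t t=sz-1 ; t ; t>>=1 ) w++ ; // bit width of sz-1
	std::size_t exp = w - Mantissa ;                                // low bits to drop so sz-1 fits in Mantissa bits
	return ((sz-1)>>exp) + exp*(std::size_t(1)<<(Mantissa-1)) ;     // 2^(Mantissa-1) buckets per power-of-2 slice
}
template<unsigned Mantissa> constexpr std::size_t sz(std::size_t bucket) {
	constexpr std::size_t Half = std::size_t(1)<<(Mantissa-1) ;
	if ( bucket < 2*Half ) return bucket+1 ;                        // linear area
	std::size_t exp      = bucket/Half - 1 ;                        // recover the exponent ...
	std::size_t mantissa = bucket - exp*Half ;                      // ... and the mantissa slot
	return (mantissa+1)<<exp ;                                      // largest size landing in this bucket
}

// the contract from the comment above, checked at compile time for Mantissa==2
static_assert( bucket<2>(1)==0 ) ;
static_assert( sz<2>(4)==6 && sz<2>(5)==8 && sz<2>(6)==12 ) ;       // 1, 2, 3, 4, 6, 8, 12, ...
static_assert( bucket<2>(sz<2>(7) )==7 ) ;
static_assert( bucket<2>(sz<2>(7)+1)==8 ) ;

int main() {
	for( std::size_t b=0 ; b<10 ; b++ ) std::printf( "%zu " , sz<2>(b) ) ; // prints : 1 2 3 4 6 8 12 16 24 32
	std::printf("\n") ;
	return 0 ;
}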
+ + template constexpr size_t bucket(size_t sz) { + if (Mantissa==0) return 0 ; + // linear area + if (sz<=(1<>exp)+1 - (1< constexpr size_t sz(size_t bucket) { + if (Mantissa==0) return 1 ; + // linear area + if (bucket<(1<>Mantissa1) - 1 ; + size_t mantissa = (size_t(1)< constexpr bool chk() { + constexpr size_t N = 4<(1 ) != 0 ) return false ; + for( size_t s=1 ; s< N ; s++ ) if ( bucket(s+1 ) < bucket(s) ) return false ; + for( size_t s=1 ; s< N ; s++ ) if ( bucket(s+1 ) > bucket(s)+1 ) return false ; + for( size_t b=0 ; b(N) ; b++ ) if ( bucket(sz(b) ) != b ) return false ; + for( size_t b=0 ; b(N) ; b++ ) if ( bucket(sz(b)+1) != b+1 ) return false ; + return true ; + } + static_assert(chk<1>()) ; + static_assert(chk<2>()) ; + static_assert(chk<3>()) ; + static_assert(chk<4>()) ; + #endif + + template struct Hdr { + static constexpr size_t NFree = bucket(lsb_msk(8*sizeof(I)))+1 ; // number of necessary slots is the highest possible index + 1 + NoVoid hdr ; + ::array free ; } ; - template struct Hdr { - static constexpr uint8_t NFree = 0 ; + template struct Hdr { + static constexpr size_t NFree = 0 ; NoVoid hdr ; } ; template struct Data { - static_assert( sizeof(NoVoid)>=sizeof(I) ) ; // else waste memory + static_assert( sizeof(NoVoid)>=sizeof(I) ) ; // else waste memory template Data(A&&... args) : data{::forward(args)...} {} union { - NoVoid data ; // when data is used - I nxt ; // when data is in free list + NoVoid data ; // when data is used + I nxt ; // when data is in free list } ; ~Data() { data.~NoVoid() ; } } ; + } - template struct AllocFile - : StructFile< false/*AutoLock*/ , Alloc::Hdr> , Idx_ , Alloc::Data , true/*Multi*/ > // if !LinearSz, Multi is useless - { using Base = StructFile< false/*AutoLock*/ , Alloc::Hdr> , Idx_ , Alloc::Data , true/*Multi*/ > ; // but easier to code - using BaseHdr = Alloc::Hdr> ; + template struct AllocFile + : StructFile< false/*AutoLock*/ , Alloc::Hdr> , Idx_ , Alloc::Data , true/*Multi*/ > // if !Mantissa, Multi is useless ... + { using Base = StructFile< false/*AutoLock*/ , Alloc::Hdr> , Idx_ , Alloc::Data , true/*Multi*/ > ; // ... but easier to code + using BaseHdr = Alloc::Hdr> ; using BaseData = Alloc::Data ; // using Hdr = Hdr_ ; @@ -50,7 +102,7 @@ namespace Store { static constexpr bool HasHdr = !is_void_v ; static constexpr bool HasData = !is_void_v ; static constexpr bool HasDataSz = ::Store::HasDataSz ; - static constexpr bool Multi = LinearSz && HasData ; + static constexpr bool Multi = Mantissa && HasData ; // template Idx emplace_back( Idx n , A&&... args ) = delete ; using Base::clear ; @@ -107,10 +159,8 @@ namespace Store { // statics private : - static uint8_t _s_bucket(Sz sz ) requires( Multi) { return sz<=LinearSz ? 
sz-1 : LinearSz+n_bits(sz)-n_bits(LinearSz+1) ; } - static uint8_t _s_bucket(Sz sz ) requires(!Multi) { SWEAR(sz==1,sz) ; return 0 ; } - static Sz _s_sz (uint8_t bucket) requires( Multi) { return bucket(sz ) ; } + static Sz _s_sz (Sz bucket) { return Alloc::sz (bucket) ; } // cxtors public : using Base::Base ; @@ -129,8 +179,8 @@ namespace Store { return Base::idx(base_at) ; } private : - Idx const& _free(uint8_t bucket) const requires(HasData) { return Base::hdr().free[bucket] ; } - Idx & _free(uint8_t bucket) requires(HasData) { return Base::hdr().free[bucket] ; } + Idx const& _free(Sz bucket) const requires(HasData) { return Base::hdr().free[bucket] ; } + Idx & _free(Sz bucket) requires(HasData) { return Base::hdr().free[bucket] ; } public : Lst lst() const requires( !Multi && HasData ) { return Lst(*this) ; } // services @@ -160,29 +210,13 @@ namespace Store { template Idx _emplace( Sz sz , A&&... args ) requires(HasData) { ULock lock{_mutex} ; SWEAR(writable) ; - uint8_t bucket = _s_bucket(sz ) ; // XXX : implement smaller granularity than 2x - Idx& free = _free (bucket) ; - Idx res = free ; - if (+res) { - free = Base::at(res).nxt ; - } else { // try find a larger bucket and split it if above linear zone - uint8_t avail_bucket = BaseHdr::NFree ; - if (bucket>=LinearSz) { - for( avail_bucket = bucket+1 ; avail_bucket=Base::hdr().n_free) return Base::emplace_back( _s_sz(bucket) , ::forward(args)... ) ; // no space available - // ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - res = _free(avail_bucket) ; - _free(avail_bucket) = Base::at(res).nxt ; - fence() ; // ensure free list is always consistent - for( uint8_t i=avail_bucket-1 ; i>=bucket ; i-- ) { // put upper half in adequate free list - Idx upper(+res+_s_sz(i)) ; - Base::at(upper).nxt = 0 ; // free list was initially empty - fence() ; // ensure free list is always consistent - _free(i) = upper ; - } - } + Sz bucket = _s_bucket(sz ) ; + Idx& free = _free (bucket) ; + // vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv + if (!free) return Base::emplace_back( _s_sz(bucket) , ::forward(args)... ) ; + // ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + Idx res = free ; + free = Base::at(res).nxt ; fence() ; // ensure free list is always consistent //vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv Base::emplace(res,::forward(args)...) 
; @@ -193,15 +227,15 @@ namespace Store { if (!new_sz) { _pop(idx,old_sz) ; return ; } SWEAR( writable && new_sz<=old_sz , new_sz , old_sz ) ; ULock lock{_mutex} ; - uint8_t old_bucket = _s_bucket(old_sz) ; - uint8_t new_bucket = _s_bucket(new_sz) ; + Sz old_bucket = _s_bucket(old_sz) ; + Sz new_bucket = _s_bucket(new_sz) ; // deallocate extra room new_sz = _s_sz(new_bucket) ; old_sz = _s_sz(old_bucket) ; while (old_sz>new_sz) { // deallocate as much as possible in a single bucket and iterate - Sz extra_sz = old_sz-new_sz ; - uint8_t extra_bucket = _s_bucket(extra_sz) ; // the bucket that can contain extra_sz - Sz extra_bucket_sz = _s_sz(extra_bucket) ; SWEAR(extra_bucket_sz>=extra_sz) ; // _s_sz returns the largest size that fits in extra_bucket + Sz extra_sz = old_sz-new_sz ; + Sz extra_bucket = _s_bucket(extra_sz) ; // the bucket that can contain extra_sz + Sz extra_bucket_sz = _s_sz(extra_bucket) ; SWEAR(extra_bucket_sz>=extra_sz) ; // _s_sz returns the largest size that fits in extra_bucket { if (extra_bucket_sz>extra_sz) extra_bucket_sz = _s_sz(--extra_bucket) ; } SWEAR(extra_bucket_sz<=extra_sz) ; // but we want the largest bucket that fits in extra_sz // old_sz -= extra_bucket_sz ; @@ -216,19 +250,18 @@ namespace Store { Base::pop(idx) ; _dealloc(idx,_s_bucket(sz)) ; } - void _dealloc( Idx idx , uint8_t bucket ) requires( HasData) { + void _dealloc( Idx idx , Sz bucket ) requires( HasData) { Idx& free = _free(bucket) ; - Base::at(idx).nxt = free ; - Base::hdr().n_free = ::max(Base::hdr().n_free,bucket) ; + Base::at(idx).nxt = free ; fence() ; // ensure free list is always consistent free = idx ; } } ; - template void AllocFile::chk() const requires(!is_void_v) { + template void AllocFile::chk() const requires(!is_void_v) { Base::chk() ; ::vector free_map ; free_map.resize(size()) ; - for( uint8_t bucket = 0 ; bucket(+idx+_s_sz(bucket))<=size() , "free list out of range at ",idx ) ; diff --git a/src/store/vector.hh b/src/store/vector.hh index 0952df93..177df353 100644 --- a/src/store/vector.hh +++ b/src/store/vector.hh @@ -72,10 +72,10 @@ namespace Store { } - template,size_t MinSz=1,size_t LinearSz=16*MinSz> struct VectorFile - : AllocFile< false/*AutoLock*/ , Hdr_ , Idx_ , Vector::Chunk , Vector::Chunk::s_n_items(LinearSz) > - { using Base = AllocFile< false/*AutoLock*/ , Hdr_ , Idx_ , Vector::Chunk , Vector::Chunk::s_n_items(LinearSz) > ; - using Chunk = Vector::Chunk ; + template,size_t MinSz=1,uint8_t Mantissa=8> struct VectorFile + : AllocFile< false/*AutoLock*/ , Hdr_ , Idx_ , Vector::Chunk , Mantissa > + { using Base = AllocFile< false/*AutoLock*/ , Hdr_ , Idx_ , Vector::Chunk , Mantissa > ; + using Chunk = Vector::Chunk ; // using Hdr = Hdr_ ; using Idx = Idx_ ; @@ -171,7 +171,7 @@ namespace Store { IdxSz new_n = Chunk::s_n_items(chunk.sz+v.size()) ; // reallocate if (new_n>old_n) { - ::vector both = mk_vector(view(idx)) ; + ::vector both = mk_vector(view(idx)) ; for( Item const& x : v ) both.emplace_back(x) ; // vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv /**/ Base::pop (idx ) ; diff --git a/unit_tests/conflict.py b/unit_tests/conflict.py index eb7e5d77..352d1761 100644 --- a/unit_tests/conflict.py +++ b/unit_tests/conflict.py @@ -8,9 +8,7 @@ import lmake from lmake.rules import Rule,PyRule - lmake.manifest = ( - 'Lmakefile.py' - ,) + lmake.manifest = ('Lmakefile.py',) class Base(PyRule) : stems = { @@ -66,7 +64,7 @@ def cmd() : import ut - ut.lmake( 'chk' , new=1 , done=7 , may_rerun=2 , rerun=1 , steady=1 ) + ut.lmake( 'chk' , new=1 , done=8 , may_rerun=2 ) ut.lmake( 'chk' 
) # ensure up to date - ut.lmake( 'chk.w' , new=0 , done=6 , may_rerun=2 , rerun=2 , steady=2 ) + ut.lmake( 'chk.w' , new=0 , done=7 , may_rerun=2 , rerun=1 , steady=1 ) ut.lmake( 'chk.w' ) # ensure up to date diff --git a/unit_tests/critical.py b/unit_tests/critical.py index 80fe9d04..14945dd9 100644 --- a/unit_tests/critical.py +++ b/unit_tests/critical.py @@ -3,7 +3,8 @@ # This program is free software: you can redistribute/modify under the terms of the GPL-v3 (https://www.gnu.org/licenses/gpl-3.0.html). # This program is distributed WITHOUT ANY WARRANTY, without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -n_good = 2 +n_goods = 10 +n_bads = 10 if __name__!='__main__' : @@ -32,9 +33,9 @@ class Critical(Rule) : target = 'tgt' def cmd() : lmake.depend('src1','src2',critical=True) - lmake.depend(*(f'good{i}' for i in range(n_good)),critical=True) - if step==1 : lmake.depend('src1','bad0','bad0','bad1') - else : lmake.depend('src1' ) + lmake.depend(*(f'good{i}' for i in range(n_goods)),critical=True) + if step==1 : lmake.depend('src1','bad0',*(f'bad{i}' for i in range(n_bads))) + else : lmake.depend('src1' ) else : @@ -44,10 +45,10 @@ def cmd() : print('2',file=open('src2','w')) print('step=1',file=open('step.py','w')) - ut.lmake( 'tgt' , may_rerun=2 , was_dep_err=1 , done=n_good , failed=2 , new=2 , rc=1 ) # must discover good_*, then bad_* + ut.lmake( 'tgt' , may_rerun=2 , was_dep_err=1 , done=n_goods , failed=n_bads , new=2 , rc=1 ) # must discover good_*, then bad_* print('new 1',file=open('src1','w')) - ut.lmake( 'tgt' , dep_err=1 , changed=1 , rc=1 ) # src* are not critical, so error fires immediately + ut.lmake( 'tgt' , dep_err=1 , changed=1 , rc=1 ) # src* are not critical, so error fires immediately print('step=2',file=open('step.py','w')) - ut.lmake( 'tgt' , steady=n_good-1+1 , done=1 , rc=0 ) # modified critical good_0 implies that bad_* are not remade + ut.lmake( 'tgt' , steady=n_goods-1+1 , done=1 , rc=0 ) # modified critical good_0 implies that bad_* are not remade
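Closing note on the alloc.hh rework above : the old code split larger buckets on demand (tracking n_free), while the new _emplace/_dealloc keep one intrusive free list per bucket, popping from the exact bucket or emplacing at the back, with a fence so the list is never seen half-linked after a crash. A self-contained sketch of that discipline, using a trivial linear size-to-bucket mapping in place of the mantissa-based one :

#include <cstdint>
#include <cstdio>
#include <vector>

// stand-in for the file-backed storage : cells[0] is reserved so that index 0 can mean "no free slot"
struct ArenaSketch {
	std::vector<std::uint32_t> cells ;
	std::vector<std::uint32_t> free_ ;                       // one free list head per bucket
	ArenaSketch(std::size_t n_buckets) : cells(1,0) , free_(n_buckets,0) {}
	static std::size_t bucket_sz(std::size_t bucket) { return bucket+1 ; } // trivial mapping for the demo
	std::uint32_t alloc(std::size_t bucket) {
		std::uint32_t res = free_[bucket] ;
		if (!res) {                                          // free list empty : emplace at the back
			res = std::uint32_t(cells.size()) ;
			cells.resize( cells.size()+bucket_sz(bucket) ) ;
		} else {
			free_[bucket] = cells[res] ;                     // pop : the next link lives in the freed slot itself
		}
		return res ;
	}
	void dealloc( std::uint32_t idx , std::size_t bucket ) {
		cells[idx] = free_[bucket] ;                         // chain through the freed slot
		// the real code places a fence() here so the list stays consistent across a crash
		free_[bucket] = idx ;
	}
} ;

int main() {
	ArenaSketch a { 4 } ;
	std::uint32_t x = a.alloc(2) ;                           // no free slot yet : comes from the back
	a.dealloc(x,2) ;
	std::printf( "recycled : %d\n" , int(a.alloc(2)==x) ) ;  // prints : recycled : 1
	return 0 ;
}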