diff --git a/src/commands.rs b/src/commands.rs
index bdd3a3d7f..31e1b6801 100644
--- a/src/commands.rs
+++ b/src/commands.rs
@@ -32,7 +32,7 @@ use number_prefix::{
     Prefixed,
     Standalone,
 };
-use protobuf::RepeatedField;
+use protobuf::{RepeatedField, ProtobufError};
 use protocol::{
     CacheStats,
     ClientRequest,
@@ -545,43 +545,61 @@ fn handle_compile_response(mut creator: T,
         CompileResponse::CompileStarted(_) => {
             debug!("Server sent CompileStarted");
             // Wait for CompileFinished.
-            let mut res = conn.read_one_response().chain_err(|| {
-                //TODO: something better here?
-                "error reading compile response from server"
-            })?;
-            if res.has_compile_finished() {
-                trace!("Server sent CompileFinished");
-                handle_compile_finished(res.take_compile_finished(),
-                                        stdout, stderr)
-            } else {
-                bail!("unexpected response from server")
+            match conn.read_one_response() {
+                Ok(mut res) => {
+                    if res.has_compile_finished() {
+                        trace!("Server sent CompileFinished");
+                        return handle_compile_finished(res.take_compile_finished(),
+                                                       stdout, stderr)
+                    } else {
+                        bail!("unexpected response from server")
+                    }
+                }
+
+                // Currently the shutdown behavior of the remote sccache server
+                // is to wait at most N seconds for all active connections to
+                // finish and then close everything. If we get unlucky and don't
+                // get a response then we just forge ahead locally and run the
+                // compilation ourselves.
+                Err(ProtobufError::IoError(ref e))
+                    if e.kind() == io::ErrorKind::UnexpectedEof => {
+                    writeln!(io::stderr(),
+                             "warning: sccache server looks like it shut down \
+                              unexpectedly, compiling locally instead").unwrap();
+                }
+
+                Err(e) => return Err(e).chain_err(|| {
+                    //TODO: something better here?
+                    "error reading compile response from server"
+                })
             }
         }
         CompileResponse::UnhandledCompile(_) => {
             debug!("Server sent UnhandledCompile");
-            //TODO: possibly capture output here for testing.
-            let mut cmd = creator.new_command_sync(exe);
-            cmd.args(&cmdline)
-                .current_dir(cwd);
-            if log_enabled!(Trace) {
-                trace!("running command: {:?}", cmd);
-            }
-            let output = try!(core.run(run_input_output(cmd, None)));
-            if !output.stdout.is_empty() {
-                try!(stdout.write_all(&output.stdout));
-            }
-            if !output.stderr.is_empty() {
-                try!(stderr.write_all(&output.stderr));
-            }
-            Ok(output.status.code().unwrap_or_else(|| {
-                if let Some(sig) = status_signal(output.status) {
-                    println!("Compile terminated by signal {}", sig);
-                }
-                // Arbitrary.
-                2
-            }))
         }
+    };
+
+    //TODO: possibly capture output here for testing.
+    let mut cmd = creator.new_command_sync(exe);
+    cmd.args(&cmdline)
+        .current_dir(cwd);
+    if log_enabled!(Trace) {
+        trace!("running command: {:?}", cmd);
+    }
+    let output = try!(core.run(run_input_output(cmd, None)));
+    if !output.stdout.is_empty() {
+        try!(stdout.write_all(&output.stdout));
+    }
+    if !output.stderr.is_empty() {
+        try!(stderr.write_all(&output.stderr));
+    }
+    Ok(output.status.code().unwrap_or_else(|| {
+        if let Some(sig) = status_signal(output.status) {
+            println!("Compile terminated by signal {}", sig);
+        }
+        // Arbitrary.
+        2
+    }))
 }
 
 /// Send a `Compile` request to the sccache server `conn`, and handle the response.
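
A note on the commands.rs hunk above: the interesting part is the error triage in the new `match`. Only a `ProtobufError::IoError` whose kind is `io::ErrorKind::UnexpectedEof` (the server closed the socket during its shutdown grace window) drops through to the local-compile fallback; every other error still propagates via `chain_err`. Below is a minimal standalone sketch of that triage using only the standard library; the names `read_reply` and `handle` are invented for illustration and none of sccache's own types or APIs appear here.

    use std::io::{self, Read};

    // Stand-in for reading a fixed-size reply off the server socket.
    fn read_reply<R: Read>(mut conn: R) -> io::Result<Vec<u8>> {
        let mut buf = [0u8; 4];
        // read_exact fails with ErrorKind::UnexpectedEof if the peer hangs up early.
        conn.read_exact(&mut buf)?;
        Ok(buf.to_vec())
    }

    fn handle<R: Read>(conn: R) -> io::Result<&'static str> {
        match read_reply(conn) {
            Ok(_) => Ok("server replied"),
            // The server went away mid-request: degrade gracefully instead of failing.
            Err(ref e) if e.kind() == io::ErrorKind::UnexpectedEof => Ok("compiling locally"),
            // Anything else is a real error and is reported as such.
            Err(e) => Err(e),
        }
    }

    fn main() {
        // An empty reader behaves like a peer that shut down before answering.
        println!("{}", handle(io::empty()).unwrap());
    }

Running this prints "compiling locally", since the empty reader looks exactly like a server that disappeared before sending its response.
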
diff --git a/src/compiler/compiler.rs b/src/compiler/compiler.rs
index 4332a2731..f622f4f84 100644
--- a/src/compiler/compiler.rs
+++ b/src/compiler/compiler.rs
@@ -49,6 +49,7 @@ use std::time::{
 };
 use tempdir::TempDir;
 use util::fmt_duration_as_secs;
+use tokio_core::reactor::{Handle, Timeout};
 
 use errors::*;
 
@@ -99,7 +100,8 @@ pub trait CompilerHasher: fmt::Debug + Send + 'static
                              arguments: Vec,
                              cwd: String,
                              cache_control: CacheControl,
-                             pool: CpuPool)
+                             pool: CpuPool,
+                             handle: Handle)
                              -> SFuture<(CompileResult, process::Output)>
     {
         let out_file = self.output_file().into_owned();
@@ -126,6 +128,20 @@ pub trait CompilerHasher: fmt::Debug + Send + 'static
             storage.get(&key)
         };
 
+        // Set a maximum time limit for the cache to respond before we forge
+        // ahead ourselves with a compilation.
+        let timeout = Duration::new(60, 0);
+        let timeout = Timeout::new(timeout, &handle).into_future().flatten();
+
+        let cache_status = cache_status.map(Some);
+        let timeout = timeout.map(|_| None).chain_err(|| "timeout error");
+        let cache_status = cache_status.select(timeout).then(|r| {
+            match r {
+                Ok((e, _other)) => Ok(e),
+                Err((e, _other)) => Err(e),
+            }
+        });
+
         // Check the result of the cache lookup.
         Box::new(cache_status.and_then(move |result| {
             let duration = start.elapsed();
@@ -135,7 +151,7 @@ pub trait CompilerHasher: fmt::Debug + Send + 'static
                 .collect::>();
 
             let miss_type = match result {
-                Cache::Hit(mut entry) => {
+                Some(Cache::Hit(mut entry)) => {
                     debug!("[{}]: Cache hit in {}", out_file, fmt_duration_as_secs(&duration));
                     let mut stdout = io::Cursor::new(vec!());
                     let mut stderr = io::Cursor::new(vec!());
@@ -161,14 +177,18 @@ pub trait CompilerHasher: fmt::Debug + Send + 'static
                         (result, output)
                     })) as SFuture<_>
                 }
-                Cache::Miss => {
+                Some(Cache::Miss) => {
                     debug!("[{}]: Cache miss", out_file);
                     MissType::Normal
                 }
-                Cache::Recache => {
+                Some(Cache::Recache) => {
                     debug!("[{}]: Cache recache", out_file);
                     MissType::ForcedRecache
                 }
+                None => {
+                    debug!("[{}]: Cache timed out", out_file);
+                    MissType::TimedOut
+                }
             };
 
             // Cache miss, so compile it.
@@ -296,6 +316,8 @@ pub enum MissType {
     Normal,
     /// Cache lookup was overridden, recompilation was forced.
     ForcedRecache,
+    /// Cache took too long to respond.
+    TimedOut,
 }
 
 /// Information about a successful cache write.
@@ -619,6 +641,7 @@ mod test {
     use std::time::Duration;
    use std::usize;
     use test::utils::*;
+    use tokio_core::reactor::Core;
 
     #[test]
     fn test_detect_compiler_kind_gcc() {
@@ -717,6 +740,8 @@ mod test {
        let creator = new_creator();
        let f = TestFixture::new();
        let pool = CpuPool::new(1);
+       let core = Core::new().unwrap();
+       let handle = core.handle();
        let storage = DiskCache::new(&f.tempdir.path().join("cache"),
                                     usize::MAX,
                                     &pool);
@@ -753,7 +778,8 @@ mod test {
                                                    arguments.clone(),
                                                    cwd.clone(),
                                                    CacheControl::Default,
-                                                   pool.clone()).wait().unwrap();
+                                                   pool.clone(),
+                                                   handle.clone()).wait().unwrap();
        // Ensure that the object file was created.
        assert_eq!(true, fs::metadata(&obj).and_then(|m| Ok(m.len() > 0)).unwrap());
        match cached {
@@ -776,7 +802,8 @@ mod test {
                                                    arguments,
                                                    cwd,
                                                    CacheControl::Default,
-                                                   pool.clone()).wait().unwrap();
+                                                   pool.clone(),
+                                                   handle).wait().unwrap();
        // Ensure that the object file was created.
        assert_eq!(true, fs::metadata(&obj).and_then(|m| Ok(m.len() > 0)).unwrap());
        assert_eq!(CompileResult::CacheHit(Duration::new(0, 0)), cached);
@@ -792,6 +819,8 @@ mod test {
        let creator = new_creator();
        let f = TestFixture::new();
        let pool = CpuPool::new(1);
+       let core = Core::new().unwrap();
+       let handle = core.handle();
        let storage = DiskCache::new(&f.tempdir.path().join("cache"),
                                     usize::MAX,
                                     &pool);
@@ -828,7 +857,8 @@ mod test {
                                                    arguments.clone(),
                                                    cwd.clone(),
                                                    CacheControl::Default,
-                                                   pool.clone()).wait().unwrap();
+                                                   pool.clone(),
+                                                   handle.clone()).wait().unwrap();
        // Ensure that the object file was created.
        assert_eq!(true, fs::metadata(&obj).and_then(|m| Ok(m.len() > 0)).unwrap());
        match cached {
@@ -852,7 +882,8 @@ mod test {
                                                    arguments,
                                                    cwd,
                                                    CacheControl::Default,
-                                                   pool).wait().unwrap();
+                                                   pool,
+                                                   handle).wait().unwrap();
        // Ensure that the object file was created.
        assert_eq!(true, fs::metadata(&obj).and_then(|m| Ok(m.len() > 0)).unwrap());
        assert_eq!(CompileResult::CacheHit(Duration::new(0, 0)), cached);
@@ -868,6 +899,8 @@ mod test {
        let creator = new_creator();
        let f = TestFixture::new();
        let pool = CpuPool::new(1);
+       let core = Core::new().unwrap();
+       let handle = core.handle();
        let storage = DiskCache::new(&f.tempdir.path().join("cache"),
                                     usize::MAX,
                                     &pool);
@@ -908,7 +941,8 @@ mod test {
                                                    arguments.clone(),
                                                    cwd.clone(),
                                                    CacheControl::Default,
-                                                   pool.clone()).wait().unwrap();
+                                                   pool.clone(),
+                                                   handle.clone()).wait().unwrap();
        // Ensure that the object file was created.
        assert_eq!(true, fs::metadata(&obj).and_then(|m| Ok(m.len() > 0)).unwrap());
        match cached {
@@ -928,7 +962,8 @@ mod test {
                                                    arguments,
                                                    cwd,
                                                    CacheControl::ForceRecache,
-                                                   pool).wait().unwrap();
+                                                   pool,
+                                                   handle).wait().unwrap();
        // Ensure that the object file was created.
        assert_eq!(true, fs::metadata(&obj).and_then(|m| Ok(m.len() > 0)).unwrap());
        match cached {
@@ -950,6 +985,8 @@ mod test {
        let creator = new_creator();
        let f = TestFixture::new();
        let pool = CpuPool::new(1);
+       let core = Core::new().unwrap();
+       let handle = core.handle();
        let storage = DiskCache::new(&f.tempdir.path().join("cache"),
                                     usize::MAX,
                                     &pool);
@@ -973,7 +1010,8 @@ mod test {
                                                    arguments,
                                                    cwd,
                                                    CacheControl::Default,
-                                                   pool).wait().unwrap();
+                                                   pool,
+                                                   handle).wait().unwrap();
        assert_eq!(cached, CompileResult::Error);
        assert_eq!(exit_status(1), res.status);
        // Shouldn't get anything on stdout, since that would just be preprocessor spew!
diff --git a/src/server.rs b/src/server.rs
index 9a4ccb5a2..c12f2292a 100644
--- a/src/server.rs
+++ b/src/server.rs
@@ -244,6 +244,15 @@ impl SccacheServer {
             handle: handle.clone(),
             timeout_dur: timeout,
         };
+        let shutdown_idle = shutdown_idle.map(|a| {
+            info!("shutting down due to being idle");
+            a
+        });
+
+        let shutdown = shutdown.map(|a| {
+            info!("shutting down due to explicit signal");
+            a
+        });
 
         let server = future::select_all(vec![
             Box::new(server) as Box>,
@@ -255,6 +264,9 @@ impl SccacheServer {
         core.run(server)
             .map_err(|p| p.0)?;
 
+        info!("moving into the shutdown phase now, waiting at most 10 seconds \
+               for all client requests to complete");
+
         // Once our server has shut down either due to inactivity or a manual
         // request we still need to give a bit of time for all active
        // connections to finish. This `wait` future will resolve once all
@@ -265,6 +277,8 @@ impl SccacheServer {
         core.run(wait.select(Timeout::new(Duration::new(10, 0), &handle)?))
             .map_err(|p| p.0)?;
 
+        info!("ok, fully shutting down now");
+
         Ok(())
     }
 }
@@ -543,7 +557,8 @@ impl SccacheService
                                                   arguments,
                                                   cwd,
                                                   cache_control,
-                                                  self.pool.clone());
+                                                  self.pool.clone(),
+                                                  self.handle.clone());
         let me = self.clone();
         let task = result.then(move |result| {
             let mut res = ServerResponse::new();
@@ -569,6 +584,10 @@ impl SccacheService
                             stats.cache_misses += 1;
                             stats.forced_recaches += 1;
                         }
+                        MissType::TimedOut => {
+                            stats.cache_misses += 1;
+                            stats.cache_timeouts += 1;
+                        }
                     }
                     stats.cache_read_miss_duration += duration;
                     cache_write = Some(future);
@@ -649,6 +668,8 @@ struct ServerStats {
     pub cache_hits: u64,
     /// The count of cache misses for handled compile requests.
     pub cache_misses: u64,
+    /// The count of cache misses because the cache took too long to respond.
+    pub cache_timeouts: u64,
     /// The count of compilations which were successful but couldn't be cached.
     pub non_cacheable_compilations: u64,
     /// The count of compilations which forcibly ignored the cache.
@@ -678,6 +699,7 @@ impl Default for ServerStats {
             cache_errors: u64::default(),
             cache_hits: u64::default(),
             cache_misses: u64::default(),
+            cache_timeouts: u64::default(),
             non_cacheable_compilations: u64::default(),
             forced_recaches: u64::default(),
             cache_write_errors: u64::default(),
@@ -720,6 +742,7 @@ impl ServerStats {
         set_stat!(stats_vec, self.requests_executed, "Compile requests executed");
         set_stat!(stats_vec, self.cache_hits, "Cache hits");
         set_stat!(stats_vec, self.cache_misses, "Cache misses");
+        set_stat!(stats_vec, self.cache_timeouts, "Cache timeouts");
         set_stat!(stats_vec, self.forced_recaches, "Forced recaches");
         set_stat!(stats_vec, self.cache_write_errors, "Cache write errors");
         set_stat!(stats_vec, self.compile_fails, "Compilation failures");
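
The compiler.rs half of this change is, at its core, a race: the storage lookup future is mapped to `Some(cache_status)`, a 60-second reactor `Timeout` is mapped to `None`, the two are combined with `select`, and the existing `match` grows a `None` arm that records `MissType::TimedOut` and then takes the normal compile path. The following standalone sketch shows the same shape of that race. It is only a sketch under a few assumptions: futures 0.1 and tokio-core 0.1 (the crates this diff is written against), a `lookup` future standing in for `storage.get(&key)`, and plain `io::Error` in place of the error-chain `chain_err` call used in the real code.

    extern crate futures;
    extern crate tokio_core;

    use std::io;
    use std::time::Duration;

    use futures::{Future, IntoFuture};
    use futures::future;
    use tokio_core::reactor::{Core, Timeout};

    fn main() {
        let mut core = Core::new().unwrap();
        let handle = core.handle();

        // Stand-in for the cache lookup; here it resolves immediately.
        let lookup = future::ok::<&str, io::Error>("cache hit");

        // Reactor-driven timer that resolves with () after 60 seconds.
        let timeout = Timeout::new(Duration::new(60, 0), &handle)
            .into_future()
            .flatten();

        // Tag the two sides so a single Option distinguishes "answered" from "too slow".
        let lookup = lookup.map(Some);
        let timeout = timeout.map(|_| None);

        // Whichever future finishes first wins; the still-pending half is dropped.
        let raced = lookup.select(timeout).then(|r| match r {
            Ok((value, _other)) => Ok(value),
            Err((err, _other)) => Err(err),
        });

        match core.run(raced) {
            Ok(Some(v)) => println!("lookup finished in time: {}", v),
            Ok(None) => println!("lookup timed out; treat it as a cache miss"),
            Err(e) => println!("lookup failed: {}", e),
        }
    }

Folding the timeout into the same `Option` as the lookup result, rather than surfacing it as an error, is what lets the server side count it separately as `cache_timeouts` while still flowing through the ordinary cache-miss compilation path.
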