Commit 6ea82dc4 authored by Jens Korinth's avatar Jens Korinth
Browse files

Closes #137 - TaPaSCo is stuck after all jobs finished in verbose mode

parent 961d9bc6
Pipeline #91 passed with stage
in 3 minutes and 27 seconds
...@@ -83,6 +83,7 @@ class VivadoComposer()(implicit cfg: Configuration) extends Composer { ...@@ -83,6 +83,7 @@ class VivadoComposer()(implicit cfg: Configuration) extends Composer {
stdoutString => logger.trace("Vivado: {}", stdoutString), stdoutString => logger.trace("Vivado: {}", stdoutString),
stderrString => logger.trace("Vivado ERR: {}", stderrString) stderrString => logger.trace("Vivado ERR: {}", stderrString)
)) ))
lt.closeAll
// check retcode // check retcode
if (r == InterruptibleProcess.TIMEOUT_RETCODE) { if (r == InterruptibleProcess.TIMEOUT_RETCODE) {
......
...@@ -104,6 +104,7 @@ class MultiFileWatcher(pollInterval: Int = MultiFileWatcher.POLL_INTERVAL) exten ...@@ -104,6 +104,7 @@ class MultiFileWatcher(pollInterval: Int = MultiFileWatcher.POLL_INTERVAL) exten
def run() { def run() {
try { try {
while (! _files.isEmpty || ! _waitingFor.isEmpty) { while (! _files.isEmpty || ! _waitingFor.isEmpty) {
Thread.sleep(pollInterval)
val waits = _waitingFor.synchronized { _waitingFor.toList } val waits = _waitingFor.synchronized { _waitingFor.toList }
waits foreach { p => waits foreach { p =>
logger.trace("waiting for {}", p) logger.trace("waiting for {}", p)
...@@ -117,7 +118,6 @@ class MultiFileWatcher(pollInterval: Int = MultiFileWatcher.POLL_INTERVAL) exten ...@@ -117,7 +118,6 @@ class MultiFileWatcher(pollInterval: Int = MultiFileWatcher.POLL_INTERVAL) exten
publish(LinesAdded(p, lines)) publish(LinesAdded(p, lines))
} }
} }
Thread.sleep(pollInterval)
} }
_watchThread.set(None) _watchThread.set(None)
} catch { case e: InterruptedException => _watchThread.set(None) } } catch { case e: InterruptedException => _watchThread.set(None) }
......
...@@ -56,8 +56,8 @@ private class DefaultResourceMonitor extends ResourceMonitor { ...@@ -56,8 +56,8 @@ private class DefaultResourceMonitor extends ResourceMonitor {
! ((cons fold ResourceConsumer.NullConsumer) (_ + _) usesMoreThan _available) ! ((cons fold ResourceConsumer.NullConsumer) (_ + _) usesMoreThan _available)
} }
def doStart(t: ResourceConsumer): Unit = if (canStart(t)) _cons += t def doStart(t: ResourceConsumer): Unit = if (canStart(t)) _cons.synchronized { _cons += t }
def didFinish(t: ResourceConsumer): Unit = _cons -= t def didFinish(t: ResourceConsumer): Unit = _cons.synchronized { _cons -= t }
def canStart(t: ResourceConsumer): Boolean = Slurm.enabled || (t.canStart && check(_cons + t)) def canStart(t: ResourceConsumer): Boolean = Slurm.enabled || (t.canStart && check(_cons + t))
def status: String = "%d active consumers, %d/%d CPUs, %1.1f/%1.1f GiB RAM, %d total licences in use".format( def status: String = "%d active consumers, %d/%d CPUs, %1.1f/%1.1f GiB RAM, %d total licences in use".format(
_cons.size, current.cpus, _cpus, _cons.size, current.cpus, _cpus,
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment