Commit 40e9463a authored by Jens Korinth's avatar Jens Korinth

Platform: Add latency function

* extended tapasco_benchmark to record latencies for runtimes between
  2^0 and 2^31 clock cycles
* extended benchmark Json to record new data
* wrote linear interpolation base class to interpolate between
  measurements for both transfer speed and latencies
* fixed unit test, supplied Arbitrary for the InterruptLatency class
* updated and pretty-printed the Json example
parent de5ccefa
......@@ -3,17 +3,20 @@
* @brief A class wrapping a lock-free cumulative (running) average.
* @author J. Korinth, TU Darmstadt (jk@esa.cs.tu-darmstadt.de)
**/
#ifndef __CUMULATIVE_AVERAGE_HPP__
#define __CUMULATIVE_AVERAGE_HPP__
#ifndef CUMULATIVE_AVERAGE_HPP__
#define CUMULATIVE_AVERAGE_HPP__
#include <atomic>
#include <limits>
/**
* CumulativeAverage provides a stateful running average of a value.
**/
template <typename T> class CumulativeAverage {
public:
CumulativeAverage(T const init) : _data({.value = init, .delta = init, .count = 1}) {}
CumulativeAverage(T const init) : _data({.value = init, .delta = init,
.min = std::numeric_limits<T>::max(), .max = std::numeric_limits<T>::min(),
.count = 1}) {}
virtual ~CumulativeAverage() {}
T update(T const t) {
......@@ -24,22 +27,28 @@ public:
n.count = o.count + 1;
n.value = (o.value * o.count + t) / n.count;
n.delta = n.value - o.value;
n.min = n.min < t ? n.min : t;
n.max = n.max > t ? n.max : t;
} while (! _data.compare_exchange_strong(o, n));
return n.delta;
}
T operator ()(T const t) { return update(t); }
T operator ()() const { return _data.load().value; }
size_t size() const { return _data.load().count; }
T delta() const { return _data.load().delta; }
T operator ()() const { return _data.load().value; }
size_t size() const { return _data.load().count; }
T delta() const { return _data.load().delta; }
T min() const { T min = _data.load().min; return min < std::numeric_limits<T>::max() ? min : -1; }
T max() const { T max = _data.load().max; return max > std::numeric_limits<T>::min() ? max : -1; }
private:
struct data_t {
T value;
T delta;
T min;
T max;
size_t count;
};
std::atomic<struct data_t> _data; // thread-safe storage
};
#endif // __CUMULATIVE_AVERAGE_HPP__
#endif // CUMULATIVE_AVERAGE_HPP__
/* vim: set foldmarker=@{,@} foldlevel=0 foldmethod=marker : */
......@@ -8,8 +8,8 @@
* should run at 100 Mhz (assumption of timing calculations).
* @author J. Korinth, TU Darmstadt (jk@esa.cs.tu-darmstadt.de)
**/
#ifndef __INTERRUPT_LATENCY_HPP__
#define __INTERRUPT_LATENCY_HPP__
#ifndef INTERRUPT_LATENCY_HPP__
#define INTERRUPT_LATENCY_HPP__
#include <atomic>
#include <thread>
......@@ -51,11 +51,17 @@ public:
int x, y;
getyx(stdscr, y, x);
future<void> f = async(launch::async, [&]() { trigger(stop, clock_cycles, cavg); });
const size_t m_runs = 100.0 * pow(M_E, -log(runtime_usecs / 10000.0));
const size_t min_runs = m_runs > 100 ? m_runs : 100;
do {
mvprintw(y, x, "Runtime: %8zu us, Latency: %8.2f", runtime_usecs, cavg());
move(y, 0);
clrtoeol();
mvprintw(y, x, "Runtime: %8zu us, Latency: % 12.1f, Min: % 12.1f, Max: % 12.1f, Count: %zu/%zu",
runtime_usecs, cavg(), cavg.min(), cavg.max(), cavg.size(), min_runs);
refresh();
usleep(1000);
} while (getch() == ERR && (fabs(cavg.delta()) > 0.01 || cavg.size() < 10000));
} while (getch() == ERR && (fabs(cavg.delta()) > 0.01 || cavg.size() < min_runs));
stop = true;
f.get();
move(y+1, 0);
......@@ -63,6 +69,30 @@ public:
return cavg();
}
/**
 * Measures the average job roundtrip latency for a fixed job runtime,
 * redrawing an interactive (ncurses) status line until the average has
 * stabilized. Blocks until the measurement finishes or a key is pressed.
 * @param clock_cycles job runtime in clock cycles.
 * @param min_runs minimal number of measurements to collect (default 100).
 * @return average measured latency in microseconds.
 **/
double atcycles(uint32_t const clock_cycles, size_t const min_runs = 100) {
CumulativeAverage<double> cavg { 0 };
// NOTE(review): stop is shared with the async measurement thread as a
// plain bool (trigger takes volatile bool&); strictly a data race --
// std::atomic<bool> would be preferable, but changing it would require
// touching trigger's signature as well.
bool stop = false;
// ncurses setup: no input echo, hidden cursor, non-blocking getch()
initscr(); noecho(); curs_set(0); timeout(0);
int x, y, maxx, maxy;
getyx(stdscr, y, x);
getmaxyx(stdscr, maxy, maxx);
// run the measurement loop asynchronously; it updates cavg until stop is set
future<void> f = async(launch::async, [&]() { trigger(stop, clock_cycles, cavg); });
do {
// redraw the statistics line in place
move(y, 0);
clrtoeol();
mvprintw(y, x, "Runtime: %12zu cc, Latency: % 12.1f, Min: % 12.1f, Max: % 12.1f, Count: %zu/%zu",
clock_cycles, cavg(), cavg.min(), cavg.max(), cavg.size(), min_runs);
refresh();
usleep(1000);
// loop until a key is pressed, or the running average is stable
// (|delta| <= 0.001) and at least min_runs samples were taken
} while (getch() == ERR && (fabs(cavg.delta()) > 0.001 || cavg.size() < min_runs));
stop = true;
f.get();
// park the cursor below the status line (wrap at screen height)
move((y+1) % maxy, 0);
endwin();
return cavg();
}
private:
void trigger(volatile bool& stop, uint32_t const clock_cycles, CumulativeAverage<double>& cavg) {
tapasco_res_t res;
......@@ -87,5 +117,5 @@ private:
Tapasco& tapasco;
};
#endif /* __INTERRUPT_LATENCY_HPP__ */
#endif /* INTERRUPT_LATENCY_HPP__ */
/* vim: set foldmarker=@{,@} foldlevel=0 foldmethod=marker : */
......@@ -17,7 +17,7 @@ public:
if (r) throw r;
}
virtual ~MonitorScreen() {
for (struct slot_t *sp : slots) delete sp;
for (slot_t *sp : slots) delete sp;
}
protected:
......@@ -30,7 +30,7 @@ protected:
int start_col = (cols - w * colc) / 2;
int sid = 0;
int y = h;
for (const struct slot_t *sp : slots) {
for (const slot_t *sp : slots) {
render_slot(sp, start_row, start_col);
if (--y > 0)
start_row += rowc;
......@@ -72,7 +72,7 @@ protected:
intc_isr[intc] = 0xDEADBEEF;
}
}
for (struct slot_t *sp : slots) {
for (slot_t *sp : slots) {
if (platform::platform_read_ctl(sp->base_addr + 0x0c, 4, &sp->isr,
platform::PLATFORM_CTL_FLAGS_NONE) != platform::PLATFORM_SUCCESS) {
sp->isr = 0xDEADBEEF;
......@@ -108,7 +108,7 @@ private:
platform::platform_ctl_addr_t base_addr;
};
void render_slot(const struct slot_t *slot, int start_row, int start_col) {
void render_slot(const slot_t *slot, int start_row, int start_col) {
attron(A_REVERSE);
mvprintw(start_row++, start_col, "# : %10u ", slot->slot_id);
mvprintw(start_row, start_col, "ID :");
......@@ -290,7 +290,7 @@ private:
return cnt > 0 ? 0 : -3;
}
vector<struct slot_t *> slots;
vector<slot_t *> slots;
vector<platform::platform_ctl_addr_t> intc_addr;
uint32_t intc_isr[4];
Tapasco *tapasco;
......
......@@ -37,6 +37,14 @@ struct transfer_speed_t {
}; }
};
/// Record of a single latency measurement: average roundtrip latency in
/// microseconds for a job running the given number of clock cycles.
struct interrupt_latency_t {
  size_t cycle_count;  ///< job runtime in clock cycles
  double latency_us;   ///< measured average latency in microseconds

  /// Serializes this measurement into a Json object.
  Json to_json() const {
    const Json::object obj {
      { "Cycle Count", static_cast<double>(cycle_count) },
      { "Latency",     latency_us }
    };
    return obj;
  }
};
int main(int argc, const char *argv[]) {
Tapasco tapasco;
......@@ -45,7 +53,9 @@ int main(int argc, const char *argv[]) {
struct utsname uts;
uname(&uts);
vector<Json> speed;
struct transfer_speed_t ts;
//struct transfer_speed_t ts;
vector<Json> latency;
struct interrupt_latency_t ls;
string platform = "vc709";
if (argc < 2) {
......@@ -67,7 +77,7 @@ int main(int argc, const char *argv[]) {
// measure for chunk sizes 2^8 - 2^31 (2GB) bytes
for (int i = 8; i < 32; ++i) {
ts.chunk_sz = 1 << i;
/*ts.chunk_sz = 1 << i;
ts.speed_r = tp(ts.chunk_sz, TransferSpeed::OP_COPYFROM);
ts.speed_w = tp(ts.chunk_sz, TransferSpeed::OP_COPYTO);
ts.speed_rw = tp(ts.chunk_sz, TransferSpeed::OP_COPYFROM | TransferSpeed::OP_COPYTO);
......@@ -77,12 +87,16 @@ int main(int argc, const char *argv[]) {
<< ", r/w: " << ts.speed_rw << " MiB/s"
<< endl;
Json json = ts.to_json();
speed.push_back(json);
speed.push_back(json);*/
}
// measure average job roundtrip latency in the interval 1us - 100ms
double const rl = il(0);
cout << "Latency @ random runtime between 1us-100ms: " << rl << " us" << endl;
// measure average job roundtrip latency for clock cycles counts
// between 2^0 and 2^31
for (size_t i = 0; i < 32; ++i) {
ls.cycle_count = 1UL << i;
ls.latency_us = il.atcycles(ls.cycle_count);
cout << "Latency @ " << ls.cycle_count << "cc runtime: " << ls.latency_us << " us" << endl;
}
// record current time
time_t tt = chrono::system_clock::to_time_t(chrono::system_clock::now());
......@@ -102,9 +116,9 @@ int main(int argc, const char *argv[]) {
}
},
{"Transfer Speed", speed},
{"Job Roundtrip Overhead", rl},
{"Interrupt Latency", latency},
{"Library Versions", Json::object {
{"TPC API", tapasco::tapasco_version()},
{"Tapasco API", tapasco::tapasco_version()},
{"Platform API", platform::platform_version()}
}
}
......
{"Host": {"Machine": "x86_64", "Node": "mountdoom", "Operating System": "Linux", "Release": "3.19.8-100.fc20.x86_64", "Version": "#1 SMP Tue May 12 17:08:50 UTC 2015"}, "Job Roundtrip Overhead": 90.900809919008182, "Library Versions": {"Platform API": "1.2.1", "TPC API": "1.2"}, "Timestamp": "2016-04-20 16:33:49", "Transfer Speed": [{"Chunk Size": 256, "Read": 49.329030801393962, "ReadWrite": 55.568662824206989, "Write": 48.829431984156649}, {"Chunk Size": 512, "Read": 99.711073192785094, "ReadWrite": 106.41911067833198, "Write": 91.843507830807283}, {"Chunk Size": 1024, "Read": 194.52247367498106, "ReadWrite": 206.27983738461907, "Write": 174.38052151696462}, {"Chunk Size": 2048, "Read": 371.6782428456554, "ReadWrite": 380.91467182463464, "Write": 325.40049755718161}, {"Chunk Size": 4096, "Read": 684.745320993988, "ReadWrite": 686.36036417201785, "Write": 586.86389287107374}, {"Chunk Size": 8192, "Read": 1250.5008574526241, "ReadWrite": 1218.1428227833037, "Write": 1030.7088569409982}, {"Chunk Size": 16384, "Read": 2026.1898750593398, "ReadWrite": 1950.3061380522922, "Write": 1742.6575694856201}, {"Chunk Size": 32768, "Read": 2968.1108195912561, "ReadWrite": 2829.2075588814191, "Write": 2634.9623567740964}, {"Chunk Size": 65536, "Read": 3574.6895965870904, "ReadWrite": 3507.9793903906839, "Write": 3264.0111182613077}, {"Chunk Size": 131072, "Read": 4045.004247835624, "ReadWrite": 4001.5013015672289, "Write": 3815.5274786894447}, {"Chunk Size": 262144, "Read": 4384.3815829308041, "ReadWrite": 4200.9501087840708, "Write": 3998.2952464074478}, {"Chunk Size": 524288, "Read": 4577.7959688291603, "ReadWrite": 4289.9518289243551, "Write": 2948.3745818585521}, {"Chunk Size": 1048576, "Read": 4178.4256565802325, "ReadWrite": 3983.4937552016727, "Write": 3995.3812092607886}, {"Chunk Size": 2097152, "Read": 4428.582495387981, "ReadWrite": 3738.9455160871075, "Write": 3867.7089852089889}, {"Chunk Size": 4194304, "Read": 3889.6524963123138, "ReadWrite": 3545.0135744438817, 
"Write": 3583.2993572493579}, {"Chunk Size": 8388608, "Read": 3598.3527292972121, "ReadWrite": 3293.1267171672935, "Write": 3382.630246092891}, {"Chunk Size": 16777216, "Read": 3429.053370509484, "ReadWrite": 3334.438865174192, "Write": 3332.0624094774985}, {"Chunk Size": 33554432, "Read": 3322.1144740468026, "ReadWrite": 3182.0719464635727, "Write": 3178.9272608933888}]}
{
"Host" : {
"Machine" : "x86_64",
"Node" : "mountdoom",
"Operating System" : "Linux",
"Release" : "3.19.8-100.fc20.x86_64",
"Version" : "#1 SMP Tue May 12 17:08:50 UTC 2015"
},
"Library Versions" : {
"Platform API" : "1.2.1",
"TPC API" : "1.2"
},
"Timestamp" : "2016-04-20 16:33:49",
"Transfer Speed" : [ {
"Chunk Size" : 256,
"Read" : 49.329030801393962,
"ReadWrite" : 55.568662824206989,
"Write" : 48.829431984156649
}, {
"Chunk Size" : 512,
"Read" : 99.711073192785094,
"ReadWrite" : 106.41911067833198,
"Write" : 91.843507830807283
}, {
"Chunk Size" : 1024,
"Read" : 194.52247367498106,
"ReadWrite" : 206.27983738461907,
"Write" : 174.38052151696462
}, {
"Chunk Size" : 2048,
"Read" : 371.6782428456554,
"ReadWrite" : 380.91467182463464,
"Write" : 325.40049755718161
}, {
"Chunk Size" : 4096,
"Read" : 684.745320993988,
"ReadWrite" : 686.36036417201785,
"Write" : 586.86389287107374
}, {
"Chunk Size" : 8192,
"Read" : 1250.5008574526241,
"ReadWrite" : 1218.1428227833037,
"Write" : 1030.7088569409982
}, {
"Chunk Size" : 16384,
"Read" : 2026.1898750593398,
"ReadWrite" : 1950.3061380522922,
"Write" : 1742.6575694856201
}, {
"Chunk Size" : 32768,
"Read" : 2968.1108195912561,
"ReadWrite" : 2829.2075588814191,
"Write" : 2634.9623567740964
}, {
"Chunk Size" : 65536,
"Read" : 3574.6895965870904,
"ReadWrite" : 3507.9793903906839,
"Write" : 3264.0111182613077
}, {
"Chunk Size" : 131072,
"Read" : 4045.004247835624,
"ReadWrite" : 4001.5013015672289,
"Write" : 3815.5274786894447
}, {
"Chunk Size" : 262144,
"Read" : 4384.3815829308041,
"ReadWrite" : 4200.9501087840708,
"Write" : 3998.2952464074478
}, {
"Chunk Size" : 524288,
"Read" : 4577.7959688291603,
"ReadWrite" : 4289.9518289243551,
"Write" : 2948.3745818585521
}, {
"Chunk Size" : 1048576,
"Read" : 4178.4256565802325,
"ReadWrite" : 3983.4937552016727,
"Write" : 3995.3812092607886
}, {
"Chunk Size" : 2097152,
"Read" : 4428.582495387981,
"ReadWrite" : 3738.9455160871075,
"Write" : 3867.7089852089889
}, {
"Chunk Size" : 4194304,
"Read" : 3889.6524963123138,
"ReadWrite" : 3545.0135744438817,
"Write" : 3583.2993572493579
}, {
"Chunk Size" : 8388608,
"Read" : 3598.3527292972121,
"ReadWrite" : 3293.1267171672935,
"Write" : 3382.630246092891
}, {
"Chunk Size" : 16777216,
"Read" : 3429.053370509484,
"ReadWrite" : 3334.438865174192,
"Write" : 3332.0624094774985
}, {
"Chunk Size" : 33554432,
"Read" : 3322.1144740468026,
"ReadWrite" : 3182.0719464635727,
"Write" : 3178.9272608933888
} ],
"Interrupt Latency": [ {
"Cycle Count": 1,
"Latency": 7.0
}, {
"Cycle Count": 2,
"Latency": 7.5
}, {
"Cycle Count": 4,
"Latency": 8.0
}, {
"Cycle Count": 123456,
"Latency": 10.0
} ]
}
......@@ -20,14 +20,7 @@
<appender-ref ref="STDOUT" />
</root>
<logger name="de.tu_darmstadt.cs.esa.tapasco.itapasco" level="debug">
<logger name="de.tu_darmstadt.cs.esa.tapasco.jobs.executors" level="trace">
<appender-ref ref="STDOUT-DETAIL" />
</logger>
<logger name="de.tu_darmstadt.cs.esa.tapasco.itapasco.view" level="trace"/>
<logger name="de.tu_darmstadt.cs.esa.tapasco.itapasco.controller" level="trace"/>
<logger name="de.tu_darmstadt.cs.esa.tapasco.itapasco.common" level="trace"/>
</configuration>
......@@ -19,9 +19,11 @@
/**
* @file Benchmark.scala
* @brief Model: TPC IP Benchmark.
* @todo Scaladoc is missing.
* @authors J. Korinth, TU Darmstadt (jk@esa.cs.tu-darmstadt.de)
**/
package de.tu_darmstadt.cs.esa.tapasco.base
import de.tu_darmstadt.cs.esa.tapasco.util.LinearInterpolator
import builder._
import java.time.LocalDateTime
import java.nio.file._
......@@ -29,13 +31,44 @@ import java.nio.file._
final case class LibraryVersions(platform: String, tapasco: String)
final case class Host(machine: String, node: String, operatingSystem: String, release: String, version: String)
final case class TransferSpeedMeasurement(chunkSize: Int, read: Double, write: Double, readWrite: Double)
final case class InterruptLatency(clockCycles: Int, latency: Double)
/** Linear interpolation over [[InterruptLatency]] measurements: maps a
 *  clock cycle count to an interpolated latency value. */
final class LatencyInterpolator(data: Seq[InterruptLatency])
extends LinearInterpolator[Int, Double](data map { il => (il.clockCycles, il.latency) }) {
  /** Interpolates the latency at cc between the two neighboring samples. */
  def interpolate(cc: Int, left: (Int, Double), right: (Int, Double)): Double = {
    val (lcc, lv) = (left._1.toDouble, left._2)
    val (rcc, rv) = (right._1.toDouble, right._2)
    val frac = (cc.toDouble - lcc) / (rcc - lcc)
    frac * (rv - lv) + lv
  }
}
/** Linear interpolation over [[TransferSpeedMeasurement]] data: maps a
 *  chunk size to interpolated (read, write, read/write) transfer speeds. */
final class TransferSpeedInterpolator(data: Seq[TransferSpeedMeasurement])
extends LinearInterpolator[Int, (Double, Double, Double)](data map { ts: TransferSpeedMeasurement =>
(ts.chunkSize, (ts.read, ts.write, ts.readWrite))
}) {
  /** Shorthand for the (read, write, read/write) speed triple. */
  type Rwrw = (Double, Double, Double)

  /** Component-wise linear interpolation at chunk size cs. */
  def interpolate(cs: Int, left: (Int, Rwrw), right: (Int, Rwrw)): Rwrw = {
    val lo = left._1.toDouble
    val hi = right._1.toDouble
    val frac = (cs.toDouble - lo) / (hi - lo)
    def lerp(a: Double, b: Double): Double = frac * (b - a) + a
    (lerp(left._2._1, right._2._1),
     lerp(left._2._2, right._2._2),
     lerp(left._2._3, right._2._3))
  }
}
final case class Benchmark (
descPath: Path,
timestamp: LocalDateTime,
host: Host,
libraryVersions: LibraryVersions,
transferSpeed: Seq[TransferSpeedMeasurement],
interruptLatency: Double
) extends Description(descPath)
descPath: Path,
timestamp: LocalDateTime,
host: Host,
libraryVersions: LibraryVersions,
transferSpeed: Seq[TransferSpeedMeasurement],
interruptLatency: Seq[InterruptLatency]
) extends Description(descPath) {
/** Function to compute interpolated latency values. */
lazy val latency = new LatencyInterpolator(interruptLatency)
/** Function to compute interpolated transfer speed values. */
lazy val speed = new TransferSpeedInterpolator(transferSpeed)
}
object Benchmark extends Builds[Benchmark]
......@@ -86,13 +86,18 @@ package object json {
(JsPath \ "ReadWrite").format[Double]
) (TransferSpeedMeasurement.apply _, unlift(TransferSpeedMeasurement.unapply _))
implicit val interruptLatencyFormat: Format[InterruptLatency] = (
(JsPath \ "Cycle Count").format[Int] ~
(JsPath \ "Latency").format[Double]
) (InterruptLatency.apply _, unlift(InterruptLatency.unapply _))
implicit val benchmarkReads: Reads[Benchmark] = (
(JsPath \ "DescPath").readNullable[Path].map(_ getOrElse Paths.get("N/A")) ~
(JsPath \ "Timestamp").read[LocalDateTime] ~
(JsPath \ "Host").read[Host] ~
(JsPath \ "Library Versions").read[LibraryVersions] ~
(JsPath \ "Transfer Speed").read[Seq[TransferSpeedMeasurement]] ~
(JsPath \ "Job Roundtrip Overhead").read[Double]
(JsPath \ "Interrupt Latency").read[Seq[InterruptLatency]]
) (Benchmark. apply _)
implicit val benchmarkWrites: Writes[Benchmark] = (
(JsPath \ "DescPath").write[Path].transform((js: JsObject) => js - "DescPath") ~
......@@ -100,7 +105,7 @@ package object json {
(JsPath \ "Host").write[Host] ~
(JsPath \ "Library Versions").write[LibraryVersions] ~
(JsPath \ "Transfer Speed").write[Seq[TransferSpeedMeasurement]] ~
(JsPath \ "Job Roundtrip Overhead").write[Double]
(JsPath \ "Interrupt Latency").write[Seq[InterruptLatency]]
) (unlift(Benchmark.unapply _))
/* Benchmark @} */
......
......@@ -51,7 +51,7 @@ object Heuristics {
def apply(bd: Composition, freq: Frequency, target: Target): Configuration => Value = cfg => {
val maxClockCycles: Seq[Int] = bd.composition map (ce => findAverageClockCycles(ce.kernel, target)(cfg))
val t = 1.0 / (freq * 1000000.0)
val t_irq = target.pd.benchmark map (_.interruptLatency / 1000000000.0) getOrElse 0.0
val t_irq = target.pd.benchmark map (_.latency(maxClockCycles.max) / 1000000000.0) getOrElse 0.0
val jobsla = maxClockCycles map (_ * t + t_irq/* + t_setup*/)
bd.composition map (_.count) zip jobsla map (x => x._1 / x._2) reduce (_ + _)
}
......
......@@ -44,7 +44,11 @@ protected object HighLevelSynthesis extends Executor[HighLevelSynthesisJob] {
val importTasks = results flatMap {
case ((k, t), Some(Success(_, zip))) => {
val avgCC = FileAssetManager.reports.cosimReport(k.name, t) map (_.latency.avg)
logger.trace("searching for co-simulation report for {} @ {}", k.name: Any, t)
val rpt = FileAssetManager.reports.cosimReport(k.name, t)
logger.trace("co-simulation report: {}", rpt)
val avgCC = rpt map (_.latency.avg)
logger.trace("average clock cycles: {}", avgCC)
if (avgCC.isEmpty && k.testbenchFiles.length > 0) {
logger.warn("executed HLS with co-sim for {}, but no co-simulation report was found", k)
}
......
//
// Copyright (C) 2017 Jens Korinth, TU Darmstadt
//
// This file is part of Tapasco (TPC).
//
// Tapasco is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Tapasco is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with Tapasco. If not, see <http://www.gnu.org/licenses/>.
//
/**
* @file LinearInterpolator.scala
* @brief Generic linear interpolation between abstract values.
* @authors J. Korinth, TU Darmstadt (jk@esa.cs.tu-darmstadt.de)
**/
package de.tu_darmstadt.cs.esa.tapasco.util
/** LinearInterpolator is the abstract base class for linear interpolations of arbitrary
 * types. It defines a regular interpolation from a data set and an interpolation
 * function on its base types A and B.
 * @tparam A function domain, i.e., the ordered positions (e.g., chunk size, cycle count).
 * @tparam B function co-domain, i.e., the interpolated values.
 */
abstract class LinearInterpolator[A <% Ordered[A], B](data: Seq[(A, B)])(implicit oa: Ordering[A]) extends Function[A, B] {
// smallest / largest position in the data set;
// NOTE(review): both throw on empty data -- confirm callers never pass Seq()
private lazy val min = data map (_._1) min
private lazy val max = data map (_._1) max
// position -> value lookup; duplicate positions collapse to the last occurrence
private lazy val dmp = data.toMap[A, B]
/** Computes the interpolated value for a. */
def apply(a: A): B = a match {
// outside the measured range: clamp to the outermost measured values
case v if v <= min => dmp(min)
case v if v >= max => dmp(max)
case v => findPos(a) match {
// exact hit on a measured position: no interpolation needed
case (ll, lr) if ll equals lr => dmp(lr)
case (ll, lr) => interpolate(a, (ll, dmp(ll)), (lr, dmp(lr)))
}
}
/** Abstract function to interpolate between two values. */
protected def interpolate(a: A, left: (A, B), right: (A, B)): B
/** Find the tuple of data elements in between which the given position lies.
 * Corner handling: repeat last value as constant.
 */
private def findPos(a: A, as: Seq[A] = (data map (_._1)).toSeq.sorted): (A, A) = as match {
// drop leading positions until a lies within the first pair
case ll +: lr +: ls if lr <= a => findPos(a, as.tail)
case ll +: lr +: ls if ll <= a && lr >= a => (ll, lr)
// single element left: repeat it (constant extrapolation)
case lr +: Seq() => (lr, lr)
case _ => throw new Exception("invalid data set: data")
}
}
......@@ -62,6 +62,16 @@ class BenchmarkSpec extends FlatSpec with Matchers with Checkers {
})
}
// Round-trip property: serializing any InterruptLatency to Json and
// parsing it back must yield an equal instance.
"All interrupt latency measurements" should "be read and written correctly" in {
  import play.api.libs.json._
  check(forAll { latency: InterruptLatency =>
    val rendered = Json.prettyPrint(Json.toJson(latency))
    val parsed = Json.fromJson[InterruptLatency](Json.parse(rendered))
    parsed.get == latency
  })
}
"All valid benchmarks" should "be read and written correctly" in {
import play.api.libs.json._
......@@ -100,7 +110,8 @@ class BenchmarkSpec extends FlatSpec with Matchers with Checkers {
c.host.release should equal ("3.19.8-100.fc20.x86_64")
c.host.version should equal ("#1 SMP Tue May 12 17:08:50 UTC 2015")
// interrupt latency
c.interruptLatency should equal (90.900809919008182)
c.interruptLatency should equal (List(InterruptLatency(1,7.0), InterruptLatency(2,7.5), InterruptLatency(4,8.0),
InterruptLatency(123456,10.0)))
// library versions
c.libraryVersions.platform should equal ("1.2.1")
c.libraryVersions.tapasco should equal ("1.2")
......@@ -120,37 +131,6 @@ class BenchmarkSpec extends FlatSpec with Matchers with Checkers {
ce.readWrite should equal (55.568662824206989)
}
"A Benchmark file with unknown entries" should "be parsed correctly" in {
val oc = Benchmark.from(jsonPath.resolve("correct-benchmark.json"))
lazy val c = oc.right.get
assert(oc.isRight)
// host data
c.host.machine should equal ("x86_64")
c.host.node should equal ("mountdoom")
c.host.operatingSystem should equal ("Linux")
c.host.release should equal ("3.19.8-100.fc20.x86_64")
c.host.version should equal ("#1 SMP Tue May 12 17:08:50 UTC 2015")
// interrupt latency
c.interruptLatency should equal (90.900809919008182)
// library versions
c.libraryVersions.platform should equal ("1.2.1")
c.libraryVersions.tapasco should equal ("1.2")
// timestamp
c.timestamp should equal (LocalDate.of(2016, 4, 20).atTime(16,33,49))
// transfer speed
c.transferSpeed should have length (18)
var ce = c.transferSpeed(17)
ce.chunkSize should equal (33554432)
ce.read should equal (3322.1144740468026)
ce.write should equal (3178.9272608933888)
ce.readWrite should equal (3182.0719464635727)
ce = c.transferSpeed(0)
ce.chunkSize should equal (256)
ce.read should equal (49.329030801393962)
ce.write should equal (48.829431984156649)
ce.readWrite should equal (55.568662824206989)
}
"An invalid Benchmark file" should "not be parsed" in {
val oc1 = Benchmark.from(jsonPath.resolve("invalid-benchmark.json"))
assert(oc1.isLeft)
......@@ -173,6 +153,12 @@ class BenchmarkSpec extends FlatSpec with Matchers with Checkers {
} yield TransferSpeedMeasurement(cs, r, w, rw)
implicit val arbTsm: Arbitrary[TransferSpeedMeasurement] = Arbitrary(tsmGen)
// Generator for arbitrary InterruptLatency instances: power-of-two cycle
// counts paired with positive latency values.
val ilmGen: Gen[InterruptLatency] =
  posIntsPowerTwo flatMap { cl => Gen.posNum[Double] map { l => InterruptLatency(cl, l) } }
// Makes InterruptLatency available to ScalaCheck's forAll via Arbitrary.
implicit val arbIlm: Arbitrary[InterruptLatency] = Arbitrary(ilmGen)
val hostGen = for {
machine <- Arbitrary.arbitrary[String]
node <- Arbitrary.arbitrary[String]
......@@ -197,7 +183,7 @@ class BenchmarkSpec extends FlatSpec with Matchers with Checkers {
host <- Arbitrary.arbitrary[Host]
lv <- Arbitrary.arbitrary[LibraryVersions]
tsm <- Arbitrary.arbitrary[Seq[TransferSpeedMeasurement]]
il <- Gen.posNum[Double]
il <- Arbitrary.arbitrary[Seq[InterruptLatency]]
} yield Benchmark(java.nio.file.Paths.get("N/A"), timestamp, host, lv, tsm, il)
implicit val arbBenchmark: Arbitrary[Benchmark] = Arbitrary(benchmarkGen)
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment