Commit 0dff8108 authored by Jens Korinth

tapasco_benchmark: Extend latency data

* latency data now contains min, max and average
* new Json format implemented in scala
* fixed several minor issues in case of errors
parent 35f0b38e
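
Note (sketch, not part of the commit): with this change each latency record in the emitted .benchmark JSON carries an average, a minimum and a maximum, and the Scala reader is updated in the same commit to parse the new key names. A minimal illustration with placeholder values, based on interrupt_latency_t::to_json() in the diff below:

    // Sketch only: aggregate-initialize the extended record and dump it.
    interrupt_latency_t ls { 1024, 12.3, 11.8, 14.9 };  // cycles, avg, min, max (placeholder values)
    // ls.to_json().dump() should yield something like (key order may differ):
    //   {"Avg Latency": 12.3, "Cycle Count": 1024, "Max Latency": 14.9, "Min Latency": 11.8}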
@@ -69,7 +69,7 @@ public:
     return cavg();
   }
-  double atcycles(uint32_t const clock_cycles, size_t const min_runs = 100) {
+  double atcycles(uint32_t const clock_cycles, size_t const min_runs = 100, double *min = NULL, double *max = NULL) {
     CumulativeAverage<double> cavg { 0 };
     bool stop = false;
     initscr(); noecho(); curs_set(0); timeout(0);
@@ -90,6 +90,8 @@ public:
     move((y+1) % maxy, 0);
     endwin();
+    if (min) *min = cavg.min();
+    if (max) *max = cavg.max();
     return cavg();
   }
...
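
Usage sketch (not part of the commit; variable names are hypothetical): the new pointer parameters default to NULL, so existing callers of atcycles() compile unchanged, while callers that want the extremes pass addresses, as main() does further down:

    double lat_min = 0.0, lat_max = 0.0;
    // 'il' stands for the latency-measurement object used in main(); 1024 clock
    // cycles and 10 minimum runs are placeholder arguments.
    double lat_avg  = il.atcycles(1024, 10, &lat_min, &lat_max);
    double avg_only = il.atcycles(1024);   // old-style call: min/max stay NULL and are skipped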
@@ -62,7 +62,8 @@ public:
 private:
   void transfer(volatile bool& stop, size_t const chunk_sz, long opmask) {
     tapasco_handle_t h;
-    uint8_t *data = new uint8_t[chunk_sz];
+    uint8_t *data = new (std::nothrow) uint8_t[chunk_sz];
+    if (! data) return;
     for (size_t i = 0; i < chunk_sz; ++i)
       data[i] = rand();
@@ -79,7 +80,7 @@ private:
       bytes += chunk_sz;
       tapasco.free(h, TAPASCO_DEVICE_ALLOC_FLAGS_NONE);
     }
-    delete data;
+    delete[] data;
   }
   static const std::string maskToString(long const opmask) {
...
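
Design note with an alternative sketch (not in the commit): the nothrow allocation check and the delete[] fix could also be avoided by letting a std::vector own the buffer; the trade-off is that std::vector throws std::bad_alloc on allocation failure instead of returning silently as transfer() now does.

    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>
    #include <vector>

    // Sketch: RAII-owned chunk buffer; data.data() can be passed wherever the
    // raw uint8_t* was used, and no delete[] is needed.
    std::vector<uint8_t> make_chunk(std::size_t chunk_sz) {
      std::vector<uint8_t> data(chunk_sz);              // throws std::bad_alloc on failure
      for (auto &b : data) b = static_cast<uint8_t>(std::rand());
      return data;
    }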
@@ -40,9 +40,13 @@ struct transfer_speed_t {
 struct interrupt_latency_t {
   size_t cycle_count;
   double latency_us;
+  double min_latency_us;
+  double max_latency_us;
   Json to_json() const { return Json::object {
       {"Cycle Count", static_cast<double>(cycle_count)},
-      {"Latency", latency_us}
+      {"Avg Latency", latency_us},
+      {"Min Latency", min_latency_us},
+      {"Max Latency", max_latency_us}
     }; }
 };
@@ -53,7 +57,7 @@ int main(int argc, const char *argv[]) {
   struct utsname uts;
   uname(&uts);
   vector<Json> speed;
-  //struct transfer_speed_t ts;
+  struct transfer_speed_t ts;
   vector<Json> latency;
   struct interrupt_latency_t ls;
@@ -76,26 +80,30 @@ int main(int argc, const char *argv[]) {
   }
   // measure for chunk sizes 2^8 - 2^31 (2GB) bytes
-  for (int i = 8; i < 32; ++i) {
-    /*ts.chunk_sz = 1 << i;
+  for (int i = 10; i < 32; ++i) {
+    ts.chunk_sz = 1 << i;
     ts.speed_r = tp(ts.chunk_sz, TransferSpeed::OP_COPYFROM);
     ts.speed_w = tp(ts.chunk_sz, TransferSpeed::OP_COPYTO);
     ts.speed_rw = tp(ts.chunk_sz, TransferSpeed::OP_COPYFROM | TransferSpeed::OP_COPYTO);
     cout << "Transfer speed @ chunk_sz = " << (ts.chunk_sz/1024) << " KiB:"
          << " read " << ts.speed_r << " MiB/s"
          << ", write: " << ts.speed_w << " MiB/s"
          << ", r/w: " << ts.speed_rw << " MiB/s"
          << endl;
-    Json json = ts.to_json();
-    speed.push_back(json);*/
+    if (ts.speed_r > 0.0 || ts.speed_w > 0 || ts.speed_rw > 0) {
+      Json json = ts.to_json();
+      speed.push_back(json);
+    } else break;
   }
   // measure average job roundtrip latency for clock cycles counts
   // between 2^0 and 2^31
   for (size_t i = 0; i < 32; ++i) {
     ls.cycle_count = 1UL << i;
-    ls.latency_us = il.atcycles(ls.cycle_count);
+    ls.latency_us = il.atcycles(ls.cycle_count, 10, &ls.min_latency_us, &ls.max_latency_us);
     cout << "Latency @ " << ls.cycle_count << "cc runtime: " << ls.latency_us << " us" << endl;
+    Json json = ls.to_json();
+    latency.push_back(json);
   }
   // record current time
@@ -123,10 +131,10 @@ int main(int argc, const char *argv[]) {
       }
     }
   };
   // dump it
   stringstream ss;
-  ss << getenv("TAPASCO_HOME") << "/platform/" << platform << "/" << platform << ".benchmark";
+  ss << platform << ".benchmark";
   cout << "Dumping benchmark JSON to " << (argc >= 2 ? argv[1] : ss.str()) << endl;
   ofstream f(argc >= 2 ? argv[1] : ss.str());
   f << benchmark.dump();
...
@@ -35,7 +35,7 @@ final case class Host(machine: String, node: String, operatingSystem: String, re
 /** Transfer speed measurement: R/W/RW speeds at given chunk size (in bytes). */
 final case class TransferSpeedMeasurement(chunkSize: Int, read: Double, write: Double, readWrite: Double)
 /** Interrupt latency in us at given PE runtime (in clock cycles). */
-final case class InterruptLatency(clockCycles: Int, latency: Double)
+final case class InterruptLatency(clockCycles: Int, latency: Double, min: Double, max: Double)
 /** Defines an interpolation on [[InterruptLatency]] elements. */
 final class LatencyInterpolator(data: Seq[InterruptLatency])
     extends LinearInterpolator[Int, Double](data map { il => (il.clockCycles, il.latency) }) {
...
@@ -68,7 +68,7 @@ package object json {
   implicit val libraryVersionsFormat: Format[LibraryVersions] = (
     (JsPath \ "Platform API").format[String] ~
-    (JsPath \ "TPC API").format[String]
+    (JsPath \ "Tapasco API").format[String]
   ) (LibraryVersions.apply _, unlift(LibraryVersions.unapply _))
   implicit val hostFormat: Format[Host] = (
@@ -88,7 +88,9 @@ package object json {
   implicit val interruptLatencyFormat: Format[InterruptLatency] = (
     (JsPath \ "Cycle Count").format[Int] ~
-    (JsPath \ "Latency").format[Double]
+    (JsPath \ "Avg Latency").format[Double] ~
+    (JsPath \ "Min Latency").format[Double] ~
+    (JsPath \ "Max Latency").format[Double]
   ) (InterruptLatency.apply _, unlift(InterruptLatency.unapply _))
   implicit val benchmarkReads: Reads[Benchmark] = (
...