diff --git a/README.md b/README.md index 83440dc..04344b7 100644 --- a/README.md +++ b/README.md @@ -58,6 +58,7 @@ We all stand on the shoulders of giants and get by with a little help from our f * [Sigar](https://support.hyperic.com/display/SIGAR/Home) (version 1.6.5 under [Apache 2 license](http://www.apache.org/licenses/LICENSE-2.0)), for providing a portable interface for gathering system information. * [spray-json](http://spray.io/) (version 1.3.2 under [Apache 2 license](http://www.apache.org/licenses/LICENSE-2.0)), for (de)serializing JSON. * [scala-influxdb-client](https://github.com/paulgoldbaum/scala-influxdb-client) (version 0.4.5 under [MIT license](https://github.com/paulgoldbaum/scala-influxdb-client/blob/master/LICENSE)), for using an asynchronous scala API for InfluxDB. +* [protobuf-java](https://developers.google.com/protocol-buffers/) (version 2.6.1 under [new BSD license](http://www.opensource.org/licenses/bsd-license.php)), for using the JAVA protobuf API. # License This software is licensed under the *GNU Affero General Public License*, quoted below. diff --git a/docker/Dockerfile-cli b/docker/Dockerfile-cli index d8d2085..e1e9b66 100644 --- a/docker/Dockerfile-cli +++ b/docker/Dockerfile-cli @@ -1,10 +1,10 @@ -FROM alpine:latest +FROM alpine:3.3 ENV POWERAPI_PACKAGE powerapi-cli ENV LIBPFM_PACKAGE libpfm-4.6.0 ENV INSTALL_PACKAGES ca-certificates linux-headers libc-dev make gcc patch -ENV RUNTIME_PACKAGES bluez bluez-dev openjdk7-jre bash procps +ENV RUNTIME_PACKAGES bluez bluez-dev openjdk8-jre bash procps COPY docker/libpfm/config.mk.patch /root/ COPY ${POWERAPI_PACKAGE}/target/universal/${POWERAPI_PACKAGE}/ /root/${POWERAPI_PACKAGE}/ @@ -12,7 +12,7 @@ COPY ${POWERAPI_PACKAGE}/target/universal/${POWERAPI_PACKAGE}/ /root/${POWERAPI_ VOLUME /conf RUN apk update && apk upgrade && apk add $INSTALL_PACKAGES $RUNTIME_PACKAGES && \ - wget https://circle-artifacts.com/gh/andyshinn/alpine-pkg-glibc/6/artifacts/0/home/ubuntu/alpine-pkg-glibc/packages/x86_64/glibc-2.21-r2.apk && apk --allow-untrusted add glibc-2.21-r2.apk && rm -f glibc-2.21-r2.apk && \ + wget https://circle-artifacts.com/gh/sgerrand/alpine-pkg-glibc/6/artifacts/0/home/ubuntu/alpine-pkg-glibc/packages/x86_64/glibc-2.21-r2.apk && apk --allow-untrusted add glibc-2.21-r2.apk && rm -f glibc-2.21-r2.apk && \ wget http://downloads.sourceforge.net/project/perfmon2/libpfm4/${LIBPFM_PACKAGE}.tar.gz && tar -C /root -xzvf ${LIBPFM_PACKAGE}.tar.gz && patch -d /root/$LIBPFM_PACKAGE -p1 < /root/config.mk.patch && (cd /root/$LIBPFM_PACKAGE; make lib; make install) && rm -rf /root/config.mk.patch /root/$LIBPFM_PACKAGE ${LIBPFM_PACKAGE}.tar.gz && \ rm -rf /root/${POWERAPI_PACKAGE}/conf && ln -s /conf/ /root/${POWERAPI_PACKAGE}/conf && \ apk del glibc $INSTALL_PACKAGES && \ diff --git a/docker/Dockerfile-code-energy-analysis b/docker/Dockerfile-code-energy-analysis new file mode 100644 index 0000000..6aabf13 --- /dev/null +++ b/docker/Dockerfile-code-energy-analysis @@ -0,0 +1,21 @@ +FROM openjdk:8-jre + +ENV POWERAPI_PACKAGE powerapi-code-energy-analysis +ENV LIBPFM_PACKAGE libpfm-4.6.0 + +ENV INSTALL_PACKAGES wget make gcc +ENV RUNTIME_PACKAGES cgroup-tools + +COPY ${POWERAPI_PACKAGE}/target/universal/${POWERAPI_PACKAGE} /root/${POWERAPI_PACKAGE}/ + +VOLUME /conf /tmp + +RUN apt-get update && apt-get -y upgrade && apt-get -y install $INSTALL_PACKAGES $RUNTIME_PACKAGES && \ + wget http://downloads.sourceforge.net/project/perfmon2/libpfm4/${LIBPFM_PACKAGE}.tar.gz && tar -C /root -xzvf ${LIBPFM_PACKAGE}.tar.gz && (cd 
/root/$LIBPFM_PACKAGE; make lib; make install) && rm -rf /root/$LIBPFM_PACKAGE ${LIBPFM_PACKAGE}.tar.gz && \ + rm -rf /root/${POWERAPI_PACKAGE}/conf && ln -s /conf/ /root/${POWERAPI_PACKAGE}/conf && \ + apt-get -y remove --auto-remove $INSTALL_PACKAGES && \ + rm -rf /var/lib/apt/lists/* + +WORKDIR /root/$POWERAPI_PACKAGE + +ENTRYPOINT ["./bin/powerapi"] diff --git a/docker/agent/Dockerfile-agent b/docker/agent/Dockerfile-agent new file mode 100644 index 0000000..c1705d0 --- /dev/null +++ b/docker/agent/Dockerfile-agent @@ -0,0 +1,34 @@ +FROM ubuntu:14.04 + +ARG NB_CORES +ARG UNHALTED_CYCLES_EVT +ARG UNHALTED_REF_CYCLES_EVT + +ENV LIBPFM_PACKAGE libpfm-4.6.0 + +COPY docker/agent/Makefile /root/powerapi-agent/ +COPY docker/agent/payload.proto /root/powerapi-agent/ +COPY docker/agent/perf_util.c /root/powerapi-agent/ +COPY docker/agent/perf_util.h /root/powerapi-agent/ +COPY docker/agent/powerapi-agent.c /root/powerapi-agent/ +COPY docker/agent/run.sh /root + +VOLUME /tmp /apps + +ENV INSTALL_PACKAGES ca-certificates pkg-config wget make gcc autoconf automake libtool g++ +ENV RUNTIME_PACKAGES libdw-dev libunwind8-dev + +RUN apt-get update && apt-get -y upgrade && apt-get -y install $INSTALL_PACKAGES $RUNTIME_PACKAGES && \ + wget http://downloads.sourceforge.net/project/perfmon2/libpfm4/${LIBPFM_PACKAGE}.tar.gz && tar -C /root -xzvf ${LIBPFM_PACKAGE}.tar.gz && (cd /root/$LIBPFM_PACKAGE; make lib; make install) && rm -rf /root/$LIBPFM_PACKAGE ${LIBPFM_PACKAGE}.tar.gz && \ + wget https://github.com/google/protobuf/releases/download/v2.6.1/protobuf-2.6.1.tar.gz && tar -C /root -xzvf protobuf-2.6.1.tar.gz && (cd /root/protobuf-2.6.1; ./autogen.sh; ./configure; make; make install) && rm -rf /root/protobuf-2.6.1 protobuf-2.6.1.tar.gz && \ + ldconfig && \ + wget https://github.com/protobuf-c/protobuf-c/releases/download/v1.2.1/protobuf-c-1.2.1.tar.gz && tar -C /root -xzvf protobuf-c-1.2.1.tar.gz && (cd /root/protobuf-c-1.2.1; ./configure; make; make install) && rm -rf /root/protobuf-c-1.2.1 protobuf-c-1.2.1.tar.gz && \ + ldconfig && \ + (cd /root/powerapi-agent; make NB_CORES=${NB_CORES} UNHALTED_CYCLES_EVT=${UNHALTED_CYCLES_EVT} UNHALTED_REF_CYCLES_EVT=${UNHALTED_REF_CYCLES_EVT}) && \ + mv /root/powerapi-agent/powerapi-agent /root/powerapi-agent-app && rm -rf /root/powerapi-agent && mv /root/powerapi-agent-app /root/powerapi-agent && \ + apt-get -y remove --auto-remove $INSTALL_PACKAGES && \ + rm -rf /var/lib/apt/lists/* + +WORKDIR /root + +ENTRYPOINT ["./run.sh"] diff --git a/docker/agent/Makefile b/docker/agent/Makefile new file mode 100644 index 0000000..1be0bb7 --- /dev/null +++ b/docker/agent/Makefile @@ -0,0 +1,25 @@ +CC=cc +CFLAGS=-c -Wall -O0 +INCLUDE=-I/usr/local/include +LDFLAGS=-L/usr/local/lib -lpfm -lprotobuf-c -L/usr/lib -lunwind -lunwind-generic -lunwind-ptrace -ldw +SOURCES=payload.pb-c.c perf_util.c powerapi-agent.c +OBJECTS=$(SOURCES:.c=.o) +EXECUTABLE=powerapi-agent + +all: $(SOURCES) $(EXECUTABLE) + +$(EXECUTABLE): $(OBJECTS) + $(CC) $(OBJECTS) -o $@ $(LDFLAGS) + +payload.pb-c.c: payload.proto + protoc-c --c_out=. 
$< + +powerapi-agent.o: + $(CC) $(CFLAGS) -D NB_CORES=$(NB_CORES) -D UNHALTED_CYCLES_EVT=\"$(UNHALTED_CYCLES_EVT)\" -D UNHALTED_REF_CYCLES_EVT=\"$(UNHALTED_REF_CYCLES_EVT)\" $(INCLUDE) powerapi-agent.c -o $@ + +.c.o: + $(CC) $(CFLAGS) $(INCLUDE) $< -o $@ + +clean: + rm -f $(OBJECTS) $(EXECUTABLE) + rm -rf payload.pb-c.c payload.pb-c.h diff --git a/docker/agent/init.sh b/docker/agent/init.sh new file mode 100755 index 0000000..82cfba8 --- /dev/null +++ b/docker/agent/init.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +docker create -v /apps --name apps_binary alpine /bin/true 2>/dev/null + +if [ $? -ne 0 ]; then + echo "The docker volume for application binaries has already been created." +fi + +exit 0 diff --git a/docker/agent/payload.proto b/docker/agent/payload.proto new file mode 100644 index 0000000..9941b2b --- /dev/null +++ b/docker/agent/payload.proto @@ -0,0 +1,19 @@ +syntax = "proto2"; + +option java_package = "org.powerapi.module.libpfm"; +option java_outer_classname = "PayloadProtocol"; + +message MapEntry { + required string key=1; + required uint64 value=2; +} + +message Payload { + required uint32 core=1; + required uint32 pid=2; + required uint32 tid=3; + required uint64 timestamp=4; + repeated MapEntry counters=5; + repeated string traces=6; +} + diff --git a/docker/agent/perf_util.c b/docker/agent/perf_util.c new file mode 100644 index 0000000..2fb5d4a --- /dev/null +++ b/docker/agent/perf_util.c @@ -0,0 +1,773 @@ +/* + * perf_util.c - helper functions for perf_events + * + * Copyright (c) 2009 Google, Inc + * Contributed by Stephane Eranian + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, + * INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A + * PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF + * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE + * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#include +#include +#include +#include +#include +#include +#include + +#include +#include "perf_util.h" + +/* the **fd parameter must point to a null pointer on the first call + * max_fds and num_fds must both point to a zero value on the first call + * The return value is success (0) vs. 
failure (non-zero) + */ +int +perf_setup_argv_events(const char **argv, perf_event_desc_t **fds, int *num_fds) +{ + perf_event_desc_t *fd; + pfm_perf_encode_arg_t arg; + int new_max, ret, num, max_fds; + int group_leader; + + if (!(argv && fds && num_fds)) + return -1; + + fd = *fds; + if (fd) { + max_fds = fd[0].max_fds; + if (max_fds < 2) + return -1; + num = *num_fds; + } else { + max_fds = num = 0; /* bootstrap */ + } + group_leader = num; + + while(*argv) { + if (num == max_fds) { + if (max_fds == 0) + new_max = 2; + else + new_max = max_fds << 1; + + if (new_max < max_fds) { + warn("too many entries"); + goto error; + } + fd = realloc(fd, new_max * sizeof(*fd)); + if (!fd) { + warn("cannot allocate memory"); + goto error; + } + /* reset newly allocated chunk */ + memset(fd + max_fds, 0, (new_max - max_fds) * sizeof(*fd)); + max_fds = new_max; + + /* update max size */ + fd[0].max_fds = max_fds; + } + /* ABI compatibility, set before calling libpfm */ + fd[num].hw.size = sizeof(fd[num].hw); + + memset(&arg, 0, sizeof(arg)); + arg.attr = &fd[num].hw; + arg.fstr = &fd[num].fstr; /* fd[].fstr is NULL */ + + ret = pfm_get_os_event_encoding(*argv, PFM_PLM0|PFM_PLM3, PFM_OS_PERF_EVENT_EXT, &arg); + if (ret != PFM_SUCCESS) { + warnx("event %s: %s", *argv, pfm_strerror(ret)); + goto error; + } + + fd[num].name = strdup(*argv); + fd[num].group_leader = group_leader; + fd[num].idx = arg.idx; + fd[num].cpu = arg.cpu; + + num++; + argv++; + } + *num_fds = num; + *fds = fd; + return 0; +error: + perf_free_fds(fd, num); + return -1; +} + +int +perf_setup_list_events(const char *ev, perf_event_desc_t **fd, int *num_fds) +{ + const char **argv; + char *p, *q, *events; + int i, ret, num = 0; + + if (!(ev && fd && num_fds)) + return -1; + + events = strdup(ev); + if (!events) + return -1; + + q = events; + while((p = strchr(q, ','))) { + num++; + q = p + 1; + } + num++; + num++; /* terminator */ + + argv = malloc(num * sizeof(char *)); + if (!argv) { + free(events); + return -1; + } + + i = 0; q = events; + while((p = strchr(q, ','))) { + *p = '\0'; + argv[i++] = q; + q = p + 1; + } + argv[i++] = q; + argv[i] = NULL; + + ret = perf_setup_argv_events(argv, fd, num_fds); + free(argv); + free(events); /* strdup in perf_setup_argv_events() */ + return ret; +} + +void +perf_free_fds(perf_event_desc_t *fds, int num_fds) +{ + int i; + + for (i = 0 ; i < num_fds; i++) { + free(fds[i].name); + free(fds[i].fstr); + } + free(fds); +} + +int +perf_get_group_nevents(perf_event_desc_t *fds, int num, int idx) +{ + int leader; + int i; + + if (idx < 0 || idx >= num) + return 0; + + leader = fds[idx].group_leader; + + for (i = leader + 1; i < num; i++) { + if (fds[i].group_leader != leader) { + /* This is a new group leader, so the previous + * event was the final event of the preceding + * group. 
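+ * For example, if entries 0-2 share group leader 0 and entry 3 starts a new group, the loop stops at i = 3 and reports 3 events for the first group.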
+ */ + return i - leader; + } + } + return i - leader; +} + +int +perf_read_buffer(perf_event_desc_t *hw, void *buf, size_t sz) +{ + struct perf_event_mmap_page *hdr = hw->buf; + size_t pgmsk = hw->pgmsk; + void *data; + unsigned long tail; + size_t avail_sz, m, c; + + /* + * data points to beginning of buffer payload + */ + data = ((void *)hdr)+sysconf(_SC_PAGESIZE); + + /* + * position of tail within the buffer payload + */ + tail = hdr->data_tail & pgmsk; + + /* + * size of what is available + * + * data_head, data_tail never wrap around + */ + avail_sz = hdr->data_head - hdr->data_tail; + if (sz > avail_sz) + return -1; + + /* + * sz <= avail_sz, we can satisfy the request + */ + + /* + * c = size till end of buffer + * + * buffer payload size is necessarily + * a power of two, so we can do: + */ + c = pgmsk + 1 - tail; + + /* + * min with requested size + */ + m = c < sz ? c : sz; + + /* copy beginning */ + memcpy(buf, data+tail, m); + + /* + * copy wrapped around leftover + */ + if ((sz - m) > 0) + memcpy(buf+m, data, sz - m); + + //printf("\nhead=%lx tail=%lx new_tail=%lx sz=%zu\n", hdr->data_head, hdr->data_tail, hdr->data_tail+sz, sz); + hdr->data_tail += sz; + + return 0; +} + +void +perf_skip_buffer(perf_event_desc_t *hw, size_t sz) +{ + struct perf_event_mmap_page *hdr = hw->buf; + + if ((hdr->data_tail + sz) > hdr->data_head) + sz = hdr->data_head - hdr->data_tail; + + hdr->data_tail += sz; +} + +static size_t +__perf_handle_raw(perf_event_desc_t *hw) +{ + size_t sz = 0; + uint32_t raw_sz, i; + char *buf; + int ret; + + ret = perf_read_buffer_32(hw, &raw_sz); + if (ret) { + warnx("cannot read raw size"); + return -1; + } + + sz += sizeof(raw_sz); + + printf("\n\tRAWSZ:%u\n", raw_sz); + + buf = malloc(raw_sz); + if (!buf) { + warn("cannot allocate raw buffer"); + return -1; + } + + + ret = perf_read_buffer(hw, buf, raw_sz); + if (ret) { + warnx("cannot read raw data"); + free(buf); + return -1; + } + + if (raw_sz) + putchar('\t'); + + for(i=0; i < raw_sz; i++) { + printf("0x%02x ", buf[i] & 0xff ); + if (((i+1) % 16) == 0) + printf("\n\t"); + } + if (raw_sz) + putchar('\n'); + + free(buf); + + return sz + raw_sz; +} + +static int +perf_display_branch_stack(perf_event_desc_t *desc, FILE *fp) +{ + struct perf_branch_entry b; + uint64_t nr, n; + int ret; + + ret = perf_read_buffer(desc, &n, sizeof(n)); + if (ret) + errx(1, "cannot read branch stack nr"); + + fprintf(fp, "\n\tBRANCH_STACK:%"PRIu64"\n", n); + nr = n; + /* + * from most recent to least recent take branch + */ + while (nr--) { + ret = perf_read_buffer(desc, &b, sizeof(b)); + if (ret) + errx(1, "cannot read branch stack entry"); + + fprintf(fp, "\tFROM:0x%016"PRIx64" TO:0x%016"PRIx64" MISPRED:%c\n", + b.from, + b.to, + !(b.mispred || b.predicted) ? '-': + (b.mispred ? 
'Y' :'N')); + } + return (int)(n * sizeof(b) + sizeof(n)); +} + +static int +perf_display_regs_user(perf_event_desc_t *hw, FILE *fp) +{ + return 0; +} + +static int +perf_display_stack_user(perf_event_desc_t *hw, FILE *fp) +{ + uint64_t nr; + char buf[512]; + size_t sz; + int ret; + + ret = perf_read_buffer(hw, &nr, sizeof(nr)); + if (ret) + errx(1, "cannot user stack size"); + + fprintf(fp, "USER_STACK: SZ:%"PRIu64"\n", nr); + + /* consume content */ + while (nr) { + sz = nr; + if (sz > sizeof(buf)) + sz = sizeof(buf); + + ret = perf_read_buffer(hw, buf, sz); + if (ret) + errx(1, "cannot user stack content"); + nr -= sz; + } + + return 0; +} + +int +perf_display_sample(perf_event_desc_t *fds, int num_fds, int idx, struct perf_event_header *ehdr, FILE *fp) +{ + perf_event_desc_t *hw; + struct { uint32_t pid, tid; } pid; + struct { uint64_t value, id; } grp; + uint64_t time_enabled, time_running; + size_t sz; + uint64_t type, fmt; + uint64_t val64; + const char *str; + int ret, e; + + if (!fds || !fp || !ehdr || num_fds < 0 || idx < 0 || idx >= num_fds) + return -1; + + sz = ehdr->size - sizeof(*ehdr); + + hw = fds+idx; + + type = hw->hw.sample_type; + fmt = hw->hw.read_format; + + /* + * the sample_type information is laid down + * based on the PERF_RECORD_SAMPLE format specified + * in the perf_event.h header file. + * That order is different from the enum perf_event_sample_format + */ + if (type & PERF_SAMPLE_IP) { + const char *xtra = " "; + ret = perf_read_buffer_64(hw, &val64); + if (ret) { + warnx("cannot read IP"); + return -1; + } + + /* + * MISC_EXACT_IP indicates that kernel is returning + * th IIP of an instruction which caused the event, i.e., + * no skid + */ + if (hw->hw.precise_ip && (ehdr->misc & PERF_RECORD_MISC_EXACT_IP)) + xtra = " (exact) "; + + fprintf(fp, "IIP:%#016"PRIx64"%s", val64, xtra); + sz -= sizeof(val64); + } + + if (type & PERF_SAMPLE_TID) { + ret = perf_read_buffer(hw, &pid, sizeof(pid)); + if (ret) { + warnx( "cannot read PID"); + return -1; + } + + fprintf(fp, "PID:%d TID:%d ", pid.pid, pid.tid); + sz -= sizeof(pid); + } + + if (type & PERF_SAMPLE_TIME) { + ret = perf_read_buffer_64(hw, &val64); + if (ret) { + warnx( "cannot read time"); + return -1; + } + + fprintf(fp, "TIME:%'"PRIu64" ", val64); + sz -= sizeof(val64); + } + + if (type & PERF_SAMPLE_ADDR) { + ret = perf_read_buffer_64(hw, &val64); + if (ret) { + warnx( "cannot read addr"); + return -1; + } + + fprintf(fp, "ADDR:%#016"PRIx64" ", val64); + sz -= sizeof(val64); + } + + if (type & PERF_SAMPLE_ID) { + ret = perf_read_buffer_64(hw, &val64); + if (ret) { + warnx( "cannot read id"); + return -1; + } + + fprintf(fp, "ID:%"PRIu64" ", val64); + sz -= sizeof(val64); + } + + if (type & PERF_SAMPLE_STREAM_ID) { + ret = perf_read_buffer_64(hw, &val64); + if (ret) { + warnx( "cannot read stream_id"); + return -1; + } + fprintf(fp, "STREAM_ID:%"PRIu64" ", val64); + sz -= sizeof(val64); + } + + if (type & PERF_SAMPLE_CPU) { + struct { uint32_t cpu, reserved; } cpu; + ret = perf_read_buffer(hw, &cpu, sizeof(cpu)); + if (ret) { + warnx( "cannot read cpu"); + return -1; + } + fprintf(fp, "CPU:%u ", cpu.cpu); + sz -= sizeof(cpu); + } + + if (type & PERF_SAMPLE_PERIOD) { + ret = perf_read_buffer_64(hw, &val64); + if (ret) { + warnx( "cannot read period"); + return -1; + } + fprintf(fp, "PERIOD:%'"PRIu64" ", val64); + sz -= sizeof(val64); + } + + /* struct read_format { + * { u64 value; + * { u64 time_enabled; } && PERF_FORMAT_ENABLED + * { u64 time_running; } && PERF_FORMAT_RUNNING + * { u64 id; } && 
PERF_FORMAT_ID + * } && !PERF_FORMAT_GROUP + * + * { u64 nr; + * { u64 time_enabled; } && PERF_FORMAT_ENABLED + * { u64 time_running; } && PERF_FORMAT_RUNNING + * { u64 value; + * { u64 id; } && PERF_FORMAT_ID + * } cntr[nr]; + * } && PERF_FORMAT_GROUP + * }; + */ + if (type & PERF_SAMPLE_READ) { + uint64_t values[3]; + uint64_t nr; + + if (fmt & PERF_FORMAT_GROUP) { + ret = perf_read_buffer_64(hw, &nr); + if (ret) { + warnx( "cannot read nr"); + return -1; + } + + sz -= sizeof(nr); + + time_enabled = time_running = 1; + + if (fmt & PERF_FORMAT_TOTAL_TIME_ENABLED) { + ret = perf_read_buffer_64(hw, &time_enabled); + if (ret) { + warnx( "cannot read timing info"); + return -1; + } + sz -= sizeof(time_enabled); + } + + if (fmt & PERF_FORMAT_TOTAL_TIME_RUNNING) { + ret = perf_read_buffer_64(hw, &time_running); + if (ret) { + warnx( "cannot read timing info"); + return -1; + } + sz -= sizeof(time_running); + } + + fprintf(fp, "ENA=%'"PRIu64" RUN=%'"PRIu64" NR=%"PRIu64"\n", time_enabled, time_running, nr); + + values[1] = time_enabled; + values[2] = time_running; + while(nr--) { + grp.id = -1; + ret = perf_read_buffer_64(hw, &grp.value); + if (ret) { + warnx( "cannot read group value"); + return -1; + } + sz -= sizeof(grp.value); + + if (fmt & PERF_FORMAT_ID) { + ret = perf_read_buffer_64(hw, &grp.id); + if (ret) { + warnx( "cannot read leader id"); + return -1; + } + sz -= sizeof(grp.id); + } + + e = perf_id2event(fds, num_fds, grp.id); + if (e == -1) + str = "unknown sample event"; + else + str = fds[e].name; + + values[0] = grp.value; + grp.value = perf_scale(values); + + fprintf(fp, "\t%'"PRIu64" %s (%"PRIu64"%s)\n", + grp.value, str, + grp.id, + time_running != time_enabled ? ", scaled":""); + + } + } else { + time_enabled = time_running = 0; + /* + * this program does not use FORMAT_GROUP when there is only one event + */ + ret = perf_read_buffer_64(hw, &val64); + if (ret) { + warnx( "cannot read value"); + return -1; + } + sz -= sizeof(val64); + + if (fmt & PERF_FORMAT_TOTAL_TIME_ENABLED) { + ret = perf_read_buffer_64(hw, &time_enabled); + if (ret) { + warnx( "cannot read timing info"); + return -1; + } + sz -= sizeof(time_enabled); + } + + if (fmt & PERF_FORMAT_TOTAL_TIME_RUNNING) { + ret = perf_read_buffer_64(hw, &time_running); + if (ret) { + warnx( "cannot read timing info"); + return -1; + } + sz -= sizeof(time_running); + } + if (fmt & PERF_FORMAT_ID) { + ret = perf_read_buffer_64(hw, &val64); + if (ret) { + warnx( "cannot read leader id"); + return -1; + } + sz -= sizeof(val64); + } + + fprintf(fp, "ENA=%'"PRIu64" RUN=%'"PRIu64"\n", time_enabled, time_running); + + values[0] = val64; + values[1] = time_enabled; + values[2] = time_running; + val64 = perf_scale(values); + + fprintf(fp, "\t%'"PRIu64" %s %s\n", + val64, fds[0].name, + time_running != time_enabled ? 
", scaled":""); + } + } + + if (type & PERF_SAMPLE_CALLCHAIN) { + uint64_t nr, ip; + + ret = perf_read_buffer_64(hw, &nr); + if (ret) { + warnx( "cannot read callchain nr"); + return -1; + } + sz -= sizeof(nr); + + while(nr--) { + ret = perf_read_buffer_64(hw, &ip); + if (ret) { + warnx( "cannot read ip"); + return -1; + } + + sz -= sizeof(ip); + + fprintf(fp, "\t0x%"PRIx64"\n", ip); + } + } + + if (type & PERF_SAMPLE_RAW) { + ret = __perf_handle_raw(hw); + if (ret == -1) + return -1; + sz -= ret; + } + + if (type & PERF_SAMPLE_BRANCH_STACK) { + ret = perf_display_branch_stack(hw, fp); + sz -= ret; + } + + if (type & PERF_SAMPLE_REGS_USER) { + ret = perf_display_regs_user(hw, fp); + sz -= ret; + } + + if (type & PERF_SAMPLE_STACK_USER) { + ret = perf_display_stack_user(hw, fp); + sz -= ret; + } + + if (type & PERF_SAMPLE_WEIGHT) { + ret = perf_read_buffer_64(hw, &val64); + if (ret) { + warnx( "cannot read weight"); + return -1; + } + fprintf(fp, "WEIGHT:%'"PRIu64" ", val64); + sz -= sizeof(val64); + } + + if (type & PERF_SAMPLE_DATA_SRC) { + ret = perf_read_buffer_64(hw, &val64); + if (ret) { + warnx( "cannot read data src"); + return -1; + } + fprintf(fp, "DATA_SRC:%'"PRIu64" ", val64); + sz -= sizeof(val64); + } + + /* + * if we have some data left, it is because there is more + * than what we know about. In fact, it is more complicated + * because we may have the right size but wrong layout. But + * that's the best we can do. + */ + if (sz) { + warnx("did not correctly parse sample leftover=%zu", sz); + perf_skip_buffer(hw, sz); + } + + fputc('\n',fp); + return 0; +} + +uint64_t +display_lost(perf_event_desc_t *hw, perf_event_desc_t *fds, int num_fds, FILE *fp) +{ + struct { uint64_t id, lost; } lost; + const char *str; + int e, ret; + + ret = perf_read_buffer(hw, &lost, sizeof(lost)); + if (ret) { + warnx("cannot read lost info"); + return 0; + } + + e = perf_id2event(fds, num_fds, lost.id); + if (e == -1) + str = "unknown lost event"; + else + str = fds[e].name; + + fprintf(fp, "<<>>\n", + lost.lost, + str); + + return lost.lost; +} + +void +display_exit(perf_event_desc_t *hw, FILE *fp) +{ + struct { pid_t pid, ppid, tid, ptid; } grp; + int ret; + + ret = perf_read_buffer(hw, &grp, sizeof(grp)); + if (ret) { + warnx("cannot read exit info"); + return; + } + + fprintf(fp,"[%d] exited\n", grp.pid); +} + +void +display_freq(int mode, perf_event_desc_t *hw, FILE *fp) +{ + struct { uint64_t time, id, stream_id; } thr; + int ret; + + ret = perf_read_buffer(hw, &thr, sizeof(thr)); + if (ret) { + warnx("cannot read throttling info"); + return; + } + + fprintf(fp, "%s value=%"PRIu64" event ID=%"PRIu64"\n", + mode ? 
"Throttled" : "Unthrottled", + thr.id, + thr.stream_id); +} diff --git a/docker/agent/perf_util.h b/docker/agent/perf_util.h new file mode 100644 index 0000000..4571500 --- /dev/null +++ b/docker/agent/perf_util.h @@ -0,0 +1,163 @@ +/* + * perf_util.h - helper functions for perf_events + * + * Copyright (c) 2009 Google, Inc + * Contributed by Stephane Eranian + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, + * INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A + * PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF + * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE + * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef __PERF_UTIL_H__ +#define __PERF_UTIL_H__ + +#include +#include +#include +#include + +typedef struct { + struct perf_event_attr hw; + uint64_t values[3]; + uint64_t prev_values[3]; + char *name; + uint64_t id; /* event id kernel */ + void *buf; + size_t pgmsk; + int group_leader; + int fd; + int max_fds; + int idx; /* opaque libpfm event identifier */ + int cpu; /* cpu to program */ + char *fstr; /* fstr from library, must be freed */ +} perf_event_desc_t; + +/* handy shortcut */ +#define PERF_FORMAT_SCALE (PERF_FORMAT_TOTAL_TIME_ENABLED|PERF_FORMAT_TOTAL_TIME_RUNNING) + +extern int perf_setup_argv_events(const char **argv, perf_event_desc_t **fd, int *num_fds); +extern int perf_setup_list_events(const char *events, perf_event_desc_t **fd, int *num_fds); +extern int perf_read_buffer(perf_event_desc_t *hw, void *buf, size_t sz); +extern void perf_free_fds(perf_event_desc_t *fds, int num_fds); +extern void perf_skip_buffer(perf_event_desc_t *hw, size_t sz); + +static inline int +perf_read_buffer_32(perf_event_desc_t *hw, void *buf) +{ + return perf_read_buffer(hw, buf, sizeof(uint32_t)); +} + +static inline int +perf_read_buffer_64(perf_event_desc_t *hw, void *buf) +{ + return perf_read_buffer(hw, buf, sizeof(uint64_t)); +} + +/* + * values[0] = raw count + * values[1] = TIME_ENABLED + * values[2] = TIME_RUNNING + */ +static inline uint64_t +perf_scale(uint64_t *values) +{ + uint64_t res = 0; + + if (!values[2] && !values[1] && values[0]) + warnx("WARNING: time_running = 0 = time_enabled, raw count not zero\n"); + + if (values[2] > values[1]) + warnx("WARNING: time_running > time_enabled\n"); + + if (values[2]) + res = (uint64_t)((double)values[0] * values[1]/values[2]); + return res; +} + +static inline uint64_t +perf_scale_delta(uint64_t *values, uint64_t *prev_values) +{ + double pval[3], val[3]; + uint64_t res = 0; + + if (!values[2] && !values[1] && values[0]) + warnx("WARNING: time_running = 0 = time_enabled, raw count not zero\n"); + + if (values[2] > values[1]) + warnx("WARNING: time_running > time_enabled\n"); + + if (values[2] - prev_values[2]) 
{ + /* covnert everything to double to avoid overflows! */ + pval[0] = prev_values[0]; + pval[1] = prev_values[1]; + pval[2] = prev_values[2]; + val[0] = values[0]; + val[1] = values[1]; + val[2] = values[2]; + res = (uint64_t)(((val[0] - pval[0]) * (val[1] - pval[1])/ (val[2] - pval[2]))); + } + return res; +} + + +/* + * TIME_RUNNING/TIME_ENABLED + */ +static inline double +perf_scale_ratio(uint64_t *values) +{ + if (!values[1]) + return 0.0; + + return values[2]*1.0/values[1]; +} + +static inline int +perf_fd2event(perf_event_desc_t *fds, int num_events, int fd) +{ + int i; + + for(i=0; i < num_events; i++) + if (fds[i].fd == fd) + return i; + return -1; +} + +/* + * id = PERF_FORMAT_ID + */ +static inline int +perf_id2event(perf_event_desc_t *fds, int num_events, uint64_t id) +{ + int j; + for(j=0; j < num_events; j++) + if (fds[j].id == id) + return j; + return -1; +} + +static inline int +perf_is_group_leader(perf_event_desc_t *fds, int idx) +{ + return fds[idx].group_leader == idx; +} + +extern int perf_get_group_nevents(perf_event_desc_t *fds, int num, int leader); +extern int perf_display_sample(perf_event_desc_t *fds, int num_fds, int idx, struct perf_event_header *ehdr, FILE *fp); +extern uint64_t display_lost(perf_event_desc_t *hw, perf_event_desc_t *fds, int num_fds, FILE *fp); +extern void display_exit(perf_event_desc_t *hw, FILE *fp); +extern void display_freq(int mode, perf_event_desc_t *hw, FILE *fp); +#endif diff --git a/docker/agent/powerapi-agent.c b/docker/agent/powerapi-agent.c new file mode 100644 index 0000000..6541464 --- /dev/null +++ b/docker/agent/powerapi-agent.c @@ -0,0 +1,798 @@ +#define _GNU_SOURCE 1 +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "perf_util.h" + +#include +#include +#include +#include + +#include +#include +#include + +#include "payload.pb-c.h" + +#define MAX_THREADS 4096 + +typedef struct { + void* __internal; + int length; +} array; + +typedef struct { + int a; + int b; +} tuple; + +typedef struct { + array fds; + pid_t tid; +} perf_event_desc_t_ext ; + +typedef struct { + char *event; + uint64_t value; +} counter_field; + +typedef struct { + int fd; + int core; +} socket_t; + + +typedef struct { + pid_t tid; + unw_addr_space_t as; + struct UPT_info *ui; +} unw_addr_space_t_ext; + +char **events_str; +int nb_cores = NB_CORES; +int threshold = 0; +char software_cmd[256] = ""; + +Dwfl* dwfl; +size_t pgsz = 0; +static int buffer_pages = 8; +static array sockets = { .__internal = NULL, .length = 0 }; +static array events; +static array current_pids = { .__internal = NULL, .length = 0 }; +static array current_threads = { .__internal = NULL, .length = 0 }; +static array fds_desc = { .__internal = NULL, .length = 0 }; +static array addr_spaces = { .__internal = NULL, .length = 0 }; + +int sleep_step_ms = 250; + +static int cmp(const void * a, const void * b) { + return (*(int*)a - *(int*)b); +} + +void sleep_ms(int milliseconds) { + struct timespec ts; + ts.tv_sec = milliseconds / 1000; + ts.tv_nsec = (milliseconds % 1000) * 1000000; + nanosleep(&ts, NULL); +} + +static void add_unw_space(unw_addr_space_t_ext addr_space) { + addr_spaces.__internal = realloc(addr_spaces.__internal, (addr_spaces.length + 1) * sizeof(unw_addr_space_t_ext)); + ((unw_addr_space_t_ext*)addr_spaces.__internal)[addr_spaces.length] = addr_space; + addr_spaces.length += 1; +} + +static int tid2space(pid_t tid) { + int i = 0; + unw_addr_space_t_ext 
*addr_spaces_tmp = (unw_addr_space_t_ext*)addr_spaces.__internal; + + for (i = 0; i < addr_spaces.length; i++) { + if (addr_spaces_tmp[i].tid == tid) break; + } + + return i; +} + +static void del_unw_space(pid_t tid) { + unw_addr_space_t_ext *addr_spaces_tmp = (unw_addr_space_t_ext*)addr_spaces.__internal; + int i = tid2space(tid), j = 0; + unw_destroy_addr_space(addr_spaces_tmp[i].as); + _UPT_destroy(addr_spaces_tmp[i].ui); + + for (j = i; j < addr_spaces.length - 1; j++) { + addr_spaces_tmp[j] = addr_spaces_tmp[j + 1]; + } + + addr_spaces.__internal = realloc(addr_spaces.__internal, (addr_spaces.length - 1) * sizeof(unw_addr_space_t_ext)); + addr_spaces.length -= 1; +} + +/** + * Update all running pids for the underlying command. + */ +static void update_pids() { + FILE * in; + char buffer[512]; + char pgrep_cmd[512]; + int retry = 0; + + array pids = { .__internal = NULL, .length = 0 }; + snprintf(pgrep_cmd, sizeof(pgrep_cmd), "ps -C %s -o pid --no-header", software_cmd); + + while (pids.length == 0 && retry < 10) { + if ((in = popen(pgrep_cmd, "r"))) { + while (fgets(buffer, sizeof(buffer), in) != NULL) { + pids.__internal = realloc(pids.__internal, (pids.length + 1) * sizeof(pid_t)); + ((pid_t*)pids.__internal)[pids.length] = atol(buffer); + pids.length += 1; + } + + pclose(in); + + if (pids.length == 0) { + retry += 1; + sleep_ms(sleep_step_ms); + } + } + } + + if (pids.length > 0) { + if (current_pids.__internal != NULL) free(current_pids.__internal); + current_pids.__internal = malloc(sizeof(pids.__internal)); + memcpy((pid_t*)current_pids.__internal, (pid_t*)pids.__internal, pids.length * sizeof(pid_t)); + current_pids.length = pids.length; + free(pids.__internal); + } + + else { + current_pids.__internal = NULL; + current_pids.length = 0; + } +} + +/** + * Get the current timestamp in nanoseconds. + */ +static uint64_t current_timestamp_ns() { + struct timespec ts; + timespec_get(&ts, TIME_UTC); + return (uint64_t)(ts.tv_sec * 1000000000L + ts.tv_nsec); +} + +/** + * Clean all resources needed by perf and libpfm. + */ +static void clean_perf_resources(perf_event_desc_t* desc) { + close(desc->fd); + if (desc->group_leader) munmap(desc->buf, (buffer_pages + 1) * pgsz); + free(desc->name); + free(desc->fstr); +} + +/** + * Allow to dynamically add a fd structure to a global variable. + */ +static void add_fd_desc(perf_event_desc_t_ext desc_ext) { + fds_desc.__internal = realloc(fds_desc.__internal, (fds_desc.length + 1) * sizeof(perf_event_desc_t_ext)); + ((perf_event_desc_t_ext*)fds_desc.__internal)[fds_desc.length] = desc_ext; + fds_desc.length += 1; +} + +/** + * Allow to dynamically erase a fd structure from a global variable. + */ +static void del_fd_desc(pid_t tid) { + int i, j; + perf_event_desc_t_ext *fds_desc_tmp = (perf_event_desc_t_ext*)fds_desc.__internal; + + for (i = 0; i < fds_desc.length; i++) { + if (fds_desc_tmp[i].tid == tid) { + perf_event_desc_t *fds_tmp = (perf_event_desc_t*)fds_desc_tmp[i].fds.__internal; + + for (j = 0; j < fds_desc_tmp[i].fds.length; j++) { + clean_perf_resources(&fds_tmp[j]); + } + break; + } + } + + for (j = i; j < fds_desc.length - 1; j++) { + fds_desc_tmp[j] = fds_desc_tmp[j + 1]; + } + + fds_desc.__internal = realloc(fds_desc.__internal, (fds_desc.length - 1) * sizeof(perf_event_desc_t_ext)); + fds_desc.length -= 1; +} + +/** + * Get the corresponding indexes of a fd in order to retrieve a fd structure next. 
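+ * The returned tuple holds the index of the matching entry in fds_desc and the index of the event inside that entry's fds array; both fields stay at -1 when no open counter uses the given fd.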
+ */ +static tuple fd2event(int fd) { + perf_event_desc_t_ext *fds_desc_tmp = (perf_event_desc_t_ext*)fds_desc.__internal; + int i, j; + tuple ret = { .a = -1, .b = -1 }; + + for (i = 0; i < fds_desc.length; i++) { + perf_event_desc_t *fds_tmp = (perf_event_desc_t*)fds_desc_tmp[i].fds.__internal; + + for (j = 0; j < fds_desc_tmp[i].fds.length; j++) { + if(fds_tmp[j].fd == fd) { + ret.a = i; + ret.b = j; + break; + } + } + } + + return ret; +} + +/** + * Get the corresponding event associated to an unique id. + */ +static int id2event(array fds, int id) { + perf_event_desc_t *_fds = (perf_event_desc_t*)fds.__internal; + int i, n = fds.length; + + for (i = 0; i < n ; i++) { + if(_fds[i].id == id) return i; + } + + return -1; +} + +/** + * Open all counters for a tid (one counter per core/tid) and store the information inside a global variable. + */ +static array open_counters(pid_t tid) { + int ret, i, j; + uint64_t *val; + size_t sz; + char **_events = (char**)events.__internal; + char **all_events = calloc(1, nb_cores * events.length * sizeof(char*)); + array fds = { .__internal = NULL, .length = 0 }; + for(i = 0; i < nb_cores * events.length; i++) { + all_events[i] = calloc(1, strlen(_events[i % events.length]) + 1); + all_events[i][0] = '\0'; + strncat(all_events[i], _events[i % events.length], strlen(_events[i % events.length])); + } + + ret = perf_setup_argv_events((const char **)all_events, ((perf_event_desc_t**)&(fds.__internal)), &fds.length); + if (ret || (fds.length == 0)) exit(1); + + perf_event_desc_t *_fds = (perf_event_desc_t*)fds.__internal; + + for(i = 0; i < nb_cores; i++) { + _fds[i * events.length].fd = -1; + + for (j = 0; j < events.length; j++) { + _fds[i * events.length + j].hw.disabled = !j; + _fds[i * events.length + j].cpu = i; + + if (!j) { + _fds[i * events.length + j].group_leader = 1; + _fds[i * events.length + j].hw.wakeup_events = 1; + _fds[i * events.length + j].hw.sample_type = PERF_SAMPLE_IP|PERF_SAMPLE_READ|PERF_SAMPLE_PERIOD|PERF_SAMPLE_CPU|PERF_SAMPLE_TID; + _fds[i * events.length + j].hw.sample_period = threshold; + _fds[i * events.length + j].hw.read_format = PERF_FORMAT_GROUP|PERF_FORMAT_ID; + } + + _fds[i * events.length + j].fd = perf_event_open(&_fds[i * events.length + j].hw, tid, i, _fds[i * events.length].fd, 0); + if (_fds[i * events.length + j].fd == -1) errx(1, "cannot attach event %s", _fds[i * events.length + j].name); + } + + sz = (1 + 2 * events.length) * sizeof(uint64_t); + val = malloc(sz); + if (!val) errx(1, "cannot allocated memory"); + + if (_fds[i * events.length].fd == -1) errx(1, "cannot create event 0"); + ret = read(_fds[i * events.length].fd, val, sz); + if (ret == -1) errx(1, "cannot read id %zu", sizeof(val)); + + for (j = 0; j < events.length; j++) { + _fds[i * events.length + j].id = val[2 * (j + 1)]; + //printf("%"PRIu64" %s\n", _fds[i * events.length + j].id, _fds[i * events.length + j].name); + } + + //printf("\n\n"); + + _fds[i * events.length].buf = mmap(NULL, (buffer_pages + 1) * pgsz, PROT_READ|PROT_WRITE, MAP_SHARED, _fds[i * events.length].fd, 0); + if (_fds[i * events.length].buf == MAP_FAILED) err(1, "cannot mmap buffer"); + + _fds[i * events.length].pgmsk = (buffer_pages * pgsz) - 1; + + ret = fcntl(_fds[i * events.length].fd, F_SETFL, fcntl(_fds[i * events.length].fd, F_GETFL, 0) | O_ASYNC); + if (ret == -1) errx(1, "cannot set ASYNC"); + + ret = fcntl(_fds[i * events.length].fd, F_SETSIG, SIGIO); + if (ret == -1) err(1, "cannot setsig"); + + ret = fcntl(_fds[i * events.length].fd, F_SETOWN, getpid()); + if 
(ret == -1) err(1, "cannot setown"); + + ret = ioctl(_fds[i * events.length].fd, PERF_EVENT_IOC_REFRESH, PERF_IOC_FLAG_GROUP); + if (ret == -1) err(1, "cannot refresh"); + + ret = ioctl(_fds[i * events.length].fd, PERF_EVENT_IOC_RESET, PERF_IOC_FLAG_GROUP); + if (ret == -1) err(1, "cannot refresh"); + + free(val); + } + + for(i = 0; i < nb_cores * events.length; i++) { + free(all_events[i]); + } + free(all_events); + + return fds; +} + +/** + * Utility function to get the difference between two arrays. + */ +static array get_diff_arrays(array array1, array array2) { + int diff[256]; + int *arr1 = (int*)array1.__internal, *arr2 = (int*)array2.__internal; + int n1 = array1.length, n2 = array2.length; + int i = 0, j = 0, k = 0; + array ret; + + while (i < n1 && j < n2) { + if(arr1[i] == arr2[j]) { + i++; + j++; + } + else if(arr1[i] < arr2[j]) { + diff[k] = arr1[i]; + i++; + k++; + } + else if(arr1[i] > arr2[j]) j++; + } + + while (i < n1) { + diff[k] = arr1[i]; + i++; + k++; + } + + ret.__internal = malloc(sizeof(diff)); + memcpy(ret.__internal, diff, sizeof(diff)); + ret.length = k; + + return ret; +} + +/** + * Update all running threads, clean local and external data when a thread does not exist anymore, open all counters and update data for new threads otherwise. + */ +static void update_threads() { + array threads = { .__internal = NULL, .length = 0 }; + int i; + + update_pids(); + + for (i = 0; i < current_pids.length; i++) { + char dirname[64]; + DIR *dir; + struct dirent *entry; + int value = -1; + char dummy; + pid_t pid = ((pid_t*)current_pids.__internal)[i]; + + snprintf(dirname, sizeof dirname, "/proc/%ld/task/", (long)pid); + dir = opendir(dirname); + + if (!dir) errx(1, "pid %i does not exist: %s", pid, strerror(errno)); + + while ((entry = readdir(dir)) != NULL) { + value = -1; + if (sscanf(entry->d_name, "%d%c", &value, &dummy) != 1) continue; + threads.__internal = realloc(threads.__internal, (threads.length + 1) * sizeof(pid_t)); + ((pid_t*)threads.__internal)[threads.length] = (pid_t)value; + threads.length += 1; + } + + if (dir) closedir(dir); + } + + if (threads.length > 0) { + qsort(threads.__internal, threads.length, sizeof(pid_t), cmp); + array old_threads = get_diff_arrays(current_threads, threads); + array new_threads = get_diff_arrays(threads, current_threads); + + for (i = 0; i < new_threads.length; i++) { + perf_event_desc_t_ext desc; + array fds = open_counters(((pid_t*)new_threads.__internal)[i]); + desc.tid = ((pid_t*)new_threads.__internal)[i]; + desc.fds = fds; + add_fd_desc(desc); + unw_addr_space_t_ext addr_space; + addr_space.tid = ((pid_t*)new_threads.__internal)[i]; + addr_space.ui = _UPT_create(((pid_t*)new_threads.__internal)[i]); + addr_space.as = unw_create_addr_space(&_UPT_accessors, 0); + add_unw_space(addr_space); + } + + for (i = 0; i < old_threads.length; i++) { + del_fd_desc(((pid_t*)old_threads.__internal)[i]); + del_unw_space(((pid_t*)old_threads.__internal)[i]); + Payload payload = PAYLOAD__INIT; + void *buf; + unsigned len; + payload.core = (uint32_t) 0; + payload.pid = (uint32_t) 0; + payload.tid = (uint32_t) ((pid_t*)old_threads.__internal)[i]; + payload.timestamp = (uint64_t) 0; + + len = payload__get_packed_size(&payload); + buf = malloc(len); + payload__pack(&payload, buf); + + int32_t sz = htonl((uint32_t)len); + send(((socket_t*)sockets.__internal)[0].fd, &sz, sizeof(uint32_t), 0); + send(((socket_t*)sockets.__internal)[0].fd, buf, len, 0); + + free(buf); + } + + if (current_threads.__internal != NULL) 
free(current_threads.__internal); + current_threads.__internal = malloc(sizeof(threads.__internal)); + memcpy((pid_t*)current_threads.__internal, (pid_t*)threads.__internal, threads.length * sizeof(pid_t)); + current_threads.length = threads.length; + free(threads.__internal); + if (old_threads.__internal != NULL) free(old_threads.__internal); + if (new_threads.__internal != NULL) free(new_threads.__internal); + } + + else { + if (current_threads.__internal != NULL) free(current_threads.__internal); + current_threads.length = 0; + } +} + +/** + * Use libdwarf to retrieve the function name pointed by a given adress (instruction pointer). + */ +static const char* ip_to_function_name(const void* ip, pid_t tid) { + Dwarf_Addr addr; + Dwfl_Module* module; + const char* function_name; + if(dwfl_linux_proc_report(dwfl, tid)) errx(1, "dwfl_linux_proc_report failed"); + + addr = (uintptr_t)ip; + module = dwfl_addrmodule(dwfl, addr); + function_name = dwfl_module_addrname(module, addr); + + if (dwfl_report_end(dwfl, NULL, NULL)) errx(1, "dwfl_report_end failed"); + + return function_name; +} + +/** + * Callback executed when an interruption is launched. + * This callback get informations about hardware counters with perf/libpfm and use ptrace for remote stack unwinding. + * It also uses protobluff to send data over unix domain socket. + */ +static void interrupt_handler(int n, siginfo_t *info, void *context) { + uint64_t timestamp = current_timestamp_ns(); + struct perf_event_header ehdr; + unw_cursor_t cursor; + int ret, evt_id, length = 0; + tuple indexes; + perf_event_desc_t* hw; + uint64_t nr, val64; + uint32_t cpu = 0, pid = 0, tid = 0, val32 = 0; + struct { uint64_t value, id; } grp; + const char *event; + counter_field fields[events.length]; + + ret = ioctl(info->si_fd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP); + if (ret == -1) goto reset_fd; // errx(1, "cannot disabled"); + + if (info->si_code < 0) goto reset_fd; //errx(1, "signal not generated by kernel"); + if (info->si_code != POLL_HUP) goto reset_fd; //errx(1, "signal not generated by SIGIO"); + + indexes = fd2event(info->si_fd); + if (indexes.a == -1 || indexes.b == -1) goto reset_fd;// errx(1, "no event associated with fd=%d", info->si_fd); + + array fds = ((perf_event_desc_t_ext*)fds_desc.__internal)[indexes.a].fds; + perf_event_desc_t *_fds = (perf_event_desc_t*)fds.__internal; + + ret = perf_read_buffer(_fds + indexes.b, &ehdr, sizeof(ehdr)); + if (ret) goto reset_fd; //errx(1, "cannot read event header"); + + if (ehdr.type != PERF_RECORD_SAMPLE) { + warnx("unexpected sample type=%d, skipping\n", ehdr.type); + perf_skip_buffer(_fds + indexes.b, ehdr.size); + goto reset_fd; + } + + hw = _fds + indexes.b; + + // PERF_SAMPLE_IP + ret = perf_read_buffer_64(hw, &val64); + if (ret) warnx("cannot read IP"); + + // PERF_SAMPLE_TID + ret = perf_read_buffer_32(hw, &pid); + if (ret) warnx("cannot read PID"); + ret = perf_read_buffer_32(hw, &tid); + if (ret) warnx("cannot read TID"); + + ptrace(PTRACE_ATTACH, tid, 0, 0); + wait(NULL); + + // PERF_SAMPLE_CPU + ret = perf_read_buffer_32(hw, &cpu); + if (ret) warnx("cannot read CPU"); + ret = perf_read_buffer_32(hw, &val32); + if (ret) warnx("cannot read reserved field CPU"); + + // PERF_SAMPLE_PERIOD + ret = perf_read_buffer_64(hw, &val64); + if (ret) warnx("cannot read PERIOD"); + + // PERF_SAMPLE_READ + ret = perf_read_buffer_64(hw, &nr); + if (ret) warnx("cannot read nr"); + + while(nr--) { + grp.id = -1; + + ret = perf_read_buffer_64(hw, &grp.value); + if (ret) {warnx("cannot read group 
value"); } + + ret = perf_read_buffer_64(hw, &grp.id); + if (ret) warnx("cannot read leader id"); + + evt_id = id2event(fds, grp.id); + if (evt_id == -1) event = "unknown"; + else event = _fds[evt_id].name; + + fields[nr].event = (char*)event; + fields[nr].value = grp.value; + + //printf("\tPID: %"PRIu32", TID: %"PRIu32", CPU: %"PRIu32", TIMESTAMP: %"PRIu64"; %"PRIu64" %s (group: %"PRIu64")\n", pid, tid, cpu, timestamp, grp.value, event, grp.id); + } + + //printf("\n\n"); + + unw_addr_space_t_ext addr_space = ((unw_addr_space_t_ext*)(addr_spaces.__internal))[tid2space(tid)]; + ret = unw_init_remote(&cursor, addr_space.as, addr_space.ui); + if (ret) goto detach_tid; + + char *strings[256]; + + while(unw_step(&cursor) > 0) { + unw_word_t ip; + unw_get_reg(&cursor, UNW_REG_IP, &ip); + if (ip == 0) break; + char *name = (char*)ip_to_function_name((void*)ip, tid); + // FIX: it sometimes appears that the name is set to the null character when an address is not reachable (why?) + if (name == '\0') break; + strings[length] = malloc((strlen(name) + 1) * sizeof(char)); + strings[length][0] = '\0'; + strncat(strings[length], name, strlen(name)); + length += 1; + if (strcmp(name, "main") == 0) break; + } + + if (length > 0) { + int i = 0; + Payload payload = PAYLOAD__INIT; + void *buf; + unsigned len; + payload.core = (uint32_t) cpu; + payload.pid = (uint32_t) pid; + payload.tid = (uint32_t) tid; + payload.timestamp = (uint64_t) timestamp; + payload.n_counters = events.length; + payload.n_traces = length; + + payload.counters = malloc(payload.n_counters * sizeof(MapEntry)); + payload.traces = malloc(payload.n_traces * sizeof(char*)); + + for (i = 0; i < payload.n_counters; i++) { + payload.counters[i] = malloc(sizeof(MapEntry)); + map_entry__init(payload.counters[i]); + payload.counters[i]->key = fields[i].event; + payload.counters[i]->value = (uint64_t)fields[i].value; + } + + for (i = 0; i < payload.n_traces; i++) { + payload.traces[i] = strings[i]; + } + + len = payload__get_packed_size(&payload); + buf = malloc(len); + payload__pack(&payload, buf); + + int32_t sz = htonl((uint32_t)len); + send(((socket_t*)sockets.__internal)[cpu].fd, &sz, sizeof(uint32_t), 0); + send(((socket_t*)sockets.__internal)[cpu].fd, buf, len, 0); + + free(buf); + for (i = 0; i < length ; i++) free(strings[i]); + } + +detach_tid: + ptrace(PTRACE_DETACH, tid, 0, 0); +reset_fd: + ret = ioctl(info->si_fd, PERF_EVENT_IOC_REFRESH, PERF_IOC_FLAG_GROUP); + if (ret == -1) err(1, "cannot refresh"); + ret = ioctl(info->si_fd, PERF_EVENT_IOC_RESET, PERF_IOC_FLAG_GROUP); + if (ret == -1) errx(1, "cannot reset"); + ret = ioctl(info->si_fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP); + if (ret == -1) errx(1, "cannot enable"); +} + +/** + * Main code of the PowerAPI's agent. + * This agent is responsible to set the interruption mode on an external program and to get detailed information when such interruption occurs. + * The stack trace and the associated hardware counters are retrieved and send to a PowerAPI which is running as a daemon. + * One Unix server socket (a control socket) is created by PowerAPI for handling softwares and connexion, and server sockets are created per core by the Agent to send interruption data to PowerAPI. + * + * argv[1] => threshold + * argv[2] => software's label + * argv[3] => software's cmd used as a direct pgrep parameter. 
+ */ +int main(int argc, char **argv) { + struct sigaction act; + sigset_t new, old; + char *message; + int ret, i, j; + struct sockaddr_un addr; + int socket_servers[nb_cores]; + int control_socket; + socket_t _sockets[nb_cores]; + char software_label[256] = ""; + + pgsz = sysconf(_SC_PAGESIZE); + events_str = malloc(2 * sizeof(char*)); + events_str[0] = strdup(UNHALTED_CYCLES_EVT); + events_str[1] = strdup(UNHALTED_REF_CYCLES_EVT); + events.__internal = events_str; + events.length = 2; + + threshold = atol(argv[1]); + strcpy(software_label, argv[2]); + strcpy(software_cmd, argv[3]); + + char *debuginfo_path = NULL; + + Dwfl_Callbacks callbacks = { + .find_elf=dwfl_linux_proc_find_elf, + .find_debuginfo=dwfl_standard_find_debuginfo, + .debuginfo_path=&debuginfo_path, + }; + + dwfl = dwfl_begin(&callbacks); + + for (i = 0; i < nb_cores; i++) { + char socket_path[256]; + snprintf(socket_path, sizeof(socket_path), "/tmp/agent-%d-%s.sock", i, software_label); + socket_servers[i] = socket(AF_UNIX, SOCK_STREAM, 0); + if (socket_servers[i] == -1) errx(1, "Socket error %s", socket_path); + + memset(&addr, 0, sizeof(addr)); + addr.sun_family = AF_UNIX; + strncpy(addr.sun_path, socket_path, sizeof(addr.sun_path) - 1); + + unlink(socket_path); + + ret = bind(socket_servers[i], (struct sockaddr*)&addr, sizeof(addr)); + if (ret < 0) errx(1, "Bind error"); + + ret = listen(socket_servers[i], 1); + if (ret < 0) errx(1, "Listen error"); + } + + sockets.__internal = _sockets; + sockets.length = nb_cores; + + control_socket = socket(AF_UNIX, SOCK_STREAM, 0); + memset(&addr, 0, sizeof(addr)); + addr.sun_family = AF_UNIX; + strncpy(addr.sun_path, "/tmp/agent-control.sock", sizeof(addr.sun_path) - 1); + if (connect(control_socket, (struct sockaddr*)&addr, sizeof(addr)) == -1) errx(1, "Connect error"); + message = (char*)malloc(128 * sizeof(char)); + snprintf(message, 128, "%s\n", software_label); + send(control_socket, message, strlen(message), 0); + free(message); + message = (char*)malloc(128 * sizeof(char)); + snprintf(message, 128, "%s\n", software_cmd); + send(control_socket, message, strlen(message), 0); + free(message); + + for (i = 0; i < nb_cores; i++) { + socket_t sock; + sock.fd = accept(socket_servers[i], NULL, NULL); + if (sock.fd < 0) errx(1, "Accept error"); + sock.core = i; + _sockets[i] = sock; + } + + ret = pfm_initialize(); + if (ret != PFM_SUCCESS) errx(1, "Cannot initialize library: %s", pfm_strerror(ret)); + + memset(&act, 0, sizeof(act)); + act.sa_sigaction = interrupt_handler; + act.sa_flags = SA_SIGINFO; + sigaction(SIGIO, &act, 0); + + sigemptyset(&old); + sigemptyset(&new); + sigaddset(&new, SIGIO); + + ret = sigprocmask(SIG_SETMASK, NULL, &old); + if (ret) errx(1, "sigprocmask failed"); + + if (sigismember(&old, SIGIO)) { + warnx("program started with masked signal, unmasking it now\n"); + ret = sigprocmask(SIG_UNBLOCK, &new, NULL); + if (ret) errx(1, "sigprocmask failed"); + } + + update_threads(); + + while (current_threads.length > 0) { + sleep_ms(sleep_step_ms); + update_threads(); + } + + message = (char*)malloc(128 * sizeof(char)); + snprintf(message, 128, "END\n"); + send(control_socket, message, strlen(message), 0); + free(message); + + free(dwfl); + + perf_event_desc_t_ext *fds_desc_tmp = (perf_event_desc_t_ext*)fds_desc.__internal; + for (i = 0; i < fds_desc.length; i++) { + perf_event_desc_t *fds_tmp = (perf_event_desc_t*)fds_desc_tmp[i].fds.__internal; + + for (j = 0; j < fds_desc.length; j++) { + clean_perf_resources(&fds_tmp[j]); + } + } + + if (fds_desc.__internal != 
NULL) free(fds_desc.__internal); + if (addr_spaces.__internal != NULL) free(addr_spaces.__internal); + if (current_pids.__internal != NULL) free(current_pids.__internal); + if (current_threads.__internal != NULL) free(current_threads.__internal); + + for (i = 0; i < sockets.length; i++) { + close(((socket_t*)sockets.__internal)[i].fd); + close(socket_servers[i]); + char socket_path[256]; + snprintf(socket_path, sizeof(socket_path), "/tmp/agent-%d-%s.sock", i, software_label); + unlink(socket_path); + } + close(control_socket); + + for (i = 0; i < events.length; i++) { + free(((char**)events.__internal)[i]); + } + free(events.__internal); + + pfm_terminate(); + + return 0; +} + diff --git a/docker/agent/run.sh b/docker/agent/run.sh new file mode 100755 index 0000000..68a985a --- /dev/null +++ b/docker/agent/run.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +( kill -SIGSTOP $BASHPID; exec "$4" "${@: 5}" ) & +app_pid=$! +./powerapi-agent $1 $2 $3 & +agent_pid=$! +kill -SIGCONT $app_pid +wait $agent_pid + +exit 0 \ No newline at end of file diff --git a/docker/sampling/Dockerfile-sampling b/docker/sampling/Dockerfile-sampling index f38c0f3..2f8758a 100644 --- a/docker/sampling/Dockerfile-sampling +++ b/docker/sampling/Dockerfile-sampling @@ -1,10 +1,10 @@ -FROM alpine:latest +FROM alpine:3.3 ENV POWERAPI_PACKAGE powerapi-sampling ENV LIBPFM_PACKAGE libpfm-4.6.0 ENV INSTALL_PACKAGES ca-certificates linux-headers libc-dev make gcc patch -ENV RUNTIME_PACKAGES bluez bluez-dev openjdk7-jre bash util-linux cpulimit procps +ENV RUNTIME_PACKAGES bluez bluez-dev openjdk8-jre bash util-linux cpulimit procps COPY docker/libpfm/config.mk.patch /root/ COPY docker/sampling/run.sh /root/ @@ -13,7 +13,7 @@ COPY ${POWERAPI_PACKAGE}/target/universal/${POWERAPI_PACKAGE}/ /root/${POWERAPI_ VOLUME /conf RUN apk update && apk upgrade && apk add $INSTALL_PACKAGES $RUNTIME_PACKAGES && \ - wget https://circle-artifacts.com/gh/andyshinn/alpine-pkg-glibc/6/artifacts/0/home/ubuntu/alpine-pkg-glibc/packages/x86_64/glibc-2.21-r2.apk && apk --allow-untrusted add glibc-2.21-r2.apk && rm -f glibc-2.21-r2.apk && \ + wget https://circle-artifacts.com/gh/sgerrand/alpine-pkg-glibc/6/artifacts/0/home/ubuntu/alpine-pkg-glibc/packages/x86_64/glibc-2.21-r2.apk && apk --allow-untrusted add glibc-2.21-r2.apk && rm -f glibc-2.21-r2.apk && \ wget http://downloads.sourceforge.net/project/perfmon2/libpfm4/${LIBPFM_PACKAGE}.tar.gz && tar -C /root -xzvf ${LIBPFM_PACKAGE}.tar.gz && patch -d /root/$LIBPFM_PACKAGE -p1 < /root/config.mk.patch && (cd /root/$LIBPFM_PACKAGE; make lib; make install) && rm -rf /root/config.mk.patch /root/$LIBPFM_PACKAGE ${LIBPFM_PACKAGE}.tar.gz && \ wget http://people.seas.harvard.edu/~apw/stress/stress-1.0.4.tar.gz && tar -C /root -xzvf stress-1.0.4.tar.gz && (cd /root/stress-1.0.4; ./configure; make; make install) && rm -rf /root/stress-1.0.4 && \ rm -rf /${POWERAPI_PACKAGE}/conf && ln -s /conf/ /root/${POWERAPI_PACKAGE}/conf && \ diff --git a/mwg-plugin/build.sbt b/mwg-plugin/build.sbt new file mode 100644 index 0000000..5cf8f39 --- /dev/null +++ b/mwg-plugin/build.sbt @@ -0,0 +1,27 @@ +name := "mwg-plugin" + +organization := "org.powerapi" + +/*resolvers += Resolver.mavenLocal + +libraryDependencies ++= Seq( + "org.kevoree.mwg" % "api" % "7" changing(), + "org.kevoree.mwg" % "core" % "7" changing(), + "org.kevoree.mwg.plugins" % "ml" % "7" changing() + +)*/ + +//it should be a jar-with-dependencies +unmanagedJars in Compile ++= { + baseDirectory.value + val m2 = Path.userHome / ".m2/repository" + val baseDirectories 
= (m2 / "org/kevoree/mwg/core/7-SNAPSHOT") +++ (m2 / "org/kevoree/mwg/api/7-SNAPSHOT") +++ (m2 / "org/kevoree/mwg/plugins/ml/7-SNAPSHOT") + val customJars = (baseDirectories ** "*.jar") + customJars.classpath +} + +licenses := Seq("AGPL-3.0" -> url("http://www.gnu.org/licenses/agpl-3.0.txt")) + +publishMavenStyle := true + +crossPaths := false \ No newline at end of file diff --git a/mwg-plugin/src/main/java/org/powerapi/reporter/mwg/plugins/NodeAggregatorPlugin.java b/mwg-plugin/src/main/java/org/powerapi/reporter/mwg/plugins/NodeAggregatorPlugin.java new file mode 100644 index 0000000..c4a761f --- /dev/null +++ b/mwg-plugin/src/main/java/org/powerapi/reporter/mwg/plugins/NodeAggregatorPlugin.java @@ -0,0 +1,18 @@ +package org.powerapi.reporter.mwg.plugins; + +import org.mwg.Graph; +import org.mwg.Node; +import org.mwg.plugin.AbstractPlugin; +import org.mwg.plugin.NodeFactory; + +public class NodeAggregatorPlugin extends AbstractPlugin { + + public NodeAggregatorPlugin() { + declareNodeType(PolynomialAggregatorNode.NAME, new NodeFactory() { + @Override + public Node create(long world, long time, long id, Graph graph) { + return new PolynomialAggregatorNode(world,time,id,graph); + } + }); + } +} diff --git a/mwg-plugin/src/main/java/org/powerapi/reporter/mwg/plugins/PolynomialAggregatorNode.java b/mwg-plugin/src/main/java/org/powerapi/reporter/mwg/plugins/PolynomialAggregatorNode.java new file mode 100644 index 0000000..6c0f324 --- /dev/null +++ b/mwg-plugin/src/main/java/org/powerapi/reporter/mwg/plugins/PolynomialAggregatorNode.java @@ -0,0 +1,57 @@ +package org.powerapi.reporter.mwg.plugins; + +import org.mwg.Callback; +import org.mwg.Graph; +import org.mwg.Node; +import org.mwg.core.utility.CoreDeferCounterSync; +import org.mwg.ml.algorithm.regression.PolynomialNode; +import org.mwg.plugin.AbstractNode; + +public class PolynomialAggregatorNode extends AbstractNode { + + public static final String ATT_VALUE = "value"; + public static final String REL_CHILD = "child"; + + public static final String NAME = "PolynomialAggregatorNodeTest"; + + public PolynomialAggregatorNode(long p_world, long p_time, long p_id, Graph p_graph) { + super(p_world, p_time, p_id, p_graph); + } + + @Override + public void setProperty(String propertyName, byte propertyType, Object propertyValue) { + throw new RuntimeException("PolynomialAggregatorNode node has no settable value. Please read the documentation about " + + "how it works"); + } + + @Override + public Object get(String propertyName) { + if(propertyName.equals(ATT_VALUE)) { + final CoreDeferCounterSync counter = new CoreDeferCounterSync(1); + final Callback callback = counter.wrap(); + this.rel(REL_CHILD, new Callback() { + @Override + public void on(Node[] result) { + //todo should be optimize + double sum = 0; + for (int i = 0; i < result.length; i++) { + sum = sum + (Double) result[i].get(PolynomialNode.VALUE); + } + callback.on(sum); + counter.count(); + } + }); + + return counter.waitResult(); + } + + throw new RuntimeException("PolynomialAggregatorNode node has one reachable property: " + ATT_VALUE + +". 
Please read the documentation about how it works"); + } + + + + + + +} diff --git a/mwg-server/build.sbt b/mwg-server/build.sbt new file mode 100644 index 0000000..d40538f --- /dev/null +++ b/mwg-server/build.sbt @@ -0,0 +1,27 @@ +name := "mwg-server" + +organization := "org.powerapi" + +/*resolvers += Resolver.mavenLocal + +libraryDependencies ++= Seq( + "org.kevoree.mwg" % "api" % "7" changing(), + "org.kevoree.mwg" % "core" % "7" changing(), + "org.kevoree.mwg.plugins" % "ml" % "7" changing() + +)*/ + +//it should be a jar-with-dependencies +unmanagedJars in Compile ++= { + baseDirectory.value + val m2 = Path.userHome / ".m2/repository" + val baseDirectories = (m2 / "org/kevoree/mwg/core/7-SNAPSHOT") +++ (m2 / "org/kevoree/mwg/api/7-SNAPSHOT") +++ (m2 / "org/kevoree/mwg/plugins/websocket/7-SNAPSHOT") +++ (m2 / "org/kevoree/mwg/plugins/leveldb/7-SNAPSHOT") + val customJars = (baseDirectories ** "*.jar") + customJars.classpath +} + +licenses := Seq("AGPL-3.0" -> url("http://www.gnu.org/licenses/agpl-3.0.txt")) + +publishMavenStyle := true + +crossPaths := false \ No newline at end of file diff --git a/mwg-server/src/main/java/MwgGraphServer.java b/mwg-server/src/main/java/MwgGraphServer.java new file mode 100644 index 0000000..2e5d9bf --- /dev/null +++ b/mwg-server/src/main/java/MwgGraphServer.java @@ -0,0 +1,36 @@ +import org.mwg.Callback; +import org.mwg.Graph; +import org.mwg.GraphBuilder; +import org.mwg.WSServer; + +/** + * Created by ludovicmouline on 14/09/16. + */ +public class MwgGraphServer { + + public static final String DB_PATH = "mwgDB"; + + public static void main(String[] args) { + final Graph graph = new GraphBuilder() + .withStorage(new org.mwg.LevelDBStorage(DB_PATH)) + .withMemorySize(1000) + .build(); + + final WSServer server = new WSServer(graph,9876); + + graph.connect(new Callback() { + @Override + public void on(Boolean aBoolean) { + server.start(); + } + }); + + Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() { + @Override + public void run() { + server.stop(); + graph.disconnect(null); + } + })); + } +} diff --git a/powerapi-code-energy-analysis/build.sbt b/powerapi-code-energy-analysis/build.sbt new file mode 100644 index 0000000..a94d162 --- /dev/null +++ b/powerapi-code-energy-analysis/build.sbt @@ -0,0 +1,28 @@ +name := "powerapi-code-energy-analysis" + +mappings in Universal ++= { + val dir = baseDirectory.value.getParentFile + + (for { + (file, relativePath) <- (dir * "README*" --- dir) pair relativeTo (dir) + } yield file -> s"$relativePath") ++ + (for { + (file, relativePath) <- (dir * "LICENSE*" --- dir) pair relativeTo (dir) + } yield file -> s"$relativePath") +} + +mappings in Universal ++= { + val dir = baseDirectory.value.getParentFile / "external-libs" / "sigar-bin" + + for { + (file, relativePath) <- (dir.*** --- dir) pair relativeTo(dir) + } yield file -> s"/lib/sigar-bin/$relativePath" +} + +scriptClasspath ++= Seq("../conf", "../scripts") + +packageName in Universal := name.value + +topLevelDirectory := Some(name.value) + +executableScriptName := "powerapi" diff --git a/powerapi-code-energy-analysis/src/main/scala/org/powerapi/code/energy/footprint/Analysis.scala b/powerapi-code-energy-analysis/src/main/scala/org/powerapi/code/energy/footprint/Analysis.scala new file mode 100644 index 0000000..5b7df82 --- /dev/null +++ b/powerapi-code-energy-analysis/src/main/scala/org/powerapi/code/energy/footprint/Analysis.scala @@ -0,0 +1,70 @@ +/* + * This software is licensed under the GNU Affero General Public License, quoted below. 
+ * + * This file is a part of PowerAPI. + * + * Copyright (C) 2011-2016 Inria, University of Lille 1. + * + * PowerAPI is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of + * the License, or (at your option) any later version. + * + * PowerAPI is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with PowerAPI. + * + * If not, please consult http://www.gnu.org/licenses/agpl-3.0.html. + */ +package org.powerapi.code.energy.footprint + +import scala.sys +import scala.sys.process.stringSeqToProcess +import org.powerapi.PowerMeter +import org.powerapi.core.Configuration +import org.powerapi.module.disk.simple.DiskSimpleModule +import org.powerapi.module.libpfm.LibpfmInterruptionCoreModule + +object Analysis extends Configuration(None) with App { + @volatile var powerMeters = Seq[PowerMeter]() + @volatile var unixServers = Seq[UnixServerSocket]() + + val controlThread = new Thread { + var running = true + + override def run(): Unit = { + while(running) { + Thread.sleep(5000) + } + } + + def cancel(): Unit = running = false + } + + val shutdownHookThread = scala.sys.ShutdownHookThread { + println("PowerAPI is shutting down ...") + unixServers.foreach(_.cancel()) + powerMeters.foreach(_.shutdown()) + controlThread.cancel() + } + + + if (System.getProperty("os.name").toLowerCase.indexOf("nix") >= 0 || System.getProperty("os.name").toLowerCase.indexOf("nux") >= 0) Seq("bash", "scripts/system.bash").! + + val powerapi = PowerMeter.loadModule(LibpfmInterruptionCoreModule(), DiskSimpleModule(None)) + val server = new UnixServerSocket(powerapi) + server.start() + powerMeters +:= powerapi + unixServers +:= server + + controlThread.start() + controlThread.join() + + shutdownHookThread.join() + shutdownHookThread.remove() + sys.exit(0) +} diff --git a/powerapi-code-energy-analysis/src/main/scala/org/powerapi/code/energy/footprint/UnixServerSocket.scala b/powerapi-code-energy-analysis/src/main/scala/org/powerapi/code/energy/footprint/UnixServerSocket.scala new file mode 100644 index 0000000..c1d0abc --- /dev/null +++ b/powerapi-code-energy-analysis/src/main/scala/org/powerapi/code/energy/footprint/UnixServerSocket.scala @@ -0,0 +1,311 @@ +/* + * This software is licensed under the GNU Affero General Public License, quoted below. + * + * This file is a part of PowerAPI. + * + * Copyright (C) 2011-2016 Inria, University of Lille 1. + * + * PowerAPI is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of + * the License, or (at your option) any later version. + * + * PowerAPI is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with PowerAPI. + * + * If not, please consult http://www.gnu.org/licenses/agpl-3.0.html. 
+ */ +package org.powerapi.code.energy.footprint + +import java.io.{BufferedReader, DataInputStream, EOFException, File, IOException, InputStreamReader} +import java.nio.channels.Channels +import java.util.concurrent.TimeUnit + +import com.paulgoldbaum.influxdbclient.Parameter.Precision +import com.paulgoldbaum.influxdbclient.{InfluxDB, Point} +import com.typesafe.config.Config + +import scala.concurrent.duration.{DurationInt, FiniteDuration, DurationLong} +import collection.JavaConversions._ +import jnr.unixsocket.{UnixServerSocketChannel, UnixSocketAddress, UnixSocketChannel} +import org.apache.logging.log4j.LogManager +import org.powerapi.core.{ConfigValue, Configuration} +import org.powerapi.module.libpfm.PayloadProtocol.Payload +import org.powerapi.{PowerDisplay, PowerMeter, PowerMonitoring} +import org.powerapi.core.power._ +import org.powerapi.core.TickChannel.{publishTick, tickTopic} +import org.powerapi.core.target.Application +import org.powerapi.module.PowerChannel.AggregatePowerReport +import org.powerapi.module.libpfm.AgentTick +import org.powerapi.module.libpfm.PCInterruptionChannel.InterruptionTick + +import scala.concurrent.Await + +class InterruptionInfluxDisplay(host: String, port: Int, user: String, pwd: String, dbName: String, measurement: String, flushingFrequency: FiniteDuration) extends PowerDisplay { + val timeout = 10.seconds + val influxdb = InfluxDB.connect(host, port, user, pwd) + val database = influxdb.selectDatabase(dbName) + + val measurements = Await.result(database.query("show measurements"), timeout).series + + val run = { + if (measurements.nonEmpty && measurements.head.records.map(record => record("name").toString).contains(measurement)) { + val series = Await.result(database.query("select last(cpu), run from \"" + measurement + "\""), timeout).series + if (series.isEmpty) 1 else series.head.points("run").head.toString.toInt + 1 + } + else 1 + } + + var baseTimestamp = System.currentTimeMillis + val points = scala.collection.mutable.ListBuffer[Point]() + + override def display(aggregatePowerReport: AggregatePowerReport): Unit = { + this.synchronized { + val currentTimestamp = System.currentTimeMillis + val rawPowers = aggregatePowerReport.rawPowers + val disk = rawPowers.filter(_.tick.isInstanceOf[AgentTick]).map(_.power.toWatts).sum + + points ++= (for (rawPower <- rawPowers.filter(_.tick.isInstanceOf[InterruptionTick])) yield { + val interruptionTick = rawPower.tick.asInstanceOf[InterruptionTick] + val cpu = rawPower.power.toWatts + + Point(measurement, interruptionTick.timestamp) + .addField("cpu", cpu) + .addField("disk", if (interruptionTick.triggering) disk else 0.0) + .addTag("core", s"${interruptionTick.cpu}") + .addTag("method", s"${interruptionTick.fullMethodName}") + .addTag("run", s"$run") + .addTag("tid", s"${interruptionTick.tid}") + }) + + if (currentTimestamp > baseTimestamp + flushingFrequency.toMillis) { + database.bulkWrite(points, precision = Precision.NANOSECONDS) + points.clear() + baseTimestamp = currentTimestamp + } + } + } + + def cancel(): Unit = { + if (points.nonEmpty) { + database.bulkWrite(points, precision = Precision.NANOSECONDS) + points.clear() + } + + database.close() + } +} + + +/** + * Client of a PowerAPI's agent on the DataSocket for getting all sent payloads and + * for mapping them to special ticks for the internal components of PowerAPI. 
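+ * Framing (as exercised by DataAgentSocket in the test suite): each message arrives as a 4-byte
+ * big-endian length, read below with readInt(), followed by that many bytes of a serialized
+ * Payload protobuf. Note that stream.read(...) may return fewer bytes than requested;
+ * DataInputStream.readFully(...) would guarantee a complete payload.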
+ * + * @author Maxime Colmant + */ +class DataRequest(dataChannel: UnixSocketChannel, monitor: PowerMonitoring) extends Thread { + private val log = LogManager.getLogger + @volatile var running = true + + val stream = new DataInputStream(Channels.newInputStream(dataChannel)) + + override def run(): Unit = { + while (running) { + try { + val size = stream.readInt() + val payloadBytes: Array[Byte] = Array.fill(size) { + 0 + } + + stream.read(payloadBytes, 0, payloadBytes.length) + val payload = Payload.parseFrom(payloadBytes) + log.debug("{}", payload.toString) + val tick = AgentTick(tickTopic(monitor.muid), payload.getTimestamp, payload) + publishTick(tick)(monitor.eventBus) + } + catch { + case _: EOFException => cancel() + case ex: Throwable => log.error(ex) + } + } + } + + def cancel(): Unit = { + running = false + try { + join(1.seconds.toMillis) + } + catch { + case _: InterruptedException => + } + finally { + stream.close() + dataChannel.close() + } + } +} + +/** + * Represent a connexion to the ControlSocket and connect to the DataSocket. + * + * @author Maxime Colmant + */ +class ControlRequest(controlChannel: UnixSocketChannel, configuration: UnixServerSocketConfiguration, pMeter: PowerMeter) extends Thread { + private val log = LogManager.getLogger + @volatile var running = true + + val reader = new BufferedReader(new InputStreamReader(Channels.newInputStream(controlChannel))) + //val PID = """^(\d+)\s?$""".r + val pids = collection.mutable.ArrayBuffer[Int]() + val requests = collection.mutable.ListBuffer[DataRequest]() + var monitor: Option[PowerMonitoring] = None + var display: Option[InterruptionInfluxDisplay] = None + + override def run(): Unit = { + try { + val label = reader.readLine() + val software = reader.readLine() + display = Some(new InterruptionInfluxDisplay(configuration.influxHost, configuration.influxPort, configuration.influxUser, configuration.influxPwd, configuration.influxDB, label, configuration.influxFlushingFreq)) + monitor = Some(pMeter.monitor(Application(software))(MAX).to(display.get)) + + requests ++= { + for (core <- configuration.topology.values.flatten) yield { + val path = new File(s"/tmp/agent-$core-$label.sock") + val address = new UnixSocketAddress(path) + new DataRequest(UnixSocketChannel.open(address), monitor.get) + } + } + + requests.foreach(_.start()) + // Expects a message when the app is stopped (any kind of message) + reader.readLine() + requests.foreach(_.cancel()) + display.get.cancel() + monitor.get.cancel() + reader.close() + } + catch { + case _: IOException => + case ex: Throwable => log.error(ex) + } + } + + def cancel(): Unit = { + try { + join(1.seconds.toMillis) + } + catch { + case _: InterruptedException => + } + finally { + requests.foreach(_.cancel()) + monitor.get.cancel() + reader.close() + } + } +} + +class UnixServerSocketConfiguration extends Configuration(None) { + lazy val topology: Map[Int, Set[Int]] = load { conf => + (for (item: Config <- conf.getConfigList("powerapi.cpu.topology")) + yield (item.getInt("core"), item.getIntList("indexes").map(_.toInt).toSet)).toMap + } match { + case ConfigValue(values) => values + case _ => Map() + } + + lazy val influxHost: String = load { conf => + conf.getString("powerapi.influx.host") + } match { + case ConfigValue(value) => value + case _ => "" + } + + lazy val influxPort: Int = load { conf => + conf.getInt("powerapi.influx.port") + } match { + case ConfigValue(value) => value + case _ => 8086 + } + + lazy val influxUser: String = load { conf => + 
conf.getString("powerapi.influx.username") + } match { + case ConfigValue(value) => value + case _ => "" + } + + lazy val influxPwd: String = load { conf => + conf.getString("powerapi.influx.pwd") + } match { + case ConfigValue(value) => value + case _ => "" + } + + lazy val influxDB: String = load { conf => + conf.getString("powerapi.influx.database") + } match { + case ConfigValue(value) => value + case _ => "" + } + + lazy val influxFlushingFreq: FiniteDuration = load { conf => + conf.getDuration("powerapi.influx.flushing-frequency", TimeUnit.NANOSECONDS) + } match { + case ConfigValue(value) => value.nanoseconds + case _ => 20l.seconds + } +} + +/** + * A UnixSocketServer is responsible to open a control flow connexion for PowerAPI agents. + * + * @author Maxime Colmant + */ +class UnixServerSocket(pMeter: PowerMeter) extends Thread { + private val log = LogManager.getLogger + @volatile var running = true + + val path = new File("/tmp/agent-control.sock") + val address = new UnixSocketAddress(path) + val server = UnixServerSocketChannel.open() + server.socket().bind(address) + val requests = collection.mutable.ListBuffer[ControlRequest]() + val configuration = new UnixServerSocketConfiguration() + + override def run(): Unit = { + while (running) { + try { + val request = new ControlRequest(server.accept(), configuration, pMeter) + request.start() + requests += request + } + catch { + case _: IOException => + case ex: Throwable => log.error(ex.getMessage) + } + } + } + + def cancel(): Unit = { + running = false + try { + join(1.seconds.toMillis) + } + catch { + case _: InterruptedException => + } + finally { + requests.foreach(request => { + request.cancel() + }) + server.close() + path.delete() + } + } +} diff --git a/powerapi-code-energy-analysis/src/test/scala/org/powerapi/code/energy/footprint/UnixServerSocketSuite.scala b/powerapi-code-energy-analysis/src/test/scala/org/powerapi/code/energy/footprint/UnixServerSocketSuite.scala new file mode 100644 index 0000000..580043b --- /dev/null +++ b/powerapi-code-energy-analysis/src/test/scala/org/powerapi/code/energy/footprint/UnixServerSocketSuite.scala @@ -0,0 +1,366 @@ +/* + * This software is licensed under the GNU Affero General Public License, quoted below. + * + * This file is a part of PowerAPI. + * + * Copyright (C) 2011-2016 Inria, University of Lille 1. + * + * PowerAPI is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of + * the License, or (at your option) any later version. + * + * PowerAPI is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with PowerAPI. + * + * If not, please consult http://www.gnu.org/licenses/agpl-3.0.html. 
+ */ +package org.powerapi.code.energy.footprint + +import java.io.{BufferedWriter, File, OutputStreamWriter} +import java.nio.channels.Channels +import java.util.UUID + +import akka.actor.Props +import akka.pattern.gracefulStop +import akka.testkit.{EventFilter, TestActorRef, TestProbe} +import akka.util.Timeout +import com.paulgoldbaum.influxdbclient.InfluxDB +import jnr.unixsocket.{UnixServerSocketChannel, UnixSocketAddress, UnixSocketChannel} +import org.joda.time.format.ISODateTimeFormat +import org.powerapi.core.MonitorChannel.{MonitorStart, MonitorTick, subscribeMonitorTick} +import org.powerapi.core.target.{Application, Process} +import org.powerapi.core.power._ +import org.powerapi.core.{MessageBus, MonitorChild} +import org.powerapi.module.PowerChannel.{AggregatePowerReport, RawPowerReport} +import org.powerapi.module.libpfm.PCInterruptionChannel.InterruptionTick +import org.powerapi.module.libpfm.PayloadProtocol.Payload +import org.powerapi.module.libpfm.{AgentTick, PayloadProtocol, TID} +import org.powerapi.{PowerDisplay, PowerMeter, PowerMonitoring, UnitTest} +import org.scalamock.scalatest.MockFactory + +import scala.concurrent.Await +import scala.concurrent.duration.DurationInt + +class UnixServerSocketSuite extends UnitTest with MockFactory { + + val timeout = Timeout(1.seconds) + val baseTopology = Map(0 -> Set(0), 1 -> Set(1)) + + val influxHost = "localhost" + val influxPort = 8086 + val influxUser = "powerapi" + val influxPwd = "powerapi" + val influxDB = "codenergy" + + val influxdb = InfluxDB.connect(influxHost, influxPort, influxUser, influxPwd) + val database = influxdb.selectDatabase(influxDB) + Await.result(database.create(), timeout.duration) + + override def afterAll() = { + Await.result(database.drop(), timeout.duration) + system.shutdown() + } + + trait Bus { + val eventBus = new MessageBus + } + + "An UnixServerSocket" should "be able to handle connexions with PowerAPI agents" in new Bus { + class DataAgentSocket(dataSocket: UnixSocketChannel, payload: Payload) extends Thread { + val stream = Channels.newOutputStream(dataSocket) + + def htonl(x: Int): Array[Byte] = { + var _x = x + val res: Array[Byte] = Array.fill(4){0} + + for (i <- 0 until 4) { + res(i) = new Integer(_x >>> 24).byteValue() + _x <<= 8 + } + res + } + + override def run(): Unit = { + val size = payload.getSerializedSize + stream.write(htonl(size)) + stream.flush() + payload.writeTo(stream) + stream.flush() + stream.write(htonl(size)) + stream.flush() + payload.writeTo(stream) + stream.flush() + } + } + + class Agent(muid: UUID, label: String, software: String, payloads: Map[Int, Payload]) extends Thread { + val probe = TestProbe() + subscribeMonitorTick(muid, software)(eventBus)(probe.ref) + + override def run(): Unit = { + val controlPath = new File("/tmp/agent-control.sock") + val controlAddress = new UnixSocketAddress(controlPath) + val controlWriter = new BufferedWriter(new OutputStreamWriter(Channels.newOutputStream(UnixSocketChannel.open(controlAddress)))) + + val servers = (for (core <- baseTopology.values.flatten) yield { + val dataPath = new File(s"/tmp/agent-$core-$label.sock") + val dataAddress = new UnixSocketAddress(dataPath) + val dataServer = UnixServerSocketChannel.open() + dataServer.socket().bind(dataAddress) + core -> (dataPath, dataServer) + }).toMap + + controlWriter.write(s"$label\n") + controlWriter.flush() + + controlWriter.write(s"$software\n") + controlWriter.flush() + + val dataAgents = for((core, (dataPath, dataServer)) <- servers) yield { + val dataAgent = new 
DataAgentSocket(dataServer.accept(), payloads(core)) + dataAgent.start() + dataAgent + } + + probe.receiveN(2).asInstanceOf[Seq[MonitorTick]].foreach { + monitorTick: MonitorTick => + val tick = monitorTick.tick.asInstanceOf[AgentTick] + tick.payload should equal(payloads(tick.payload.getCore)) + } + + controlWriter.write("end\n") + controlWriter.flush() + + dataAgents.foreach(_.join) + + servers.values.foreach { + case (dataPath, dataServer) => + dataServer.close() + dataPath.delete() + } + } + } + + class PowerMeterMock extends PowerMeter(system, Seq()) + + val muid1 = UUID.randomUUID() + val app1 = Application("test1") + val payloads1: Map[Int, Payload] = Map( + 0 -> Payload.newBuilder() + .setCore(0) + .setPid(10) + .setTid(10) + .setTimestamp(System.currentTimeMillis()) + .addCounters(PayloadProtocol.MapEntry.newBuilder().setKey("event1").setValue(200)) + .addCounters(PayloadProtocol.MapEntry.newBuilder().setKey("event2").setValue(400)) + .addTraces("a") + .addTraces("main") + .build(), + 1 -> Payload.newBuilder() + .setCore(1) + .setPid(10) + .setTid(11) + .setTimestamp(System.currentTimeMillis()) + .addCounters(PayloadProtocol.MapEntry.newBuilder().setKey("event1").setValue(200)) + .addCounters(PayloadProtocol.MapEntry.newBuilder().setKey("event2").setValue(400)) + .addTraces("b") + .addTraces("a") + .addTraces("main") + .build() + ) + val muid2 = UUID.randomUUID() + val app2 = Application("test2") + val payloads2: Map[Int, Payload] = Map( + 0 -> Payload.newBuilder() + .setCore(0) + .setPid(20) + .setTid(20) + .setTimestamp(System.currentTimeMillis()) + .addCounters(PayloadProtocol.MapEntry.newBuilder().setKey("event1").setValue(200000)) + .addCounters(PayloadProtocol.MapEntry.newBuilder().setKey("event2").setValue(300000)) + .addTraces("z") + .addTraces("main") + .build(), + 1 -> Payload.newBuilder() + .setCore(1) + .setPid(20) + .setTid(21) + .setTimestamp(System.currentTimeMillis()) + .addCounters(PayloadProtocol.MapEntry.newBuilder().setKey("event1").setValue(125000000)) + .addCounters(PayloadProtocol.MapEntry.newBuilder().setKey("event2").setValue(454444555)) + .addTraces("z") + .addTraces("z") + .addTraces("z") + .addTraces("main") + .build() + ) + + val monitorActor1 = TestActorRef(Props(classOf[MonitorChild], eventBus, muid1, Set(app1)), "monitor1") + val monitorActor2 = TestActorRef(Props(classOf[MonitorChild], eventBus, muid2, Set(app2)), "monitor2") + + EventFilter.info(occurrences = 1, source = monitorActor1.path.toString).intercept({ + monitorActor1 ! MonitorStart("test", muid1, Set(app1)) + }) + + EventFilter.info(occurrences = 1, source = monitorActor2.path.toString).intercept({ + monitorActor2 ! 
MonitorStart("test", muid2, Set(app2)) + }) + + val pMeter = mock[PowerMeterMock] + + val monitor1 = mock[PowerMonitoring] + monitor1.apply _ expects * once() returning monitor1 + (monitor1.to(_: PowerDisplay)) expects * anyNumberOfTimes() returning monitor1 + monitor1.cancel _ expects () anyNumberOfTimes() + monitor1.eventBus _ expects () anyNumberOfTimes() returning eventBus + monitor1.muid _ expects () anyNumberOfTimes() returning muid1 + pMeter.monitor _ expects Seq(app1) returning monitor1 + + val monitor2 = mock[PowerMonitoring] + monitor2.apply _ expects * once() returning monitor2 + (monitor2.to(_: PowerDisplay)) expects * anyNumberOfTimes() returning monitor2 + monitor2.cancel _ expects () anyNumberOfTimes() + monitor2.eventBus _ expects () anyNumberOfTimes() returning eventBus + monitor2.muid _ expects () anyNumberOfTimes() returning muid2 + pMeter.monitor _ expects Seq(app2) returning monitor2 + + val baseConfiguration = new UnixServerSocketConfiguration { + override lazy val topology = baseTopology + override lazy val influxHost = "localhost" + override lazy val influxPort = 8086 + override lazy val influxUser = "powerapi" + override lazy val influxPwd = "powerapi" + override lazy val influxDB = "codenergy" + } + + val server = new UnixServerSocket(pMeter) { + override val configuration = baseConfiguration + } + server.start() + + val agent1 = new Agent(muid1, "label1", "test1", payloads1) + val agent2 = new Agent(muid2, "label2", "test2", payloads2) + agent1.start() + agent2.start() + + agent1.join() + agent2.join() + + server.cancel() + + Await.result(gracefulStop(monitorActor1, timeout.duration), timeout.duration) + Await.result(gracefulStop(monitorActor2, timeout.duration), timeout.duration) + } + + "An InterruptionInfluxDisplay" should "write data into an Influx DB" in { + val timeout = 10.seconds + + val muid1 = UUID.randomUUID() + val muid2 = UUID.randomUUID() + + val timestamp1 = System.nanoTime() + val report1 = new AggregatePowerReport(muid1) { + override def rawPowers = Seq( + RawPowerReport("", muid1, Application("test"), 10.W, "cpu", InterruptionTick("", 0, TID(10), "a.b.b", timestamp1, true)), + RawPowerReport("", muid1, Application("test"), 2.W, "disk", AgentTick("", timestamp1, Payload.newBuilder().setCore(0).setPid(10).setTid(10).setTimestamp(timestamp1).build())) + ) + } + + val timestamp2 = System.nanoTime() + 1.seconds.toNanos + val report2 = new AggregatePowerReport(muid1) { + override def rawPowers = Seq( + RawPowerReport("", muid1, Application("test"), 10.W, "cpu", InterruptionTick("", 0, TID(10), "a.b.b", timestamp2, false)), + RawPowerReport("", muid1, Application("test"), 1.W, "cpu", InterruptionTick("", 1, TID(11), "w.x.y", timestamp2, true)), + RawPowerReport("", muid1, Application("test"), 8.W, "disk", AgentTick("", timestamp1, Payload.newBuilder().setCore(1).setPid(11).setTid(11).setTimestamp(timestamp2).build())) + ) + } + + val timestamp3 = System.nanoTime() + 2.seconds.toNanos + val report3 = new AggregatePowerReport(muid2) { + override def rawPowers = Seq( + RawPowerReport("", muid2, Application("test"), 1.W, "cpu", InterruptionTick("", 0, TID(12), "w.x.y", timestamp3, true)), + RawPowerReport("", muid2, Application("test"), 1.W, "disk", AgentTick("", timestamp3, Payload.newBuilder().setCore(0).setPid(12).setTid(12).setTimestamp(timestamp3).build())) + ) + } + + val display1 = new InterruptionInfluxDisplay(influxHost, influxPort, influxUser, influxPwd, influxDB, "test1", 1.seconds) + + Thread.sleep(5000) + display1.display(report1) + 
awaitCond({ + val result = Await.result(database.query("select * from test1 order by time"), timeout) + display1.run == 1 && + result.series.size == 1 && + result.series.head.records.size == 1 && + result.series.head.records.last("cpu").toString.toLong == 10 && + result.series.head.records.last("disk").toString.toLong == 2 && + result.series.head.records.last("core").toString == "0" && + result.series.head.records.last("method").toString == "a.b.b" && + result.series.head.records.last("run").toString == "1" && + result.series.head.records.last("tid").toString == "10" + }, 30.seconds, 1.seconds) + + Thread.sleep(5000) + display1.display(report2) + awaitCond({ + val result = Await.result(database.query("select * from test1 order by time"), timeout) + + if (result.series.size == 1) { + val filteredResult = result.series.head.records.filter(record => ISODateTimeFormat.dateTimeParser().parseDateTime(record.apply("time").toString).getMillis == (timestamp2 / 1e6).toLong) + display1.run == 1 && + result.series.head.records.size == 3 && + filteredResult.size == 2 && + filteredResult.exists { + record => + record.apply("cpu").toString.toLong == 10 && + record.apply("disk").toString.toLong == 0 && + record.apply("method").toString == "a.b.b" && + record.apply("run").toString == "1" && + record.apply("tid").toString == "10" + record.apply("core").toString == "0" + } && + filteredResult.exists { + record => + record.apply("cpu").toString.toLong == 1 && + record.apply("disk").toString.toLong == 8 && + record.apply("method").toString == "w.x.y" && + record.apply("run").toString == "1" && + record.apply("tid").toString == "11" + record.apply("core").toString == "1" + } + } + else false + }, 30.seconds, 1.seconds) + + val display2 = new InterruptionInfluxDisplay(influxHost, influxPort, influxUser, influxPwd, influxDB, "test1", 1.seconds) + + Thread.sleep(5000) + display2.display(report3) + awaitCond({ + val result = Await.result(database.query("select * from test1 order by time"), timeout) + + if (result.series.size == 1) { + val filteredResult = result.series.head.records.filter(record => ISODateTimeFormat.dateTimeParser().parseDateTime(record.apply("time").toString).getMillis == (timestamp3 / 1e6).toLong) + display2.run == 2 && + result.series.head.records.size == 4 && + filteredResult.size == 1 && + filteredResult.exists { + record => + record.apply("cpu").toString.toLong == 1 && + record.apply("disk").toString.toLong == 1 && + record.apply("method").toString == "w.x.y" && + record.apply("run").toString == "2" && + record.apply("tid").toString == "12" + record.apply("core").toString == "0" + } + } + else false + }, 30.seconds, 1.seconds) + } +} diff --git a/powerapi-code-energy-analysis/src/universal/conf/akka.conf b/powerapi-code-energy-analysis/src/universal/conf/akka.conf new file mode 100644 index 0000000..301dae2 --- /dev/null +++ b/powerapi-code-energy-analysis/src/universal/conf/akka.conf @@ -0,0 +1,26 @@ +akka { + # Options: OFF, ERROR, WARNING, INFO, DEBUG + loglevel = "error" + + log-dead-letters-during-shutdown = off + log-dead-letters = off + + actor { + guardian-supervisor-strategy = "org.powerapi.core.GuardianFailureStrategy" + + debug { + # Enable function of LoggingReceive, which is to log any received message at + # DEBUG level + receive = off + + # Enable DEBUG logging of all AutoReceiveMessages (Kill, PoisonPill and the like) + autoreceive = off + + # Enable DEBUG logging of actor lifecycle changes + lifecycle = off + + # Enable DEBUG logging of subscription changes on the eventStream 
+ event-stream = off + } + } +} diff --git a/powerapi-code-energy-analysis/src/universal/conf/application.conf b/powerapi-code-energy-analysis/src/universal/conf/application.conf new file mode 100644 index 0000000..f10e6ba --- /dev/null +++ b/powerapi-code-energy-analysis/src/universal/conf/application.conf @@ -0,0 +1,3 @@ +include "akka" +include "rapl" +include "powerapi" diff --git a/powerapi-code-energy-analysis/src/universal/conf/log4j.xml b/powerapi-code-energy-analysis/src/universal/conf/log4j.xml new file mode 100644 index 0000000..29b70c9 --- /dev/null +++ b/powerapi-code-energy-analysis/src/universal/conf/log4j.xml @@ -0,0 +1,38 @@ + + + + + + + + + + + + + + + + + diff --git a/powerapi-code-energy-analysis/src/universal/conf/log4j2.xml b/powerapi-code-energy-analysis/src/universal/conf/log4j2.xml new file mode 100644 index 0000000..3a5187a --- /dev/null +++ b/powerapi-code-energy-analysis/src/universal/conf/log4j2.xml @@ -0,0 +1,35 @@ + + + + + + + + + + + + + + diff --git a/powerapi-code-energy-analysis/src/universal/conf/logback.xml b/powerapi-code-energy-analysis/src/universal/conf/logback.xml new file mode 100644 index 0000000..b814b0a --- /dev/null +++ b/powerapi-code-energy-analysis/src/universal/conf/logback.xml @@ -0,0 +1,13 @@ + + + + + UTF-8 + [%-5level] [d{MM/dd/yyyy HH:mm:ss.SSS}] [%thread] [%logger{36}] - %msg%n + + + + + + + diff --git a/powerapi-code-energy-analysis/src/universal/conf/powerapi.conf b/powerapi-code-energy-analysis/src/universal/conf/powerapi.conf new file mode 100644 index 0000000..25b5093 --- /dev/null +++ b/powerapi-code-energy-analysis/src/universal/conf/powerapi.conf @@ -0,0 +1,34 @@ +# Here is the required configuration. More details are available in the modules documentation. + +powerapi.cpu.topology = [ + { core = 0, indexes = [0, 4] } + { core = 1, indexes = [1, 5] } + { core = 2, indexes = [2, 6] } + { core = 3, indexes = [3, 7] } +] + +powerapi.sampling.interval = 250ms +powerapi.libpfm.events = [ "CPU_CLK_UNHALTED:THREAD_P", "CPU_CLK_UNHALTED:REF_P" ] + +powerapi.libpfm.formulae.cycles = [ + { coefficient = 12.0, formula = [92.20886561572331,2.285714027168686E-8,-1.416580072868971E-17] } + { coefficient = 13.0, formula = [92.48173723023501,2.1188435019184853E-8,-1.1164115851610073E-17] } + { coefficient = 14.0, formula = [91.18277678547861,2.795388215113586E-8,-1.5345230874242293E-17] } + { coefficient = 15.0, formula = [91.58387287172661,2.9734425570507765E-8,-1.7544248591494286E-17] } + { coefficient = 16.0, formula = [92.02244294383439,2.7807408431676527E-8,-1.2746904725498715E-17] } + { coefficient = 17.0, formula = [91.3500222816532,3.0366622403587484E-8,-1.3854655417383513E-17] } + { coefficient = 18.0, formula = [91.33481852488529,3.286018826888694E-8,-1.409692587879552E-17] } + { coefficient = 19.0, formula = [91.24010397916015,3.5201585320905026E-8,-1.4560221154346024E-17] } + { coefficient = 20.0, formula = [91.62432342022942,3.810191894497629E-8,-1.4857999181001822E-17] } + { coefficient = 21.0, formula = [92.03780565000716,5.119368902831622E-8,-2.166176426151429E-17] } + { coefficient = 22.0, formula = [90.26543958762136,5.472361694273432E-8,-2.17898798570039E-17] } +] + +powerapi.influx.host = "http://193.51.236.42:8086" +powerapi.influx.port = 8086 +powerapi.influx.username = "root" +powerapi.influx.pwd = "root" +powerapi.influx.database = "powerapi" +powerapi.influx.measurement = "event.powerapi.traces" + +powerapi.actors.timeout = 10s diff --git a/powerapi-code-energy-analysis/src/universal/conf/rapl.conf 
b/powerapi-code-energy-analysis/src/universal/conf/rapl.conf new file mode 100644 index 0000000..21316a1 --- /dev/null +++ b/powerapi-code-energy-analysis/src/universal/conf/rapl.conf @@ -0,0 +1,9 @@ +rapl.supported-architectures = [ + {id = 42, model = "Sandybridge"} + {id = 45, model = "Sandybridge-EP"} + {id = 58, model = "Ivybridge"} + {id = 62, model = "Ivybridge-EP"} + {id = 60, model = "Haswell"} + {id = 63, model = "Haswell-EP"} + {id = 61, model = "Broadwell"} +] diff --git a/powerapi-code-energy-analysis/src/universal/scripts/system.bash b/powerapi-code-energy-analysis/src/universal/scripts/system.bash new file mode 100644 index 0000000..4d4e355 --- /dev/null +++ b/powerapi-code-energy-analysis/src/universal/scripts/system.bash @@ -0,0 +1,5 @@ +#!/bin/bash + +ulimit -n 4096 + +exit diff --git a/powerapi-core/build.sbt b/powerapi-core/build.sbt index d4083a1..2724e8b 100644 --- a/powerapi-core/build.sbt +++ b/powerapi-core/build.sbt @@ -23,9 +23,21 @@ libraryDependencies ++= Seq( "org.hyperic" % "sigar" % "1.6.5.132", "net.java.dev.jna" % "jna" % "4.2.1", "io.spray" %% "spray-json" % "1.3.2", - "com.paulgoldbaum" %% "scala-influxdb-client" % "0.4.5" + "com.paulgoldbaum" %% "scala-influxdb-client" % "0.4.5", + "com.github.jnr" % "jnr-unixsocket" % "0.12", + "com.google.protobuf" % "protobuf-java" % "2.6.1" ) +// temporary +// it should be a jar-with-dependencies +unmanagedJars in Compile ++= { + baseDirectory.value + val m2 = Path.userHome / ".m2/repository" + val baseDirectories = (m2 / "org/kevoree/mwg/plugins/structure/7-SNAPSHOT") +++ (m2 / "org/kevoree/mwg/plugins/websocket/7-SNAPSHOT") + val customJars = (baseDirectories ** "*.jar") + customJars.classpath +} + // Tests libraryDependencies ++= Seq( "com.typesafe.akka" %% "akka-testkit" % "2.3.14" % "test", diff --git a/powerapi-core/src/main/java/org/powerapi/module/libpfm/PayloadProtocol.java b/powerapi-core/src/main/java/org/powerapi/module/libpfm/PayloadProtocol.java new file mode 100644 index 0000000..9ee3c69 --- /dev/null +++ b/powerapi-core/src/main/java/org/powerapi/module/libpfm/PayloadProtocol.java @@ -0,0 +1,1852 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: payload.proto + +package org.powerapi.module.libpfm; + +public final class PayloadProtocol { + private PayloadProtocol() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + public interface MapEntryOrBuilder extends + // @@protoc_insertion_point(interface_extends:MapEntry) + com.google.protobuf.MessageOrBuilder { + + /** + * required string key = 1; + */ + boolean hasKey(); + /** + * required string key = 1; + */ + java.lang.String getKey(); + /** + * required string key = 1; + */ + com.google.protobuf.ByteString + getKeyBytes(); + + /** + * required uint64 value = 2; + */ + boolean hasValue(); + /** + * required uint64 value = 2; + */ + long getValue(); + } + /** + * Protobuf type {@code MapEntry} + */ + public static final class MapEntry extends + com.google.protobuf.GeneratedMessage implements + // @@protoc_insertion_point(message_implements:MapEntry) + MapEntryOrBuilder { + // Use MapEntry.newBuilder() to construct. 
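+ // Schema sketch reconstructed from the accessors below (payload.proto itself is not included in this diff):
+ //   message MapEntry { required string key = 1; required uint64 value = 2; }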
+ private MapEntry(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private MapEntry(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final MapEntry defaultInstance; + public static MapEntry getDefaultInstance() { + return defaultInstance; + } + + public MapEntry getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private MapEntry( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + com.google.protobuf.ByteString bs = input.readBytes(); + bitField0_ |= 0x00000001; + key_ = bs; + break; + } + case 16: { + bitField0_ |= 0x00000002; + value_ = input.readUInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.powerapi.module.libpfm.PayloadProtocol.internal_static_MapEntry_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.powerapi.module.libpfm.PayloadProtocol.internal_static_MapEntry_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.powerapi.module.libpfm.PayloadProtocol.MapEntry.class, org.powerapi.module.libpfm.PayloadProtocol.MapEntry.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public MapEntry parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new MapEntry(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + public static final int KEY_FIELD_NUMBER = 1; + private java.lang.Object key_; + /** + * required string key = 1; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string key = 1; + */ + public java.lang.String getKey() { + java.lang.Object ref = key_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + key_ = s; + } + return s; + } + } + /** + * required string key = 1; + */ + public com.google.protobuf.ByteString + 
getKeyBytes() { + java.lang.Object ref = key_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + key_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int VALUE_FIELD_NUMBER = 2; + private long value_; + /** + * required uint64 value = 2; + */ + public boolean hasValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required uint64 value = 2; + */ + public long getValue() { + return value_; + } + + private void initFields() { + key_ = ""; + value_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + if (!hasKey()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasValue()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getKeyBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt64(2, value_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getKeyBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(2, value_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.powerapi.module.libpfm.PayloadProtocol.MapEntry parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.powerapi.module.libpfm.PayloadProtocol.MapEntry parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.powerapi.module.libpfm.PayloadProtocol.MapEntry parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.powerapi.module.libpfm.PayloadProtocol.MapEntry parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.powerapi.module.libpfm.PayloadProtocol.MapEntry parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.powerapi.module.libpfm.PayloadProtocol.MapEntry parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public 
static org.powerapi.module.libpfm.PayloadProtocol.MapEntry parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.powerapi.module.libpfm.PayloadProtocol.MapEntry parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.powerapi.module.libpfm.PayloadProtocol.MapEntry parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.powerapi.module.libpfm.PayloadProtocol.MapEntry parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.powerapi.module.libpfm.PayloadProtocol.MapEntry prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code MapEntry} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder implements + // @@protoc_insertion_point(builder_implements:MapEntry) + org.powerapi.module.libpfm.PayloadProtocol.MapEntryOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.powerapi.module.libpfm.PayloadProtocol.internal_static_MapEntry_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.powerapi.module.libpfm.PayloadProtocol.internal_static_MapEntry_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.powerapi.module.libpfm.PayloadProtocol.MapEntry.class, org.powerapi.module.libpfm.PayloadProtocol.MapEntry.Builder.class); + } + + // Construct using org.powerapi.module.libpfm.PayloadProtocol.MapEntry.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + key_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + value_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.powerapi.module.libpfm.PayloadProtocol.internal_static_MapEntry_descriptor; + } + + public org.powerapi.module.libpfm.PayloadProtocol.MapEntry getDefaultInstanceForType() { + return org.powerapi.module.libpfm.PayloadProtocol.MapEntry.getDefaultInstance(); + } + + public org.powerapi.module.libpfm.PayloadProtocol.MapEntry build() { + org.powerapi.module.libpfm.PayloadProtocol.MapEntry result = buildPartial(); + if (!result.isInitialized()) 
{ + throw newUninitializedMessageException(result); + } + return result; + } + + public org.powerapi.module.libpfm.PayloadProtocol.MapEntry buildPartial() { + org.powerapi.module.libpfm.PayloadProtocol.MapEntry result = new org.powerapi.module.libpfm.PayloadProtocol.MapEntry(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.key_ = key_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.value_ = value_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.powerapi.module.libpfm.PayloadProtocol.MapEntry) { + return mergeFrom((org.powerapi.module.libpfm.PayloadProtocol.MapEntry)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.powerapi.module.libpfm.PayloadProtocol.MapEntry other) { + if (other == org.powerapi.module.libpfm.PayloadProtocol.MapEntry.getDefaultInstance()) return this; + if (other.hasKey()) { + bitField0_ |= 0x00000001; + key_ = other.key_; + onChanged(); + } + if (other.hasValue()) { + setValue(other.getValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasKey()) { + + return false; + } + if (!hasValue()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.powerapi.module.libpfm.PayloadProtocol.MapEntry parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.powerapi.module.libpfm.PayloadProtocol.MapEntry) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private java.lang.Object key_ = ""; + /** + * required string key = 1; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string key = 1; + */ + public java.lang.String getKey() { + java.lang.Object ref = key_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + key_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string key = 1; + */ + public com.google.protobuf.ByteString + getKeyBytes() { + java.lang.Object ref = key_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + key_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string key = 1; + */ + public Builder setKey( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + key_ = value; + onChanged(); + return this; + } + /** + * required string key = 1; + */ + public Builder clearKey() { + bitField0_ = (bitField0_ & ~0x00000001); + key_ = getDefaultInstance().getKey(); + onChanged(); + return this; + } + /** + * required string key = 1; + */ + public Builder setKeyBytes( + com.google.protobuf.ByteString value) { + 
if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + key_ = value; + onChanged(); + return this; + } + + private long value_ ; + /** + * required uint64 value = 2; + */ + public boolean hasValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required uint64 value = 2; + */ + public long getValue() { + return value_; + } + /** + * required uint64 value = 2; + */ + public Builder setValue(long value) { + bitField0_ |= 0x00000002; + value_ = value; + onChanged(); + return this; + } + /** + * required uint64 value = 2; + */ + public Builder clearValue() { + bitField0_ = (bitField0_ & ~0x00000002); + value_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:MapEntry) + } + + static { + defaultInstance = new MapEntry(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:MapEntry) + } + + public interface PayloadOrBuilder extends + // @@protoc_insertion_point(interface_extends:Payload) + com.google.protobuf.MessageOrBuilder { + + /** + * required uint32 core = 1; + */ + boolean hasCore(); + /** + * required uint32 core = 1; + */ + int getCore(); + + /** + * required uint32 pid = 2; + */ + boolean hasPid(); + /** + * required uint32 pid = 2; + */ + int getPid(); + + /** + * required uint32 tid = 3; + */ + boolean hasTid(); + /** + * required uint32 tid = 3; + */ + int getTid(); + + /** + * required uint64 timestamp = 4; + */ + boolean hasTimestamp(); + /** + * required uint64 timestamp = 4; + */ + long getTimestamp(); + + /** + * repeated .MapEntry counters = 5; + */ + java.util.List + getCountersList(); + /** + * repeated .MapEntry counters = 5; + */ + org.powerapi.module.libpfm.PayloadProtocol.MapEntry getCounters(int index); + /** + * repeated .MapEntry counters = 5; + */ + int getCountersCount(); + /** + * repeated .MapEntry counters = 5; + */ + java.util.List + getCountersOrBuilderList(); + /** + * repeated .MapEntry counters = 5; + */ + org.powerapi.module.libpfm.PayloadProtocol.MapEntryOrBuilder getCountersOrBuilder( + int index); + + /** + * repeated string traces = 6; + */ + com.google.protobuf.ProtocolStringList + getTracesList(); + /** + * repeated string traces = 6; + */ + int getTracesCount(); + /** + * repeated string traces = 6; + */ + java.lang.String getTraces(int index); + /** + * repeated string traces = 6; + */ + com.google.protobuf.ByteString + getTracesBytes(int index); + } + /** + * Protobuf type {@code Payload} + */ + public static final class Payload extends + com.google.protobuf.GeneratedMessage implements + // @@protoc_insertion_point(message_implements:Payload) + PayloadOrBuilder { + // Use Payload.newBuilder() to construct. 
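+ // Schema sketch reconstructed from the accessors below (payload.proto itself is not included in this diff):
+ //   message Payload { required uint32 core = 1; required uint32 pid = 2; required uint32 tid = 3;
+ //                     required uint64 timestamp = 4; repeated MapEntry counters = 5; repeated string traces = 6; }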
+ private Payload(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Payload(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Payload defaultInstance; + public static Payload getDefaultInstance() { + return defaultInstance; + } + + public Payload getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Payload( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + core_ = input.readUInt32(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + pid_ = input.readUInt32(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + tid_ = input.readUInt32(); + break; + } + case 32: { + bitField0_ |= 0x00000008; + timestamp_ = input.readUInt64(); + break; + } + case 42: { + if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) { + counters_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000010; + } + counters_.add(input.readMessage(org.powerapi.module.libpfm.PayloadProtocol.MapEntry.PARSER, extensionRegistry)); + break; + } + case 50: { + com.google.protobuf.ByteString bs = input.readBytes(); + if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) { + traces_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000020; + } + traces_.add(bs); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) { + counters_ = java.util.Collections.unmodifiableList(counters_); + } + if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) { + traces_ = traces_.getUnmodifiableView(); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.powerapi.module.libpfm.PayloadProtocol.internal_static_Payload_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.powerapi.module.libpfm.PayloadProtocol.internal_static_Payload_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.powerapi.module.libpfm.PayloadProtocol.Payload.class, org.powerapi.module.libpfm.PayloadProtocol.Payload.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Payload parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return new Payload(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + public static final int CORE_FIELD_NUMBER = 1; + private int core_; + /** + * required uint32 core = 1; + */ + public boolean hasCore() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required uint32 core = 1; + */ + public int getCore() { + return core_; + } + + public static final int PID_FIELD_NUMBER = 2; + private int pid_; + /** + * required uint32 pid = 2; + */ + public boolean hasPid() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required uint32 pid = 2; + */ + public int getPid() { + return pid_; + } + + public static final int TID_FIELD_NUMBER = 3; + private int tid_; + /** + * required uint32 tid = 3; + */ + public boolean hasTid() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required uint32 tid = 3; + */ + public int getTid() { + return tid_; + } + + public static final int TIMESTAMP_FIELD_NUMBER = 4; + private long timestamp_; + /** + * required uint64 timestamp = 4; + */ + public boolean hasTimestamp() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * required uint64 timestamp = 4; + */ + public long getTimestamp() { + return timestamp_; + } + + public static final int COUNTERS_FIELD_NUMBER = 5; + private java.util.List counters_; + /** + * repeated .MapEntry counters = 5; + */ + public java.util.List getCountersList() { + return counters_; + } + /** + * repeated .MapEntry counters = 5; + */ + public java.util.List + getCountersOrBuilderList() { + return counters_; + } + /** + * repeated .MapEntry counters = 5; + */ + public int getCountersCount() { + return counters_.size(); + } + /** + * repeated .MapEntry counters = 5; + */ + public org.powerapi.module.libpfm.PayloadProtocol.MapEntry getCounters(int index) { + return counters_.get(index); + } + /** + * repeated .MapEntry counters = 5; + */ + public org.powerapi.module.libpfm.PayloadProtocol.MapEntryOrBuilder getCountersOrBuilder( + int index) { + return counters_.get(index); + } + + public static final int TRACES_FIELD_NUMBER = 6; + private com.google.protobuf.LazyStringList traces_; + /** + * repeated string traces = 6; + */ + public com.google.protobuf.ProtocolStringList + getTracesList() { + return traces_; + } + /** + * repeated string traces = 6; + */ + public int getTracesCount() { + return traces_.size(); + } + /** + * repeated string traces = 6; + */ + public java.lang.String getTraces(int index) { + return traces_.get(index); + } + /** + * repeated string traces = 6; + */ + public com.google.protobuf.ByteString + getTracesBytes(int index) { + return traces_.getByteString(index); + } + + private void initFields() { + core_ = 0; + pid_ = 0; + tid_ = 0; + timestamp_ = 0L; + counters_ = java.util.Collections.emptyList(); + traces_ = com.google.protobuf.LazyStringArrayList.EMPTY; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + if (!hasCore()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasPid()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTid()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTimestamp()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < 
getCountersCount(); i++) { + if (!getCounters(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt32(1, core_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt32(2, pid_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeUInt32(3, tid_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeUInt64(4, timestamp_); + } + for (int i = 0; i < counters_.size(); i++) { + output.writeMessage(5, counters_.get(i)); + } + for (int i = 0; i < traces_.size(); i++) { + output.writeBytes(6, traces_.getByteString(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(1, core_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(2, pid_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(3, tid_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(4, timestamp_); + } + for (int i = 0; i < counters_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(5, counters_.get(i)); + } + { + int dataSize = 0; + for (int i = 0; i < traces_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeBytesSizeNoTag(traces_.getByteString(i)); + } + size += dataSize; + size += 1 * getTracesList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.powerapi.module.libpfm.PayloadProtocol.Payload parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.powerapi.module.libpfm.PayloadProtocol.Payload parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.powerapi.module.libpfm.PayloadProtocol.Payload parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.powerapi.module.libpfm.PayloadProtocol.Payload parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.powerapi.module.libpfm.PayloadProtocol.Payload parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.powerapi.module.libpfm.PayloadProtocol.Payload parseFrom( + java.io.InputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.powerapi.module.libpfm.PayloadProtocol.Payload parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.powerapi.module.libpfm.PayloadProtocol.Payload parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.powerapi.module.libpfm.PayloadProtocol.Payload parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.powerapi.module.libpfm.PayloadProtocol.Payload parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.powerapi.module.libpfm.PayloadProtocol.Payload prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code Payload} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder implements + // @@protoc_insertion_point(builder_implements:Payload) + org.powerapi.module.libpfm.PayloadProtocol.PayloadOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.powerapi.module.libpfm.PayloadProtocol.internal_static_Payload_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.powerapi.module.libpfm.PayloadProtocol.internal_static_Payload_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.powerapi.module.libpfm.PayloadProtocol.Payload.class, org.powerapi.module.libpfm.PayloadProtocol.Payload.Builder.class); + } + + // Construct using org.powerapi.module.libpfm.PayloadProtocol.Payload.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getCountersFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + core_ = 0; + bitField0_ = (bitField0_ & ~0x00000001); + pid_ = 0; + bitField0_ = (bitField0_ & ~0x00000002); + tid_ = 0; + bitField0_ = (bitField0_ & ~0x00000004); + timestamp_ = 0L; + bitField0_ = (bitField0_ & ~0x00000008); + if (countersBuilder_ == null) { + counters_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000010); + } else { + countersBuilder_.clear(); + } + traces_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000020); + return this; + } + + public Builder clone() { + return 
create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.powerapi.module.libpfm.PayloadProtocol.internal_static_Payload_descriptor; + } + + public org.powerapi.module.libpfm.PayloadProtocol.Payload getDefaultInstanceForType() { + return org.powerapi.module.libpfm.PayloadProtocol.Payload.getDefaultInstance(); + } + + public org.powerapi.module.libpfm.PayloadProtocol.Payload build() { + org.powerapi.module.libpfm.PayloadProtocol.Payload result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.powerapi.module.libpfm.PayloadProtocol.Payload buildPartial() { + org.powerapi.module.libpfm.PayloadProtocol.Payload result = new org.powerapi.module.libpfm.PayloadProtocol.Payload(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.core_ = core_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.pid_ = pid_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.tid_ = tid_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.timestamp_ = timestamp_; + if (countersBuilder_ == null) { + if (((bitField0_ & 0x00000010) == 0x00000010)) { + counters_ = java.util.Collections.unmodifiableList(counters_); + bitField0_ = (bitField0_ & ~0x00000010); + } + result.counters_ = counters_; + } else { + result.counters_ = countersBuilder_.build(); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + traces_ = traces_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000020); + } + result.traces_ = traces_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.powerapi.module.libpfm.PayloadProtocol.Payload) { + return mergeFrom((org.powerapi.module.libpfm.PayloadProtocol.Payload)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.powerapi.module.libpfm.PayloadProtocol.Payload other) { + if (other == org.powerapi.module.libpfm.PayloadProtocol.Payload.getDefaultInstance()) return this; + if (other.hasCore()) { + setCore(other.getCore()); + } + if (other.hasPid()) { + setPid(other.getPid()); + } + if (other.hasTid()) { + setTid(other.getTid()); + } + if (other.hasTimestamp()) { + setTimestamp(other.getTimestamp()); + } + if (countersBuilder_ == null) { + if (!other.counters_.isEmpty()) { + if (counters_.isEmpty()) { + counters_ = other.counters_; + bitField0_ = (bitField0_ & ~0x00000010); + } else { + ensureCountersIsMutable(); + counters_.addAll(other.counters_); + } + onChanged(); + } + } else { + if (!other.counters_.isEmpty()) { + if (countersBuilder_.isEmpty()) { + countersBuilder_.dispose(); + countersBuilder_ = null; + counters_ = other.counters_; + bitField0_ = (bitField0_ & ~0x00000010); + countersBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getCountersFieldBuilder() : null; + } else { + countersBuilder_.addAllMessages(other.counters_); + } + } + } + if (!other.traces_.isEmpty()) { + if (traces_.isEmpty()) { + traces_ = other.traces_; + bitField0_ = (bitField0_ & ~0x00000020); + } else { + ensureTracesIsMutable(); + traces_.addAll(other.traces_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasCore()) { + + return false; + } + if (!hasPid()) { + + return false; + } + if (!hasTid()) { + + return false; + } + if (!hasTimestamp()) { + + return false; + } + for (int i = 0; i < getCountersCount(); i++) { + if (!getCounters(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.powerapi.module.libpfm.PayloadProtocol.Payload parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.powerapi.module.libpfm.PayloadProtocol.Payload) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private int core_ ; + /** + * required uint32 core = 1; + */ + public boolean hasCore() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required uint32 core = 1; + */ + public int getCore() { + return core_; + } + /** + * required uint32 core = 1; + */ + public Builder setCore(int value) { + bitField0_ |= 0x00000001; + core_ = value; + onChanged(); + return this; + } + /** + * required uint32 core = 1; + */ + public Builder clearCore() { + bitField0_ = (bitField0_ & ~0x00000001); + core_ = 0; + onChanged(); + return this; + } + + private int pid_ ; + /** + * required uint32 pid = 2; + */ + public boolean hasPid() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required uint32 pid = 2; + */ + public int getPid() { + return pid_; + } + /** + * required uint32 pid = 2; + */ + public Builder setPid(int value) { + bitField0_ |= 0x00000002; + pid_ = value; + onChanged(); + return this; + } + /** + * required uint32 pid = 2; + */ + public Builder clearPid() { + bitField0_ = (bitField0_ & ~0x00000002); + pid_ = 0; + onChanged(); + return this; + } + + private int tid_ ; + /** + * required uint32 tid = 3; + */ + public boolean hasTid() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required uint32 tid = 3; + */ + public int getTid() { + return tid_; + } + /** + * required uint32 tid = 3; + */ + public Builder setTid(int value) { + bitField0_ |= 0x00000004; + tid_ = value; + onChanged(); + return this; + } + /** + * required uint32 tid = 3; + */ + public Builder clearTid() { + bitField0_ = (bitField0_ & ~0x00000004); + tid_ = 0; + onChanged(); + return this; + } + + private long timestamp_ ; + /** + * required uint64 timestamp = 4; + */ + public boolean hasTimestamp() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * required uint64 timestamp = 4; + */ + public long getTimestamp() { + return timestamp_; + } + /** + * required uint64 timestamp = 4; + */ + public Builder setTimestamp(long value) { + bitField0_ |= 0x00000008; + timestamp_ = value; + onChanged(); + return this; + } + /** + * required uint64 timestamp = 4; + */ + public Builder clearTimestamp() { + 
bitField0_ = (bitField0_ & ~0x00000008); + timestamp_ = 0L; + onChanged(); + return this; + } + + private java.util.List counters_ = + java.util.Collections.emptyList(); + private void ensureCountersIsMutable() { + if (!((bitField0_ & 0x00000010) == 0x00000010)) { + counters_ = new java.util.ArrayList(counters_); + bitField0_ |= 0x00000010; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.powerapi.module.libpfm.PayloadProtocol.MapEntry, org.powerapi.module.libpfm.PayloadProtocol.MapEntry.Builder, org.powerapi.module.libpfm.PayloadProtocol.MapEntryOrBuilder> countersBuilder_; + + /** + * repeated .MapEntry counters = 5; + */ + public java.util.List getCountersList() { + if (countersBuilder_ == null) { + return java.util.Collections.unmodifiableList(counters_); + } else { + return countersBuilder_.getMessageList(); + } + } + /** + * repeated .MapEntry counters = 5; + */ + public int getCountersCount() { + if (countersBuilder_ == null) { + return counters_.size(); + } else { + return countersBuilder_.getCount(); + } + } + /** + * repeated .MapEntry counters = 5; + */ + public org.powerapi.module.libpfm.PayloadProtocol.MapEntry getCounters(int index) { + if (countersBuilder_ == null) { + return counters_.get(index); + } else { + return countersBuilder_.getMessage(index); + } + } + /** + * repeated .MapEntry counters = 5; + */ + public Builder setCounters( + int index, org.powerapi.module.libpfm.PayloadProtocol.MapEntry value) { + if (countersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureCountersIsMutable(); + counters_.set(index, value); + onChanged(); + } else { + countersBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .MapEntry counters = 5; + */ + public Builder setCounters( + int index, org.powerapi.module.libpfm.PayloadProtocol.MapEntry.Builder builderForValue) { + if (countersBuilder_ == null) { + ensureCountersIsMutable(); + counters_.set(index, builderForValue.build()); + onChanged(); + } else { + countersBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .MapEntry counters = 5; + */ + public Builder addCounters(org.powerapi.module.libpfm.PayloadProtocol.MapEntry value) { + if (countersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureCountersIsMutable(); + counters_.add(value); + onChanged(); + } else { + countersBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .MapEntry counters = 5; + */ + public Builder addCounters( + int index, org.powerapi.module.libpfm.PayloadProtocol.MapEntry value) { + if (countersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureCountersIsMutable(); + counters_.add(index, value); + onChanged(); + } else { + countersBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .MapEntry counters = 5; + */ + public Builder addCounters( + org.powerapi.module.libpfm.PayloadProtocol.MapEntry.Builder builderForValue) { + if (countersBuilder_ == null) { + ensureCountersIsMutable(); + counters_.add(builderForValue.build()); + onChanged(); + } else { + countersBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .MapEntry counters = 5; + */ + public Builder addCounters( + int index, org.powerapi.module.libpfm.PayloadProtocol.MapEntry.Builder builderForValue) { + if (countersBuilder_ == null) { + ensureCountersIsMutable(); + counters_.add(index, builderForValue.build()); + 
onChanged(); + } else { + countersBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .MapEntry counters = 5; + */ + public Builder addAllCounters( + java.lang.Iterable values) { + if (countersBuilder_ == null) { + ensureCountersIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, counters_); + onChanged(); + } else { + countersBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .MapEntry counters = 5; + */ + public Builder clearCounters() { + if (countersBuilder_ == null) { + counters_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + } else { + countersBuilder_.clear(); + } + return this; + } + /** + * repeated .MapEntry counters = 5; + */ + public Builder removeCounters(int index) { + if (countersBuilder_ == null) { + ensureCountersIsMutable(); + counters_.remove(index); + onChanged(); + } else { + countersBuilder_.remove(index); + } + return this; + } + /** + * repeated .MapEntry counters = 5; + */ + public org.powerapi.module.libpfm.PayloadProtocol.MapEntry.Builder getCountersBuilder( + int index) { + return getCountersFieldBuilder().getBuilder(index); + } + /** + * repeated .MapEntry counters = 5; + */ + public org.powerapi.module.libpfm.PayloadProtocol.MapEntryOrBuilder getCountersOrBuilder( + int index) { + if (countersBuilder_ == null) { + return counters_.get(index); } else { + return countersBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .MapEntry counters = 5; + */ + public java.util.List + getCountersOrBuilderList() { + if (countersBuilder_ != null) { + return countersBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(counters_); + } + } + /** + * repeated .MapEntry counters = 5; + */ + public org.powerapi.module.libpfm.PayloadProtocol.MapEntry.Builder addCountersBuilder() { + return getCountersFieldBuilder().addBuilder( + org.powerapi.module.libpfm.PayloadProtocol.MapEntry.getDefaultInstance()); + } + /** + * repeated .MapEntry counters = 5; + */ + public org.powerapi.module.libpfm.PayloadProtocol.MapEntry.Builder addCountersBuilder( + int index) { + return getCountersFieldBuilder().addBuilder( + index, org.powerapi.module.libpfm.PayloadProtocol.MapEntry.getDefaultInstance()); + } + /** + * repeated .MapEntry counters = 5; + */ + public java.util.List + getCountersBuilderList() { + return getCountersFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.powerapi.module.libpfm.PayloadProtocol.MapEntry, org.powerapi.module.libpfm.PayloadProtocol.MapEntry.Builder, org.powerapi.module.libpfm.PayloadProtocol.MapEntryOrBuilder> + getCountersFieldBuilder() { + if (countersBuilder_ == null) { + countersBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.powerapi.module.libpfm.PayloadProtocol.MapEntry, org.powerapi.module.libpfm.PayloadProtocol.MapEntry.Builder, org.powerapi.module.libpfm.PayloadProtocol.MapEntryOrBuilder>( + counters_, + ((bitField0_ & 0x00000010) == 0x00000010), + getParentForChildren(), + isClean()); + counters_ = null; + } + return countersBuilder_; + } + + private com.google.protobuf.LazyStringList traces_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureTracesIsMutable() { + if (!((bitField0_ & 0x00000020) == 0x00000020)) { + traces_ = new com.google.protobuf.LazyStringArrayList(traces_); + bitField0_ |= 0x00000020; + } + } + /** + * repeated string traces = 6; + */ + public 
com.google.protobuf.ProtocolStringList + getTracesList() { + return traces_.getUnmodifiableView(); + } + /** + * repeated string traces = 6; + */ + public int getTracesCount() { + return traces_.size(); + } + /** + * repeated string traces = 6; + */ + public java.lang.String getTraces(int index) { + return traces_.get(index); + } + /** + * repeated string traces = 6; + */ + public com.google.protobuf.ByteString + getTracesBytes(int index) { + return traces_.getByteString(index); + } + /** + * repeated string traces = 6; + */ + public Builder setTraces( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureTracesIsMutable(); + traces_.set(index, value); + onChanged(); + return this; + } + /** + * repeated string traces = 6; + */ + public Builder addTraces( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureTracesIsMutable(); + traces_.add(value); + onChanged(); + return this; + } + /** + * repeated string traces = 6; + */ + public Builder addAllTraces( + java.lang.Iterable values) { + ensureTracesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, traces_); + onChanged(); + return this; + } + /** + * repeated string traces = 6; + */ + public Builder clearTraces() { + traces_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000020); + onChanged(); + return this; + } + /** + * repeated string traces = 6; + */ + public Builder addTracesBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureTracesIsMutable(); + traces_.add(value); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:Payload) + } + + static { + defaultInstance = new Payload(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:Payload) + } + + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_MapEntry_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_MapEntry_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_Payload_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_Payload_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\rpayload.proto\"&\n\010MapEntry\022\013\n\003key\030\001 \002(\t" + + "\022\r\n\005value\030\002 \002(\004\"q\n\007Payload\022\014\n\004core\030\001 \002(\r" + + "\022\013\n\003pid\030\002 \002(\r\022\013\n\003tid\030\003 \002(\r\022\021\n\ttimestamp\030" + + "\004 \002(\004\022\033\n\010counters\030\005 \003(\0132\t.MapEntry\022\016\n\006tr" + + "aces\030\006 \003(\tB-\n\032org.powerapi.module.libpfm" + + "B\017PayloadProtocol" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor. 
InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + }, assigner); + internal_static_MapEntry_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_MapEntry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_MapEntry_descriptor, + new java.lang.String[] { "Key", "Value", }); + internal_static_Payload_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_Payload_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_Payload_descriptor, + new java.lang.String[] { "Core", "Pid", "Tid", "Timestamp", "Counters", "Traces", }); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/powerapi-core/src/main/java/org/powerapi/reporter/mwg/MwgReporter.java b/powerapi-core/src/main/java/org/powerapi/reporter/mwg/MwgReporter.java new file mode 100644 index 0000000..b8225bd --- /dev/null +++ b/powerapi-core/src/main/java/org/powerapi/reporter/mwg/MwgReporter.java @@ -0,0 +1,269 @@ +package org.powerapi.reporter.mwg; + +import org.mwg.*; +import org.mwg.core.utility.CoreDeferCounterSync; +import org.mwg.ml.MLPlugin; +import org.mwg.ml.algorithm.regression.PolynomialNode; +import org.mwg.task.Action; +import org.mwg.task.Task; +import org.mwg.task.TaskContext; +import org.powerapi.PowerDisplay; +import org.powerapi.module.PowerChannel; +import org.powerapi.module.libpfm.AgentTick; +import org.powerapi.module.libpfm.PCInterruptionChannel; +import org.powerapi.reporter.mwg.model.*; +import org.powerapi.reporter.mwg.model.Thread; +import org.powerapi.reporter.mwg.plugins.NodeAggregatorPlugin; +import org.powerapi.reporter.mwg.plugins.PolynomialAggregatorNode; +import scala.collection.Seq; + +import static org.mwg.task.Actions.*; + +public class MwgReporter implements PowerDisplay { + + private final Graph graph; + private final String measurement; + + + //Task data + //var to set at the beginning, i.e. 
before run the tasks + private final String methodName = "methodName"; + private final String threadID = "threadID"; + private final String cpuID = "cpuID"; + private final String timeStamp = "timeStamp"; + private final String powerValue = "powerValue"; + private final String diskValue = "diskValue"; + + private final Task finalTask; + + + /** + * + * @param dbPath ws URL (and port) to server + * @param measurement + */ + public MwgReporter(String dbPath, String measurement) { + GraphBuilder graphBuilder = new GraphBuilder() + .withStorage(new WSClient(dbPath)) + .withPlugin(new MLPlugin()) + .withPlugin(new NodeAggregatorPlugin()) + .withMemorySize(10000); + graph = graphBuilder.build(); + this.measurement = measurement; + + finalTask = newTask(); + initTasks(); + } + + private void initTasks() { + //node store in (global) var + final String powerNode = "powerNode"; + final String diskNode = "diskNode"; + final String measurementNode = "measurementNode"; + final String methodNode = "methodNode"; + final String threadNode = "threadNode"; + + Task createSoftMeasured = + inject(measurement) + .asVar("measurementId") + .newNode() + .setProperty(Measurement.ATT_MEASUREMENTID, Measurement.ATT_MEASUREMENTID_TYPE,measurement) + .indexNode(Measurement.IDX_MEASUREMENT,Measurement.ATT_MEASUREMENTID); + Task getOrCreateSoftMeasured = + fromIndex(Measurement.IDX_MEASUREMENT,Measurement.ATT_MEASUREMENTID + "=" + measurement) + .ifThen(context -> context.resultAsNodes().size() == 0,createSoftMeasured) + .asGlobalVar(measurementNode); + + Task createMethod = + newNode() + .asGlobalVar(methodNode) + .setProperty(Method.ATT_NAME,Method.ATT_NAME_TYPE,"{{" + methodName + "}}") + .setProperty(Debug.ATT_ID,Debug.ATT_ID_TYPE,(Debug.nextIndex++) + "") + .indexNode(Debug.IDX_DEBUG,Debug.ATT_ID + "") + .fromVar(measurementNode) + .localIndex(Measurement.IDX_REL_METHOD,Method.ATT_NAME,methodNode) + .fromVar(methodNode); + Task createPowerNode = + newTypedNode(Power.NODE_TYPE) + .asGlobalVar(powerNode) + .fromVar(methodNode) + .add(Method.REL_POWER, powerNode) + .fromVar(powerNode); + Task getOrCreatePower = + fromVar(methodNode) + .traverse(Method.REL_POWER) + .ifThen(context -> context.result().size() == 0, createPowerNode) + .asGlobalVar(powerNode); + Task createDiskNode = + newTypedNode(Power.NODE_TYPE) + .asGlobalVar(diskNode) + .fromVar(methodNode) + .add(Method.REL_DISK,diskNode) + .fromVar(diskNode); + Task getOrCreateDisk = + fromVar(methodNode) + .traverse(Method.REL_DISK) + .ifThen(context -> context.result().size() == 0, createDiskNode) + .asGlobalVar(diskNode); + Task getOrCreateMethod = + fromVar(measurementNode) + .traverseIndex(Measurement.IDX_REL_METHOD, Method.ATT_NAME + "=" + "{{" + methodName + "}}") + .ifThen(context -> context.result().size() == 0,createMethod) + .asGlobalVar(methodNode) + .subTask(getOrCreatePower) + .subTask(getOrCreateDisk) + .fromVar(methodNode); + + final String measureCPUNode = "measureCpuNode"; + final String measureDiskNode = "measureDiskNode"; + Task createThread = + newNode() + .setProperty(Thread.ATT_THREADID, Thread.ATT_THREADID_TYPE,"{{" + threadID + "}}") + .setProperty(Debug.ATT_ID,Debug.ATT_ID_TYPE,(Debug.nextIndex++) + "") + .indexNode(Debug.IDX_DEBUG,Debug.ATT_ID + "") + .asGlobalVar(threadNode) + //add threadNode in Method.IDX_REL_THREAD relation + .fromVar(methodNode) + .localIndex(Method.IDX_REL_THREAD,Thread.ATT_THREADID,threadNode) + //create Polynomial node for cpu + .newTypedNode(Measure.NODE_TYPE) + .asVar(measureCPUNode) + .fromVar(threadNode) + 
.add(Thread.REL_CPU_MEASURE,measureCPUNode) + //add measureNode in 'child' relation of PowerNode + .fromVar(powerNode) + .add(PolynomialAggregatorNode.REL_CHILD,measureCPUNode) + //create polynomial node for disk + .newTypedNode(Measure.NODE_TYPE) + .asVar(measureDiskNode) + .fromVar(threadNode) + .add(Thread.REL_DISK_MEASURE,measureDiskNode) + //add diskNode in 'child' relation of DiskNode + .fromVar(diskNode) + .add(PolynomialAggregatorNode.REL_CHILD,measureDiskNode) + .fromVar(threadNode); + Task getOrCreateThread = + fromVar(methodNode) + .traverseIndex(Method.IDX_REL_THREAD, Thread.ATT_THREADID + "=" + "{{" + threadID + "}}") + .ifThen(context -> context.result().size() == 0, createThread) + .asGlobalVar(threadNode); + + + Task updateCpuIfNeeded = + get(Thread.ATT_CPU_ID) + .ifThen(context -> (context.result().size() == 0 || !context.result().get(0).equals(cpuID)), + fromVar(threadNode) + .jump("{{" + timeStamp + "}}") + .setProperty(Thread.ATT_CPU_ID,Thread.ATT_CPU_ID_TYPE,cpuID)); + + Task addCPUValue = + fromVar(threadNode) + .traverse(Thread.REL_CPU_MEASURE) + .jump("{{" + timeStamp + "}}") + .setProperty(PolynomialNode.VALUE, Type.DOUBLE,"{{" + powerValue + "}}"); + + Task addDiskValue = + fromVar(threadNode) + .traverse(Thread.REL_DISK_MEASURE) + .jump("{{" + timeStamp + "}}") + .setProperty(PolynomialNode.VALUE,Type.DOUBLE,"{{" + diskValue + "}}"); + + finalTask + .setTime("{{" + timeStamp + "}}") + .setWorld("0") + .subTask(getOrCreateSoftMeasured) + .subTask(getOrCreateMethod) + .subTask(getOrCreateThread) + .subTask(updateCpuIfNeeded) + .subTask(addCPUValue) + .subTask(addDiskValue) + .save(); + } + + + public void connect() { + CoreDeferCounterSync counter = new CoreDeferCounterSync(1); + graph.connect(new Callback() { + @Override + public void on(Boolean succeed) { + if(!succeed) { + throw new RuntimeException("Error during graph connection."); + } + counter.count(); + } + }); + counter.waitResult(); + } + + public void disconnect() { + CoreDeferCounterSync counter = new CoreDeferCounterSync(1); + graph.disconnect(new Callback() { + @Override + public void on(Boolean succeed) { + if(!succeed) { + throw new RuntimeException("Error during graph disconnection."); + } + counter.count(); + System.out.println("Disconnected."); + } + }); + + counter.waitResult(); + } + + public Graph getGraph() { + return graph; + } + + + + @Override + public void display(PowerChannel.AggregatePowerReport aggregatePowerReport) { + //sum tick is they are agent + Seq seqRawPower = aggregatePowerReport.rawPowers(); + scala.collection.Iterator itAgent = seqRawPower.toIterator(); + PowerChannel.RawPowerReport rawPowerReport; + + + double disk = 0; + while(itAgent.hasNext()) { + rawPowerReport = itAgent.next(); + if(rawPowerReport.tick() instanceof AgentTick) { + disk = disk + rawPowerReport.power().toWatts(); + } + } + + scala.collection.Iterator it = seqRawPower.toIterator(); + while(it.hasNext()) { + rawPowerReport = it.next(); + if(rawPowerReport.tick() instanceof PCInterruptionChannel.InterruptionTick) { + PCInterruptionChannel.InterruptionTick tick = (PCInterruptionChannel.InterruptionTick) rawPowerReport.tick(); + + then(new Action() { + @Override + public void eval(TaskContext taskContext) { + System.out.println("Task started"); + taskContext.continueTask(); + } + }) + .inject(tick.fullMethodName()) + .asGlobalVar(methodName) + .inject(tick.tid().toString()) + .asGlobalVar(threadID) + .inject(tick.cpu()) + .asGlobalVar(cpuID) + .inject(tick.timestamp()) + .asGlobalVar(timeStamp) + 
.inject(rawPowerReport.power().toWatts()) + .asGlobalVar(powerValue) + .inject(disk) + .asGlobalVar(diskValue) + .subTask(finalTask) + .executeSync(graph); + } + } + + + + } +} diff --git a/powerapi-core/src/main/java/org/powerapi/reporter/mwg/model/Debug.java b/powerapi-core/src/main/java/org/powerapi/reporter/mwg/model/Debug.java new file mode 100644 index 0000000..a2618cc --- /dev/null +++ b/powerapi-core/src/main/java/org/powerapi/reporter/mwg/model/Debug.java @@ -0,0 +1,15 @@ +package org.powerapi.reporter.mwg.model; + +import org.mwg.Type; + +/** + * Created by ludovicmouline on 08/09/16. + */ +public class Debug { + //tempory index used for debugguer + public static final String IDX_DEBUG = "Debug.idx_debug"; + public static final String ATT_ID = "id"; + public static final byte ATT_ID_TYPE = Type.INT; + + public static int nextIndex = 0; +} diff --git a/powerapi-core/src/main/java/org/powerapi/reporter/mwg/model/Measure.java b/powerapi-core/src/main/java/org/powerapi/reporter/mwg/model/Measure.java new file mode 100644 index 0000000..0e912a9 --- /dev/null +++ b/powerapi-core/src/main/java/org/powerapi/reporter/mwg/model/Measure.java @@ -0,0 +1,8 @@ +package org.powerapi.reporter.mwg.model; + + +import org.mwg.ml.algorithm.regression.PolynomialNode; + +public class Measure { + public static final String NODE_TYPE = PolynomialNode.NAME; +} diff --git a/powerapi-core/src/main/java/org/powerapi/reporter/mwg/model/Measurement.java b/powerapi-core/src/main/java/org/powerapi/reporter/mwg/model/Measurement.java new file mode 100644 index 0000000..3adf73c --- /dev/null +++ b/powerapi-core/src/main/java/org/powerapi/reporter/mwg/model/Measurement.java @@ -0,0 +1,12 @@ +package org.powerapi.reporter.mwg.model; + +import org.mwg.Type; + +public class Measurement { + public static final String IDX_MEASUREMENT = "Measurement.measurementID"; + + public static final String ATT_MEASUREMENTID = "measurementID"; + public static final byte ATT_MEASUREMENTID_TYPE = Type.STRING; + + public static final String IDX_REL_METHOD = "method"; +} diff --git a/powerapi-core/src/main/java/org/powerapi/reporter/mwg/model/Method.java b/powerapi-core/src/main/java/org/powerapi/reporter/mwg/model/Method.java new file mode 100644 index 0000000..d30fa00 --- /dev/null +++ b/powerapi-core/src/main/java/org/powerapi/reporter/mwg/model/Method.java @@ -0,0 +1,15 @@ +package org.powerapi.reporter.mwg.model; + + +import org.mwg.Type; + +public class Method { + public static final String ATT_NAME = "name"; + public static final byte ATT_NAME_TYPE = Type.STRING; + + public static final String IDX_REL_THREAD = "thread"; + + public static final String REL_POWER = "power"; + public static final String REL_DISK = "disk"; + +} diff --git a/powerapi-core/src/main/java/org/powerapi/reporter/mwg/model/Power.java b/powerapi-core/src/main/java/org/powerapi/reporter/mwg/model/Power.java new file mode 100644 index 0000000..2964cd8 --- /dev/null +++ b/powerapi-core/src/main/java/org/powerapi/reporter/mwg/model/Power.java @@ -0,0 +1,7 @@ +package org.powerapi.reporter.mwg.model; + +import org.powerapi.reporter.mwg.plugins.PolynomialAggregatorNode; + +public class Power { + public static final String NODE_TYPE = PolynomialAggregatorNode.NAME; +} diff --git a/powerapi-core/src/main/java/org/powerapi/reporter/mwg/model/Thread.java b/powerapi-core/src/main/java/org/powerapi/reporter/mwg/model/Thread.java new file mode 100644 index 0000000..59d1dad --- /dev/null +++ b/powerapi-core/src/main/java/org/powerapi/reporter/mwg/model/Thread.java @@ -0,0 
+1,15 @@ +package org.powerapi.reporter.mwg.model; + +import org.mwg.Type; + +public class Thread { + public static final String ATT_THREADID = "threadID"; + public static final byte ATT_THREADID_TYPE = Type.STRING; + + public static final String ATT_CPU_ID = "cpu"; + public static final byte ATT_CPU_ID_TYPE = Type.STRING; + + public static final String REL_CPU_MEASURE = "cpuMeasure"; + public static final String REL_DISK_MEASURE = "diskMeasure"; + +} diff --git a/powerapi-core/src/main/scala/org/powerapi/PowerMeter.scala b/powerapi-core/src/main/scala/org/powerapi/PowerMeter.scala index df8a42b..3c6a2e3 100644 --- a/powerapi-core/src/main/scala/org/powerapi/PowerMeter.scala +++ b/powerapi-core/src/main/scala/org/powerapi/PowerMeter.scala @@ -150,7 +150,7 @@ class PowerMeter(factory: ActorRefFactory, modules: Seq[PowerModule]) extends Po * @param duration duration to wait for. * @return the instance of the underlying software power meter. */ - def waitFor(duration: FiniteDuration): this.type = { + def waitFor(duration: FiniteDuration): PowerMeter = { Await.result(underlyingActor.ask(WaitForMessage(duration))(duration + 1L.seconds), duration + 1L.seconds) this } @@ -198,6 +198,12 @@ trait PowerModule { * @author Romain Rouvoy */ trait PowerMonitoring { + + /** + * Internal event bus. + */ + def eventBus: MessageBus + /** * Unique ID */ @@ -206,35 +212,35 @@ trait PowerMonitoring { /** * Change the aggregation function to apply on raw power reports. */ - def apply(aggregator: Seq[Power] => Power): this.type + def apply(aggregator: Seq[Power] => Power): PowerMonitoring /** * Change frequency when periodic ticks are internally created by a clock. */ - def every(frequency: FiniteDuration): this.type + def every(frequency: FiniteDuration): PowerMonitoring /** * Configure the power display to use for rendering power estimation. */ - def to(output: PowerDisplay): this.type + def to(output: PowerDisplay): PowerMonitoring - def to(reference: ActorRef): this.type + def to(reference: ActorRef): PowerMonitoring - def to(reference: ActorRef, subscribeMethod: MessageBus => ActorRef => Unit): this.type + def to(reference: ActorRef, subscribeMethod: MessageBus => ActorRef => Unit): PowerMonitoring /** * Remove the power display used for rendering power estimation. */ - def unto(output: PowerDisplay): this.type + def unto(output: PowerDisplay): PowerMonitoring - def unto(reference: ActorRef): this.type + def unto(reference: ActorRef): PowerMonitoring - def unto(reference: ActorRef, unsubscribeMethod: MessageBus => ActorRef => Unit): this.type + def unto(reference: ActorRef, unsubscribeMethod: MessageBus => ActorRef => Unit): PowerMonitoring /** * Cancel the subscription and stop the associated monitoring. 
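The PowerMonitoring signature changes above replace `this.type` with the trait type itself, so a configuration chain still reads fluently. A minimal, hypothetical sketch of such a chain (the aggregator and the display instance are illustrative, not part of this patch):

    import scala.concurrent.duration.DurationInt
    import org.powerapi.{PowerDisplay, PowerMonitoring}

    // Configure an existing monitoring: keep the highest raw power as the aggregate,
    // tick every second, and render estimations through the given display.
    def configure(monitoring: PowerMonitoring, output: PowerDisplay): PowerMonitoring =
      monitoring
        .apply(powers => powers.maxBy(_.toWatts))
        .every(1.second)
        .to(output)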
*/ - def cancel() + def cancel(): Unit } /** diff --git a/powerapi-core/src/main/scala/org/powerapi/core/Component.scala b/powerapi-core/src/main/scala/org/powerapi/core/Component.scala index 37eb6b2..2424205 100644 --- a/powerapi-core/src/main/scala/org/powerapi/core/Component.scala +++ b/powerapi-core/src/main/scala/org/powerapi/core/Component.scala @@ -72,6 +72,6 @@ class GuardianFailureStrategy extends SupervisorStrategyConfigurator { } def handleFailure: PartialFunction[Throwable, Directive] = { - case _: UnsupportedOperationException => Resume + case _ => Resume } } diff --git a/powerapi-core/src/main/scala/org/powerapi/core/MonitorActors.scala b/powerapi-core/src/main/scala/org/powerapi/core/MonitorActors.scala index 5562eeb..f85d187 100644 --- a/powerapi-core/src/main/scala/org/powerapi/core/MonitorActors.scala +++ b/powerapi-core/src/main/scala/org/powerapi/core/MonitorActors.scala @@ -257,7 +257,7 @@ class Monitors(eventBus: MessageBus) extends MonitorConfiguration with Superviso /** * This class acts like a mirror for interacting with the event bus. */ -class Monitor(eventBus: MessageBus) extends PowerMonitoring { +class Monitor(val eventBus: MessageBus) extends PowerMonitoring { val muid = UUID.randomUUID() def apply(aggregator: Seq[Power] => Power): this.type = { diff --git a/powerapi-core/src/main/scala/org/powerapi/core/OSHelper.scala b/powerapi-core/src/main/scala/org/powerapi/core/OSHelper.scala index 18bbf1c..ca704d1 100644 --- a/powerapi-core/src/main/scala/org/powerapi/core/OSHelper.scala +++ b/powerapi-core/src/main/scala/org/powerapi/core/OSHelper.scala @@ -419,7 +419,7 @@ class LinuxHelper extends Configuration(None) with OSHelper { } def attachToCGroup(subsystem: String, name: String, toAttach: String): Unit = { - Seq("cgclassify", "-g", s"$subsystem:/$name", s"$toAttach").! + Seq("cgclassify", "-g", s"$subsystem:/$name", "--sticky", s"$toAttach").! } def deleteCGroup(subsystem: String, name: String): Unit = { diff --git a/powerapi-core/src/main/scala/org/powerapi/module/PowerChannel.scala b/powerapi-core/src/main/scala/org/powerapi/module/PowerChannel.scala index 3ba7f3f..91b5020 100644 --- a/powerapi-core/src/main/scala/org/powerapi/module/PowerChannel.scala +++ b/powerapi-core/src/main/scala/org/powerapi/module/PowerChannel.scala @@ -122,6 +122,8 @@ object PowerChannel extends Channel { reports += powerReport } + def rawPowers: Seq[RawPowerReport] = reports + def size: Int = reports.size def targets: Set[Target] = reports.map(_.target).toSet diff --git a/powerapi-core/src/main/scala/org/powerapi/module/extpowermeter/rapl/RAPLHelper.scala b/powerapi-core/src/main/scala/org/powerapi/module/extpowermeter/rapl/RAPLHelper.scala index 9f8dfb4..eb2d7ec 100644 --- a/powerapi-core/src/main/scala/org/powerapi/module/extpowermeter/rapl/RAPLHelper.scala +++ b/powerapi-core/src/main/scala/org/powerapi/module/extpowermeter/rapl/RAPLHelper.scala @@ -27,9 +27,10 @@ import java.nio.channels.FileChannel import java.nio.{ByteBuffer, ByteOrder} import scala.sys.process.stringSeqToProcess - import org.apache.logging.log4j.LogManager +import scala.io.Source + /** * Collecting energy information contained into RAPL registers (MSR) * and providing the CPU energy. 
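With `rawPowers` now exposed on `AggregatePowerReport`, a custom `PowerDisplay` can inspect every raw report and discriminate them by tick type, which mirrors what the Java `MwgReporter.display` above does for its disk value. A minimal Scala sketch under the same assumption (class name and output are illustrative):

    import org.powerapi.PowerDisplay
    import org.powerapi.module.PowerChannel.AggregatePowerReport
    import org.powerapi.module.libpfm.AgentTick

    class AgentPowerLogger extends PowerDisplay {
      def display(aggregatePowerReport: AggregatePowerReport): Unit = {
        // Sum only the raw reports produced from agent ticks.
        val agentWatts = aggregatePowerReport.rawPowers.collect {
          case raw if raw.tick.isInstanceOf[AgentTick] => raw.power.toWatts
        }.sum

        println(s"agent power: $agentWatts W (${aggregatePowerReport.size} raw reports)")
      }
    }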
@@ -94,7 +95,7 @@ class RAPLHelper(msrPath: String, cpuInfoPath: String, supportedArchis: Map[Int, } private def detectCpu: Boolean = { - val source = io.Source.fromFile(cpuInfoPath).getLines + val source = Source.fromFile(cpuInfoPath).getLines source.find(l => l.startsWith("vendor_id") && l.endsWith("GenuineIntel")) match { case Some(_) => source.find(l => l.startsWith("cpu family") && l.endsWith("6")) match { case Some(_) => source.find(_.startsWith("model")) match { diff --git a/powerapi-core/src/main/scala/org/powerapi/module/libpfm/LibpfmCoreSensor.scala b/powerapi-core/src/main/scala/org/powerapi/module/libpfm/LibpfmCoreSensor.scala index 8e9c455..62345af 100644 --- a/powerapi-core/src/main/scala/org/powerapi/module/libpfm/LibpfmCoreSensor.scala +++ b/powerapi-core/src/main/scala/org/powerapi/module/libpfm/LibpfmCoreSensor.scala @@ -38,7 +38,7 @@ import org.powerapi.module.Sensor import org.powerapi.module.libpfm.PerformanceCounterChannel.{HWCounter, LibpfmPickerStop, formatLibpfmCoreSensorChildName, publishPCReport} /** - * Libpfm sensor component that collects metrics with libpfm at a core level. + * Libpfm sensor component which collects metrics with libpfm at a core level. * * @author Maxime Colmant */ diff --git a/powerapi-core/src/main/scala/org/powerapi/module/libpfm/LibpfmHelper.scala b/powerapi-core/src/main/scala/org/powerapi/module/libpfm/LibpfmHelper.scala index 79d7379..619eb5b 100644 --- a/powerapi-core/src/main/scala/org/powerapi/module/libpfm/LibpfmHelper.scala +++ b/powerapi-core/src/main/scala/org/powerapi/module/libpfm/LibpfmHelper.scala @@ -40,7 +40,9 @@ import perfmon2.libpfm.{LibpfmLibrary, perf_event_attr, pfm_event_attr_info_t, p */ trait Identifier -case class TID(identifier: Int) extends Identifier +case class TID(identifier: Int) extends Identifier { + override def toString: String = s"$identifier" +} case class CID(core: Int) extends Identifier diff --git a/powerapi-core/src/main/scala/org/powerapi/module/libpfm/LibpfmInterruptionCoreModule.scala b/powerapi-core/src/main/scala/org/powerapi/module/libpfm/LibpfmInterruptionCoreModule.scala new file mode 100644 index 0000000..8e34741 --- /dev/null +++ b/powerapi-core/src/main/scala/org/powerapi/module/libpfm/LibpfmInterruptionCoreModule.scala @@ -0,0 +1,46 @@ +/* + * This software is licensed under the GNU Affero General Public License, quoted below. + * + * This file is a part of PowerAPI. + * + * Copyright (C) 2011-2016 Inria, University of Lille 1. + * + * PowerAPI is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of + * the License, or (at your option) any later version. + * + * PowerAPI is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with PowerAPI. + * + * If not, please consult http://www.gnu.org/licenses/agpl-3.0.html. 
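The new `LibpfmInterruptionCoreModule` declared just below is a regular `PowerModule`, so it can be composed into a software power meter like any other module. A hypothetical wiring sketch, assuming the public `PowerMeter(factory, modules)` constructor shown earlier and the module's default configuration prefix:

    import akka.actor.ActorSystem
    import org.powerapi.PowerMeter
    import org.powerapi.module.libpfm.LibpfmInterruptionCoreModule

    object InterruptionMeterSketch {
      // Build a power meter backed by the interruption-based libpfm module,
      // reading its topology, events and formulae from the default configuration.
      val system = ActorSystem("powerapi-example")
      val powerMeter = new PowerMeter(system, Seq(LibpfmInterruptionCoreModule()))
    }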
+ */ +package org.powerapi.module.libpfm + +import scala.concurrent.duration.FiniteDuration + +import org.powerapi.PowerModule +import org.powerapi.module.libpfm.cycles.{LibpfmCoreCyclesFormulaConfiguration, LibpfmInterruptionCoreCyclesFormula} + +class LibpfmInterruptionCoreModule(topology: Map[Int, Set[Int]], events: Set[String], + cyclesThreadName: String, cyclesRefName: String, pModel: Map[Double, List[Double]], samplingInterval: FiniteDuration) extends PowerModule { + + val sensor = Some((classOf[LibpfmInterruptionCoreSensor], Seq(topology, events))) + val formula = Some((classOf[LibpfmInterruptionCoreCyclesFormula], Seq(cyclesThreadName, cyclesRefName, pModel, samplingInterval))) +} + +object LibpfmInterruptionCoreModule { + def apply(prefixConfig: Option[String] = None): LibpfmInterruptionCoreModule = { + val coreSensorConfig = new LibpfmCoreSensorConfiguration(prefixConfig) + val coreCyclesFormulaConfig = new LibpfmCoreCyclesFormulaConfiguration(prefixConfig) + + new LibpfmInterruptionCoreModule(coreSensorConfig.topology, coreSensorConfig.events, + coreCyclesFormulaConfig.cyclesThreadName, coreCyclesFormulaConfig.cyclesRefName, + coreCyclesFormulaConfig.formulae, coreCyclesFormulaConfig.samplingInterval) + } +} diff --git a/powerapi-core/src/main/scala/org/powerapi/module/libpfm/LibpfmInterruptionCoreSensor.scala b/powerapi-core/src/main/scala/org/powerapi/module/libpfm/LibpfmInterruptionCoreSensor.scala new file mode 100644 index 0000000..aa322f9 --- /dev/null +++ b/powerapi-core/src/main/scala/org/powerapi/module/libpfm/LibpfmInterruptionCoreSensor.scala @@ -0,0 +1,95 @@ +/* + * This software is licensed under the GNU Affero General Public License, quoted below. + * + * This file is a part of PowerAPI. + * + * Copyright (C) 2011-2016 Inria, University of Lille 1. + * + * PowerAPI is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of + * the License, or (at your option) any later version. + * + * PowerAPI is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with PowerAPI. + * + * If not, please consult http://www.gnu.org/licenses/agpl-3.0.html. + */ +package org.powerapi.module.libpfm + +import java.util.UUID + +import collection.JavaConversions._ + +import akka.actor.{PoisonPill, Actor} + +import org.powerapi.core.{Tick, MessageBus} +import org.powerapi.core.MonitorChannel.{MonitorTick, subscribeMonitorTick, unsubscribeMonitorTick} +import org.powerapi.core.target.{Process, Target} +import org.powerapi.module.Sensor +import org.powerapi.module.libpfm.PCInterruptionChannel.{InterruptionHWCounter, InterruptionPCWrapper, publishInterruptionPCReport} +import org.powerapi.module.libpfm.PayloadProtocol.Payload + +/** + * Libpfm sensor component which used information sent by a PowerAPI agent. + * The PowerAPI agent uses the interruption mode to collect performance counters and stack traces. 
+ * + * @author Maxime Colmant + */ +class LibpfmInterruptionCoreSensor(eventBus: MessageBus, muid: UUID, target: Target, topology: Map[Int, Set[Int]], events: Set[String]) + extends Sensor(eventBus, muid, target) { + + val combinations = { + for { + core: Int <- topology.keys + index: Int <- topology(core) + event: String <- events + } yield (core, index, event) + }.toParArray + + def init(): Unit = subscribeMonitorTick(muid, target)(eventBus)(self) + + def terminate(): Unit = unsubscribeMonitorTick(muid, target)(eventBus)(self) + + def handler: Actor.Receive = sense(Map()) + + // payloads: Map[cpu index -> Payload] + def sense(payloads: Map[Int, Payload]): Actor.Receive = { + case msg: MonitorTick if msg.tick.isInstanceOf[AgentTick] => + val tick = msg.tick.asInstanceOf[AgentTick] + + if (tick.payload.getTracesCount == 0) { + val currentPayloads = payloads.find(_._2.getTid == tick.payload.getTid) match { + case Some((core, _)) => payloads - core + case _ => payloads + } + + context.become(sense(currentPayloads) orElse sensorDefault) + } + + else { + val currentPayloads = (payloads.find(_._2.getTid == tick.payload.getTid) match { + case Some((core, _)) => payloads - core + case _ => payloads + }) + (tick.payload.getCore -> tick.payload) + + val allWrappers = for ((core, index, event) <- combinations if currentPayloads.contains(index)) yield { + val payload = currentPayloads(index) + val triggering = tick.payload.getTimestamp == payload.getTimestamp + val counter = InterruptionHWCounter(index, payload.getTid, payload.getTracesList.reverse.mkString("."), payload.getCountersList.filter(_.getKey == event).head.getValue, triggering) + InterruptionPCWrapper(core, event, List(counter)) + } + + publishInterruptionPCReport(muid, target, allWrappers.groupBy(wrapper => (wrapper.core, wrapper.event)).map { + case ((core, event), wrappers) => InterruptionPCWrapper(core, event, wrappers.flatMap(_.values).toList) + }.toList, new Tick { val topic = ""; val timestamp = tick.payload.getTimestamp })(eventBus) + + context.become(sense(currentPayloads) orElse sensorDefault) + } + } +} diff --git a/powerapi-core/src/main/scala/org/powerapi/module/libpfm/PCInterruptionChannel.scala b/powerapi-core/src/main/scala/org/powerapi/module/libpfm/PCInterruptionChannel.scala new file mode 100644 index 0000000..443c676 --- /dev/null +++ b/powerapi-core/src/main/scala/org/powerapi/module/libpfm/PCInterruptionChannel.scala @@ -0,0 +1,112 @@ +/* + * This software is licensed under the GNU Affero General Public License, quoted below. + * + * This file is a part of PowerAPI. + * + * Copyright (C) 2011-2016 Inria, University of Lille 1. + * + * PowerAPI is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of + * the License, or (at your option) any later version. + * + * PowerAPI is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with PowerAPI. + * + * If not, please consult http://www.gnu.org/licenses/agpl-3.0.html. 
+ */ +package org.powerapi.module.libpfm + +import java.util.UUID + +import akka.actor.ActorRef +import org.powerapi.core.target.Target +import org.powerapi.core.{Channel, Message, MessageBus, Tick} +import org.powerapi.module.libpfm.PayloadProtocol.Payload + +case class AgentTick(topic: String, timestamp: Long, payload: Payload) extends Tick + +/** + * InterruptionChannel channel and messages. + * + * @author Maxime Colmant + */ +object PCInterruptionChannel extends Channel { + + type M = InterruptionPCReport + + /** + * Publish an InterruptionPCReport in the event bus. + */ + def publishInterruptionPCReport(muid: UUID, target: Target, wrappers: Seq[InterruptionPCWrapper], tick: Tick): MessageBus => Unit = { + publish(InterruptionPCReport(interruptionPCReportToTopic(muid, target), muid, target, wrappers, tick)) + } + + /** + * Used to subscribe to InterruptionPCReport on the right topic. + */ + def subscribeInterruptionPCReport(muid: UUID, target: Target): MessageBus => ActorRef => Unit = { + subscribe(interruptionPCReportToTopic(muid, target)) + } + + /** + * Used to unsubscribe to InterruptionPCReport on the right topic. + */ + def unsubscribeInterruptionPCReport(muid: UUID, target: Target): MessageBus => ActorRef => Unit = { + unsubscribe(interruptionPCReportToTopic(muid, target)) + } + + /** + * Used to format the topic used to interact with the FormulaChild actors. + */ + def interruptionPCReportToTopic(muid: UUID, target: Target): String = { + s"libpfm-interruption-sensor:$muid-$target" + } + + /** + * Extended tick to keep more information when an interruption occurs. + * + * @param cpu cpu id of the running core when the interruption was launched. + * @param tid thread id at the origin of the interruption. + * @param fullMethodName method name which is at the origin of the interruption. + * @param timestamp origin timestamp of the interruption (in nanoseconds). + * @param triggering is it the tick at the origin of the interruption? + */ + case class InterruptionTick(topic: String, + cpu: Int, + tid: TID, + fullMethodName: String, + timestamp: Long, + triggering: Boolean) extends Tick + + /** + * Internal message. + * One message per core/event. + */ + case class InterruptionPCWrapper(core: Int, event: String, values: Seq[InterruptionHWCounter]) + + /** + * Internal message used to wrap an hardware counter value received after an interruption. + */ + case class InterruptionHWCounter(cpu: Int, tid: Int, fullMethodName: String, value: Long, triggering: Boolean) + + /** + * InterruptionPCReport is represented as a dedicated type of message. + * + * @param topic subject used for routing the message. + * @param muid monitor unique identifier (MUID), which is at the origin of the report flow. + * @param target monitor target. + * @param wrappers performance counter wrappers. + * @param tick tick origin. 
+ */ + case class InterruptionPCReport(topic: String, + muid: UUID, + target: Target, + wrappers: Seq[InterruptionPCWrapper], + tick: Tick) extends Message +} diff --git a/powerapi-core/src/main/scala/org/powerapi/module/libpfm/PerformanceCounterChannel.scala b/powerapi-core/src/main/scala/org/powerapi/module/libpfm/PerformanceCounterChannel.scala index 9cb335b..6a5240f 100644 --- a/powerapi-core/src/main/scala/org/powerapi/module/libpfm/PerformanceCounterChannel.scala +++ b/powerapi-core/src/main/scala/org/powerapi/module/libpfm/PerformanceCounterChannel.scala @@ -62,7 +62,7 @@ object PerformanceCounterChannel extends Channel { } /** - * Used to unsubscribe to PCReport on the rigth topic. + * Used to unsubscribe to PCReport on the right topic. */ def unsubscribePCReport(muid: UUID, target: Target): MessageBus => ActorRef => Unit = { unsubscribe(pcReportToTopic(muid, target)) diff --git a/powerapi-core/src/main/scala/org/powerapi/module/libpfm/cycles/LibpfmInterruptionCoreCyclesFormula.scala b/powerapi-core/src/main/scala/org/powerapi/module/libpfm/cycles/LibpfmInterruptionCoreCyclesFormula.scala new file mode 100644 index 0000000..298584f --- /dev/null +++ b/powerapi-core/src/main/scala/org/powerapi/module/libpfm/cycles/LibpfmInterruptionCoreCyclesFormula.scala @@ -0,0 +1,97 @@ +/* + * This software is licensed under the GNU Affero General Public License, quoted below. + * + * This file is a part of PowerAPI. + * + * Copyright (C) 2011-2016 Inria, University of Lille 1. + * + * PowerAPI is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of + * the License, or (at your option) any later version. + * + * PowerAPI is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with PowerAPI. + * + * If not, please consult http://www.gnu.org/licenses/agpl-3.0.html. + */ +package org.powerapi.module.libpfm.cycles + +import java.util.UUID + +import scala.concurrent.ExecutionContext.Implicits.global +import scala.concurrent.Future +import scala.concurrent.duration.FiniteDuration + +import akka.actor.Actor + +import org.powerapi.core.MessageBus +import org.powerapi.core.power._ +import org.powerapi.core.target.Target +import org.powerapi.module.Formula +import org.powerapi.module.PowerChannel.publishRawPowerReport +import org.powerapi.module.libpfm.PCInterruptionChannel.{InterruptionTick, InterruptionPCReport, subscribeInterruptionPCReport, unsubscribeInterruptionPCReport} +import org.powerapi.module.libpfm.TID + +/** + * Special implementation of the power model, using two named events: "unhalted cycles" and "reference cycles". + * The first one increases with the load, and the second one is used to compute the running frequency of the CPU. + * One RawPower message is published per thread ID (which corresponds to a running method).
+ * + * @author Maxime Colmant + */ +class LibpfmInterruptionCoreCyclesFormula(eventBus: MessageBus, muid: UUID, target: Target, cyclesThreadName: String, cyclesRefName: String, formulae: Map[Double, List[Double]], samplingInterval: FiniteDuration) + extends Formula(eventBus, muid, target) { + + def init(): Unit = subscribeInterruptionPCReport(muid, target)(eventBus)(self) + + def terminate(): Unit = unsubscribeInterruptionPCReport(muid, target)(eventBus)(self) + + def handler: Actor.Receive = compute(System.nanoTime()) + + def compute(old: Long): Actor.Receive = { + case msg: InterruptionPCReport => + val now = System.nanoTime() + + for ((_, wrappers) <- msg.wrappers.groupBy(_.core)) { + if (wrappers.count(_.event == cyclesThreadName) == 1 && wrappers.count(_.event == cyclesRefName) == 1) { + val cyclesThread = wrappers.filter(_.event == cyclesThreadName).head.values + val cyclesRef = wrappers.filter(_.event == cyclesRefName).head.values + + val cyclesVal = cyclesThread.map(_.value).sum + val scaledCycles = if (now - old <= 0) 0l else math.round(cyclesVal * (samplingInterval.toNanos / (now - old).toDouble)) + + val refsVal = cyclesRef.map(_.value).sum + val scaledRefs = if (now - old <= 0) 0l else math.round(refsVal * (samplingInterval.toNanos / (now - old).toDouble)) + + var coefficient: Double = math.round(scaledCycles / scaledRefs.toDouble) + + if (coefficient.isNaN || coefficient < formulae.keys.min) coefficient = formulae.keys.min + + if (coefficient > formulae.keys.max) coefficient = formulae.keys.max + + if (!formulae.contains(coefficient)) { + val coefficientsBefore = formulae.keys.filter(_ < coefficient) + coefficient = coefficientsBefore.max + } + + val formula = formulae(coefficient).updated(0, 0d) + val corePower = formula.zipWithIndex.foldLeft(0d)((acc, tuple) => acc + (tuple._1 * math.pow(scaledCycles, tuple._2))) + + cyclesThread.foreach { + case cycles => + val threadPower = corePower * (cycles.value / cyclesVal.toDouble) + val tick = InterruptionTick("", cycles.cpu, TID(cycles.tid), cycles.fullMethodName, msg.tick.timestamp, cycles.triggering) + publishRawPowerReport(muid, target, if (threadPower > 0) threadPower.W else 0.W, "cpu", tick)(eventBus) + } + + context.become(compute(now) orElse formulaDefault) + } + } + } +} diff --git a/powerapi-core/src/main/scala/org/powerapi/reporter/InfluxDisplay.scala b/powerapi-core/src/main/scala/org/powerapi/reporter/InfluxDisplay.scala index 96674a9..834b50c 100644 --- a/powerapi-core/src/main/scala/org/powerapi/reporter/InfluxDisplay.scala +++ b/powerapi-core/src/main/scala/org/powerapi/reporter/InfluxDisplay.scala @@ -27,14 +27,22 @@ import com.paulgoldbaum.influxdbclient.{InfluxDB, Point} import org.powerapi.PowerDisplay import org.powerapi.module.PowerChannel.AggregatePowerReport +import scala.concurrent.Await +import scala.concurrent.duration.DurationInt +import scala.concurrent.ExecutionContext.Implicits.global + /** * Write power information inside an InfluxDB database.
*/ class InfluxDisplay(host: String, port: Int, user: String, pwd: String, dbName: String, measurement: String) extends PowerDisplay { - + val timeout = 10.seconds val influxdb = InfluxDB.connect(host, port, user, pwd) val database = influxdb.selectDatabase(dbName) + if (!Await.result(database.exists(), timeout)) { + Await.result(database.create(), timeout) + } + def display(aggregatePowerReport: AggregatePowerReport): Unit = { val muid = aggregatePowerReport.muid val timestamp = aggregatePowerReport.ticks.map(_.timestamp).head diff --git a/powerapi-core/src/test/java/org/powerapi/reporter/mwg/MwgReporterTest.java b/powerapi-core/src/test/java/org/powerapi/reporter/mwg/MwgReporterTest.java new file mode 100644 index 0000000..a4f1ba9 --- /dev/null +++ b/powerapi-core/src/test/java/org/powerapi/reporter/mwg/MwgReporterTest.java @@ -0,0 +1,168 @@ +package org.powerapi.reporter.mwg; + +import org.mwg.Callback; +import org.mwg.Node; +import org.mwg.ml.algorithm.regression.PolynomialNode; +import org.powerapi.core.power.PowerConverter; +import org.powerapi.core.power.RawPower; +import org.powerapi.core.target.Application; +import org.powerapi.module.PowerChannel; +import org.powerapi.module.libpfm.AgentTick; +import org.powerapi.module.libpfm.PCInterruptionChannel; +import org.powerapi.module.libpfm.PayloadProtocol; +import org.powerapi.module.libpfm.TID; +import org.powerapi.reporter.mwg.model.Measurement; +import org.powerapi.reporter.mwg.model.Method; +import org.powerapi.reporter.mwg.model.Thread; +import org.powerapi.reporter.mwg.plugins.PolynomialAggregatorNode; +import scala.collection.mutable.Seq; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.UUID; + +public class MwgReporterTest { + + public static void main(String[] args) { + MwgReporter mwgReporter = new MwgReporter("ws://localhost:9876","testSoft"); + mwgReporter.connect(); + + final long timestamp1 = 12344L; + final long timestamp2 = 12345L; + final long timestamp3 = 12346L; + + + UUID uuid = UUID.randomUUID(); + PowerChannel.AggregatePowerReport report1 = new PowerChannel.AggregatePowerReport(uuid) { + @Override + public Seq rawPowers() { + List list = new ArrayList<>(); + + PowerChannel.RawPowerReport r1 = new PowerChannel.RawPowerReport("",uuid,new Application("testSoft"), + new RawPower(10, PowerConverter.WATTS()),"cpu",new PCInterruptionChannel.InterruptionTick("",0,new TID(10),"a.b.b",timestamp1,true)); + list.add(r1); + + PowerChannel.RawPowerReport r2 = new PowerChannel.RawPowerReport("",uuid,new Application("testSoft"), + new RawPower(20, PowerConverter.WATTS()),"disk",new AgentTick("",timestamp1, PayloadProtocol.Payload.newBuilder().setCore(0).setPid(10).setTid(10).setTimestamp(timestamp1).build())); + list.add(r2); + + return scala.collection.JavaConversions.asScalaBuffer(list).seq(); + } + }; + mwgReporter.display(report1); + + PowerChannel.AggregatePowerReport report2 = new PowerChannel.AggregatePowerReport(uuid) { + @Override + public Seq rawPowers() { + List list = new ArrayList<>(); + + PowerChannel.RawPowerReport r1 = new PowerChannel.RawPowerReport("",uuid,new Application("testSoft"), + new RawPower(11, PowerConverter.WATTS()),"cpu",new PCInterruptionChannel.InterruptionTick("",0,new TID(10),"a.b.b",timestamp2,false)); + list.add(r1); + + PowerChannel.RawPowerReport r2 = new PowerChannel.RawPowerReport("",uuid,new Application("testSoft"), + new RawPower(10, PowerConverter.WATTS()),"cpu",new PCInterruptionChannel.InterruptionTick("",1,new 
TID(11),"w.x.y",timestamp2,true)); + list.add(r2); + + PowerChannel.RawPowerReport r3 = new PowerChannel.RawPowerReport("",uuid,new Application("testSoft"), + new RawPower(8, PowerConverter.WATTS()),"disk",new AgentTick("",timestamp2, PayloadProtocol.Payload.newBuilder().setCore(1).setPid(11).setTid(11).setTimestamp(timestamp2).build())); + list.add(r3); + + return scala.collection.JavaConversions.asScalaBuffer(list).seq(); + } + }; + mwgReporter.display(report2); + + + PowerChannel.AggregatePowerReport report3 = new PowerChannel.AggregatePowerReport(uuid) { + @Override + public Seq rawPowers() { + List list = new ArrayList<>(); + + PowerChannel.RawPowerReport r1 = new PowerChannel.RawPowerReport("",uuid,new Application("testSoft"), + new RawPower(1, PowerConverter.WATTS()),"cpu",new PCInterruptionChannel.InterruptionTick("",0,new TID(12),"w.x.y",timestamp3,true)); + list.add(r1); + + PowerChannel.RawPowerReport r2 = new PowerChannel.RawPowerReport("",uuid,new Application("testSoft"), + new RawPower(1, PowerConverter.WATTS()),"disk",new AgentTick("",timestamp3, PayloadProtocol.Payload.newBuilder().setCore(0).setPid(12).setTid(12).setTimestamp(timestamp3).build())); + list.add(r2); + + return scala.collection.JavaConversions.asScalaBuffer(list).seq(); + } + }; + mwgReporter.display(report3); + + mwgReporter.getGraph().findAll(0, timestamp3, Measurement.IDX_MEASUREMENT, new Callback() { + @Override + public void on(Node[] measurement) { + System.out.println("receive answer"); + System.out.println(Arrays.toString(measurement)); + for(Node m : measurement) { + System.out.println("Measurement: " + m); + m.findAll(Measurement.IDX_REL_METHOD, new Callback() { + @Override + public void on(Node[] methods) { + + for (int i = 0; i < methods.length; i++) { + System.out.println("\tMethod: " + methods[i]); + methods[i].rel(Method.REL_POWER, new Callback() { + @Override + public void on(Node[] power) { + for (int p = 0; p < power.length; p++) { + System.out.println("\t\tPower value: " + power[p] + " => " + power[p].get(PolynomialAggregatorNode.ATT_VALUE)); + } + } + }); + + methods[i].rel(Method.REL_DISK, new Callback() { + @Override + public void on(Node[] disk) { + for (int d = 0; d < disk.length; d++) { + System.out.println("\t\tDisk value: " + disk[d] + " => " + disk[d].get(PolynomialAggregatorNode.ATT_VALUE)); + } + } + }); + + methods[i].findAll(Method.IDX_REL_THREAD, new Callback() { + @Override + public void on(Node[] thread) { + for (Node t : thread) { + System.out.println("\t\t\tThread: " + t); + t.rel(Thread.REL_CPU_MEASURE, new Callback() { + @Override + public void on(Node[] measure) { + for (Node m : measure) { + System.out.println("\t\t\t\tCpu Measure: " + m + " => " + m.get(PolynomialNode.VALUE)); + } + } + }); + + t.rel(Thread.REL_DISK_MEASURE, new Callback() { + @Override + public void on(Node[] measure) { + for (Node m : measure) { + System.out.println("\t\t\t\tDisk Measure: " + m + " => " + m.get(PolynomialNode.VALUE)); + } + } + }); + } + } + }); + } + } + }); + } + } + }); + + + Runtime.getRuntime().addShutdownHook(new java.lang.Thread(new Runnable() { + @Override + public void run() { + System.out.println("Disconnect...."); + mwgReporter.disconnect(); + } + })); + } +} diff --git a/powerapi-core/src/test/scala/org/powerapi/core/ComponentSuite.scala b/powerapi-core/src/test/scala/org/powerapi/core/ComponentSuite.scala index 214349e..dc023fd 100644 --- a/powerapi-core/src/test/scala/org/powerapi/core/ComponentSuite.scala +++ 
b/powerapi-core/src/test/scala/org/powerapi/core/ComponentSuite.scala @@ -107,15 +107,16 @@ class ComponentSuite extends UnitTest { expectMsgPF() { case Terminated(_) => () } })(system) - EventFilter[Exception]("crash", occurrences = 1, source = supervisor.path.toString).intercept({ + EventFilter.warning(occurrences = 1, source = supervisor.path.toString).intercept({ supervisor ! Props[TestChild] child = expectMsgClass(classOf[ActorRef]) - watch(child) child ! 42 child ! "state" expectMsg(42) child ! new Exception("crash") - expectMsgPF() { case t@Terminated(_) if t.existenceConfirmed => () } + child ! 52 + child ! "state" + expectMsg(52) })(system) } @@ -146,7 +147,7 @@ class ComponentSuite extends UnitTest { supervisor ! new UnsupportedOperationException("umh, not supported") })(system) - EventFilter[Exception]("crash", occurrences = 1, source = supervisor.path.toString).intercept({ + EventFilter.warning(occurrences = 1, source = supervisor.path.toString).intercept({ supervisor ! new Exception("crash") })(system) } diff --git a/powerapi-core/src/test/scala/org/powerapi/module/libpfm/LibpfmInterruptionCoreModulesSuite.scala b/powerapi-core/src/test/scala/org/powerapi/module/libpfm/LibpfmInterruptionCoreModulesSuite.scala new file mode 100644 index 0000000..04511e1 --- /dev/null +++ b/powerapi-core/src/test/scala/org/powerapi/module/libpfm/LibpfmInterruptionCoreModulesSuite.scala @@ -0,0 +1,99 @@ +/* + * This software is licensed under the GNU Affero General Public License, quoted below. + * + * This file is a part of PowerAPI. + * + * Copyright (C) 2011-2016 Inria, University of Lille 1. + * + * PowerAPI is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of + * the License, or (at your option) any later version. + * + * PowerAPI is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with PowerAPI. + * + * If not, please consult http://www.gnu.org/licenses/agpl-3.0.html. 
+ */ +package org.powerapi.module.libpfm + +import scala.concurrent.duration.DurationInt + +import akka.util.Timeout + +import org.powerapi.UnitTest +import org.powerapi.module.libpfm.cycles.LibpfmInterruptionCoreCyclesFormula + +class LibpfmInterruptionCoreModulesSuite extends UnitTest { + + val timeout = Timeout(1.seconds) + + override def afterAll() = { + system.shutdown() + } + + "The LibpfmInterruptionCoreModule class" should "create the underlying classes (sensor/formula)" in { + val module = new LibpfmInterruptionCoreModule(Map(10 -> Set(10)), Set("e1"), "Threads", "Refs", Map(1d -> List(1d, 2d)), 10.milliseconds) + + module.sensor.get._1 should equal(classOf[LibpfmInterruptionCoreSensor]) + module.sensor.get._2.size should equal(2) + module.sensor.get._2(0) should equal(Map(10 -> Set(10))) + module.sensor.get._2(1) should equal(Set("e1")) + + module.formula.get._1 should equal(classOf[LibpfmInterruptionCoreCyclesFormula]) + module.formula.get._2.size should equal(4) + module.formula.get._2(0) should equal("Threads") + module.formula.get._2(1) should equal("Refs") + module.formula.get._2(2) should equal(Map(1d -> List(1d, 2d))) + module.formula.get._2(3) should equal(10.milliseconds) + } + + "The LibpfmInterruptionCoreModule object" should "build correctly the companion class" in { + val module1 = LibpfmInterruptionCoreModule() + val module2 = LibpfmInterruptionCoreModule(Some("libpfm")) + + val formulae = Map[Double, List[Double]]( + 12d -> List(85.7545270697, 1.10006565433e-08, -2.0341944068e-18), + 13d -> List(87.0324917754, 9.03486530986e-09, -1.31575869787e-18), + 14d -> List(86.3094440375, 1.04895773556e-08, -1.61982669617e-18), + 15d -> List(88.2194900717, 8.71468661777e-09, -1.12354133527e-18), + 16d -> List(85.8010062547, 1.05239105674e-08, -1.34813984791e-18), + 17d -> List(85.5127064474, 1.05732955159e-08, -1.28040830962e-18), + 18d -> List(85.5593567382, 1.07921513277e-08, -1.22419197787e-18), + 19d -> List(87.2004521609, 9.99728883739e-09, -9.9514346029e-19), + 20d -> List(87.7358230435, 1.00553994023e-08, -1.00002335486e-18), + 21d -> List(94.4635683042, 4.83140424765e-09, 4.25218895447e-20), + 22d -> List(104.356371072, 3.75414807806e-09, 6.73289818651e-20) + ) + + module1.sensor.get._1 should equal(classOf[LibpfmInterruptionCoreSensor]) + module1.sensor.get._2.size should equal(2) + module1.sensor.get._2(0) should equal(Map(0 -> Set(0, 4), 1 -> Set(1, 5), 2 -> Set(2, 6), 3 -> Set(3, 7))) + module1.sensor.get._2(1) should equal(Set("CPU_CLK_UNHALTED:THREAD_P", "CPU_CLK_UNHALTED:REF_P")) + + module1.formula.get._1 should equal(classOf[LibpfmInterruptionCoreCyclesFormula]) + module1.formula.get._2.size should equal(4) + module1.formula.get._2(0) should equal("Test:cyclesThreadName") + module1.formula.get._2(1) should equal("Test:cyclesRefName") + module1.formula.get._2(2) should equal(formulae) + module1.formula.get._2(3) should equal(125.milliseconds) + + + module2.sensor.get._1 should equal(classOf[LibpfmInterruptionCoreSensor]) + module2.sensor.get._2.size should equal(2) + module2.sensor.get._2(0) should equal(Map(0 -> Set(0, 4), 1 -> Set(1, 5), 2 -> Set(2, 6), 3 -> Set(3, 7))) + module2.sensor.get._2(1) should equal(Set("event")) + + module2.formula.get._1 should equal(classOf[LibpfmInterruptionCoreCyclesFormula]) + module2.formula.get._2.size should equal(4) + module2.formula.get._2(0) should equal("Test:cyclesThreadName") + module2.formula.get._2(1) should equal("Test:cyclesRefName") + module2.formula.get._2(2) should equal(Map[Double, List[Double]](1d -> 
List(10.0, 1.0e-08, -4.0e-18))) + module2.formula.get._2(3) should equal(10.milliseconds) + } +} diff --git a/powerapi-core/src/test/scala/org/powerapi/module/libpfm/LibpfmInterruptionCoreSensorSuite.scala b/powerapi-core/src/test/scala/org/powerapi/module/libpfm/LibpfmInterruptionCoreSensorSuite.scala new file mode 100644 index 0000000..74d335f --- /dev/null +++ b/powerapi-core/src/test/scala/org/powerapi/module/libpfm/LibpfmInterruptionCoreSensorSuite.scala @@ -0,0 +1,222 @@ +/* + * This software is licensed under the GNU Affero General Public License, quoted below. + * + * This file is a part of PowerAPI. + * + * Copyright (C) 2011-2016 Inria, University of Lille 1. + * + * PowerAPI is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of + * the License, or (at your option) any later version. + * + * PowerAPI is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with PowerAPI. + * + * If not, please consult http://www.gnu.org/licenses/agpl-3.0.html. + */ +package org.powerapi.module.libpfm + +import java.util.UUID + +import scala.concurrent.Await +import scala.concurrent.duration.DurationInt +import akka.actor.Props +import akka.testkit.{EventFilter, TestActorRef} +import akka.pattern.gracefulStop +import akka.util.Timeout +import com.google.protobuf.ByteString +import org.powerapi.UnitTest +import org.powerapi.core.{MessageBus, Tick} +import org.powerapi.core.target.{Application, Process} +import org.powerapi.core.MonitorChannel.publishMonitorTick +import org.powerapi.module.SensorChannel.{startSensor, stopSensor} +import org.powerapi.module.Sensors +import org.powerapi.module.libpfm.PayloadProtocol.{MapEntry, Payload} +import org.powerapi.module.libpfm.PCInterruptionChannel.{InterruptionHWCounter, InterruptionPCReport, subscribeInterruptionPCReport} + +class LibpfmInterruptionCoreSensorSuite extends UnitTest { + + val timeout = Timeout(20.seconds) + val topology = Map(0 -> Set(0, 1), 1 -> Set(2, 3)) + val events = Set("event", "event1") + + override def afterAll() = { + system.shutdown() + } + + trait Bus { + val eventBus = new MessageBus + } + + "A LibpfmInterruptionCoreSensor" should "handle MonitorTick messages and exploit extended ticks from a PowerAPI agent" in new Bus { + val muid = UUID.randomUUID() + val target = Process(10) + + val sensors = TestActorRef(Props(classOf[Sensors], eventBus), "sensors") + subscribeInterruptionPCReport(muid, target)(eventBus)(testActor) + + EventFilter.info(occurrences = 1, start = s"sensor is started, class: ${classOf[LibpfmInterruptionCoreSensor].getName}").intercept({ + startSensor(muid, target, classOf[LibpfmInterruptionCoreSensor], Seq(eventBus, muid, target, topology, events))(eventBus) + }) + + val payload1 = Payload.newBuilder().setCore(0) + .setPid(target.pid) + .setTid(10) + .setTimestamp(System.nanoTime()) + .addCounters(0, MapEntry.newBuilder().setKey("event").setValue(1000)) + .addCounters(1, MapEntry.newBuilder().setKey("event1").setValue(10)) + .addTraces("c") + .addTraces("b") + .addTraces("a") + .addTraces("main") + .build() + val payload2 = Payload.newBuilder().setCore(0) + .setPid(target.pid) + .setTid(11) + 
.setTimestamp(System.nanoTime() + 1000000000) + .addCounters(0, MapEntry.newBuilder().setKey("event").setValue(1100)) + .addCounters(1, MapEntry.newBuilder().setKey("event1").setValue(11)) + .addTraces("d") + .addTraces("c") + .addTraces("b") + .addTraces("a") + .addTraces("main") + .build() + val payload3 = Payload.newBuilder().setCore(1) + .setPid(target.pid) + .setTid(13) + .setTimestamp(System.nanoTime() + 2000000000) + .addCounters(0, MapEntry.newBuilder().setKey("event").setValue(1300)) + .addCounters(1, MapEntry.newBuilder().setKey("event1").setValue(13)) + .addTraces("z") + .addTraces("main") + .build() + val payload4 = Payload.newBuilder().setCore(1) + .setPid(target.pid) + .setTid(13) + .setTimestamp(0l) + .addCounters(0, MapEntry.newBuilder().setKey("event").setValue(0)) + .addCounters(1, MapEntry.newBuilder().setKey("event1").setValue(0)) + .build() + val payload5 = Payload.newBuilder().setCore(1) + .setPid(target.pid) + .setTid(14) + .setTimestamp(0l) + .addCounters(0, MapEntry.newBuilder().setKey("event").setValue(0)) + .addCounters(1, MapEntry.newBuilder().setKey("event1").setValue(0)) + .build() + val payload6 = Payload.newBuilder().setCore(1) + .setPid(target.pid) + .setTid(15) + .setTimestamp(System.nanoTime() + 3000000000l) + .addCounters(0, MapEntry.newBuilder().setKey("event").setValue(1500)) + .addCounters(1, MapEntry.newBuilder().setKey("event1").setValue(15)) + .addTraces("c") + .addTraces("b") + .addTraces("main") + .build() + val payload7 = Payload.newBuilder().setCore(2) + .setPid(target.pid) + .setTid(16) + .setTimestamp(System.nanoTime() + 4000000000l) + .addCounters(0, MapEntry.newBuilder().setKey("event").setValue(1600)) + .addCounters(1, MapEntry.newBuilder().setKey("event1").setValue(16)) + .addTraces("d") + .addTraces("main") + .build() + + val tick1 = AgentTick("test", payload1.getTimestamp, payload1) + val tick2 = AgentTick("test", payload2.getTimestamp, payload2) + val tick3 = AgentTick("test", payload3.getTimestamp, payload3) + val tick4 = AgentTick("test", payload4.getTimestamp, payload4) + val tick5 = AgentTick("test", payload5.getTimestamp, payload5) + val tick6 = AgentTick("test", payload6.getTimestamp, payload6) + val tick7 = AgentTick("test", payload7.getTimestamp, payload7) + + publishMonitorTick(muid, target, tick1)(eventBus) + expectMsgClass(classOf[InterruptionPCReport]) match { + case InterruptionPCReport(_, _muid, _target, wrappers, _tick) => + _muid should equal(muid) + _target should equal(target) + _tick.timestamp should equal(tick1.timestamp) + wrappers.size should equal(events.size) + wrappers.filter(wrapper => wrapper.core == 0 && wrapper.event == "event").head.values should + contain theSameElementsAs Seq(InterruptionHWCounter(0, 10, "main.a.b.c", 1000, true)) + wrappers.filter(wrapper => wrapper.core == 0 && wrapper.event == "event1").head.values should + contain theSameElementsAs Seq(InterruptionHWCounter(0, 10, "main.a.b.c", 10, true)) + } + + publishMonitorTick(muid, target, tick2)(eventBus) + expectMsgClass(classOf[InterruptionPCReport]) match { + case InterruptionPCReport(_, _muid, _target, wrappers, _tick) => + _muid should equal(muid) + _target should equal(target) + _tick.timestamp should equal(tick2.timestamp) + wrappers.size should equal(events.size) + wrappers.filter(wrapper => wrapper.core == 0 && wrapper.event == "event").head.values should + contain theSameElementsAs Seq(InterruptionHWCounter(0, 11, "main.a.b.c.d", 1100, true)) + wrappers.filter(wrapper => wrapper.core == 0 && wrapper.event == "event1").head.values 
should + contain theSameElementsAs Seq(InterruptionHWCounter(0, 11, "main.a.b.c.d", 11, true)) + } + + publishMonitorTick(muid, target, tick3)(eventBus) + expectMsgClass(classOf[InterruptionPCReport]) match { + case InterruptionPCReport(_, _muid, _target, wrappers, _tick) => + _muid should equal(muid) + _target should equal(target) + _tick.timestamp should equal(tick3.timestamp) + wrappers.size should equal(events.size) + wrappers.filter(wrapper => wrapper.core == 0 && wrapper.event == "event").head.values should + contain theSameElementsAs Seq(InterruptionHWCounter(0, 11, "main.a.b.c.d", 1100, false), InterruptionHWCounter(1, 13, "main.z", 1300, true)) + wrappers.filter(wrapper => wrapper.core == 0 && wrapper.event == "event1").head.values should + contain theSameElementsAs Seq(InterruptionHWCounter(0, 11, "main.a.b.c.d", 11, false), InterruptionHWCounter(1, 13, "main.z", 13, true)) + } + + publishMonitorTick(muid, target, tick4)(eventBus) + publishMonitorTick(muid, target, tick5)(eventBus) + publishMonitorTick(muid, target, new Tick { val topic = ""; val timestamp = 0l })(eventBus) + expectNoMsg() + + publishMonitorTick(muid, target, tick6)(eventBus) + expectMsgClass(classOf[InterruptionPCReport]) match { + case InterruptionPCReport(_, _muid, _target, wrappers, _tick) => + _muid should equal(muid) + _target should equal(target) + _tick.timestamp should equal(tick6.timestamp) + wrappers.size should equal(events.size) + wrappers.filter(wrapper => wrapper.core == 0 && wrapper.event == "event").head.values should + contain theSameElementsAs Seq(InterruptionHWCounter(0, 11, "main.a.b.c.d", 1100, false), InterruptionHWCounter(1, 15, "main.b.c", 1500, true)) + wrappers.filter(wrapper => wrapper.core == 0 && wrapper.event == "event1").head.values should + contain theSameElementsAs Seq(InterruptionHWCounter(0, 11, "main.a.b.c.d", 11, false), InterruptionHWCounter(1, 15, "main.b.c", 15, true)) + } + + publishMonitorTick(muid, target, tick7)(eventBus) + expectMsgClass(classOf[InterruptionPCReport]) match { + case InterruptionPCReport(_, _muid, _target, wrappers, _tick) => + _muid should equal(muid) + _target should equal(target) + _tick.timestamp should equal(tick7.timestamp) + wrappers.size should equal(events.size * 2) + wrappers.filter(wrapper => wrapper.core == 0 && wrapper.event == "event").head.values should + contain theSameElementsAs Seq(InterruptionHWCounter(0, 11, "main.a.b.c.d", 1100, false), InterruptionHWCounter(1, 15, "main.b.c", 1500, false)) + wrappers.filter(wrapper => wrapper.core == 0 && wrapper.event == "event1").head.values should + contain theSameElementsAs Seq(InterruptionHWCounter(0, 11, "main.a.b.c.d", 11, false), InterruptionHWCounter(1, 15, "main.b.c", 15, false)) + wrappers.filter(wrapper => wrapper.core == 1 && wrapper.event == "event").head.values should + contain theSameElementsAs Seq(InterruptionHWCounter(2, 16, "main.d", 1600, true)) + wrappers.filter(wrapper => wrapper.core == 1 && wrapper.event == "event1").head.values should + contain theSameElementsAs Seq(InterruptionHWCounter(2, 16, "main.d", 16, true)) + } + + EventFilter.info(occurrences = 1, start = s"sensor is stopped, class: ${classOf[LibpfmInterruptionCoreSensor].getName}").intercept({ + stopSensor(muid)(eventBus) + }) + + Await.result(gracefulStop(sensors, timeout.duration), timeout.duration) + } +} diff --git a/powerapi-core/src/test/scala/org/powerapi/module/libpfm/cycles/LibpfmInterruptionCoreCyclesFormulaSuite.scala 
b/powerapi-core/src/test/scala/org/powerapi/module/libpfm/cycles/LibpfmInterruptionCoreCyclesFormulaSuite.scala new file mode 100644 index 0000000..f11ef4f --- /dev/null +++ b/powerapi-core/src/test/scala/org/powerapi/module/libpfm/cycles/LibpfmInterruptionCoreCyclesFormulaSuite.scala @@ -0,0 +1,143 @@ +/* + * This software is licensed under the GNU Affero General Public License, quoted below. + * + * This file is a part of PowerAPI. + * + * Copyright (C) 2011-2016 Inria, University of Lille 1. + * + * PowerAPI is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of + * the License, or (at your option) any later version. + * + * PowerAPI is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with PowerAPI. + * + * If not, please consult http://www.gnu.org/licenses/agpl-3.0.html. + */ +package org.powerapi.module.libpfm.cycles + +import java.util.UUID + +import scala.concurrent.Await +import scala.concurrent.duration.DurationInt + +import akka.actor.Props +import akka.testkit.{EventFilter, TestActorRef} +import akka.pattern.gracefulStop +import akka.util.Timeout + +import org.powerapi.UnitTest +import org.powerapi.core.{Tick, MessageBus} +import org.powerapi.core.power._ +import org.powerapi.core.target.Process +import org.powerapi.module.FormulaChannel.{stopFormula, startFormula} +import org.powerapi.module.Formulas +import org.powerapi.module.PowerChannel.{RawPowerReport, subscribeRawPowerReport} +import org.powerapi.module.libpfm.PCInterruptionChannel.{InterruptionTick, InterruptionHWCounter, InterruptionPCWrapper, publishInterruptionPCReport} +import org.powerapi.module.libpfm.TID + +class LibpfmInterruptionCoreCyclesFormulaSuite extends UnitTest { + + val timeout = Timeout(1.seconds) + + override def afterAll() = { + system.shutdown() + } + + trait Bus { + val eventBus = new MessageBus + } + + trait Formulae { + var formulae = Map[Double, List[Double]]() + formulae += 12d -> List(85.7545270697, 1.10006565433e-08, -2.0341944068e-18) + formulae += 13d -> List(87.0324917754, 9.03486530986e-09, -1.31575869787e-18) + formulae += 14d -> List(86.3094440375, 1.04895773556e-08, -1.61982669617e-18) + formulae += 15d -> List(88.2194900717, 8.71468661777e-09, -1.12354133527e-18) + formulae += 16d -> List(85.8010062547, 1.05239105674e-08, -1.34813984791e-18) + formulae += 17d -> List(85.5127064474, 1.05732955159e-08, -1.28040830962e-18) + formulae += 18d -> List(85.5593567382, 1.07921513277e-08, -1.22419197787e-18) + formulae += 19d -> List(87.2004521609, 9.99728883739e-09, -9.9514346029e-19) + formulae += 20d -> List(87.7358230435, 1.00553994023e-08, -1.00002335486e-18) + formulae += 21d -> List(94.4635683042, 4.83140424765e-09, 4.25218895447e-20) + formulae += 22d -> List(104.356371072, 3.75414807806e-09, 6.73289818651e-20) + } + + "A LibpfmInterruptionCoreCyclesFormula" should "process a SensorReport and then publish a RawPowerReport" in new Bus with Formulae { + val muid = UUID.randomUUID() + val target = Process(1) + + val tick1 = new Tick { + val topic = "test" + val timestamp = System.nanoTime() + } + + val tick2 = new Tick { + val topic = "test" + val timestamp = System.nanoTime() + 1000 + 
} + + var wrappers = Seq[InterruptionPCWrapper]() + wrappers +:= InterruptionPCWrapper(0, "thread_p", Seq(InterruptionHWCounter(0, 10, "main.a.b", 650000000, false), InterruptionHWCounter(1, 11, "main.z", 651000000, false))) + wrappers +:= InterruptionPCWrapper(0, "ref_p", Seq(InterruptionHWCounter(0, 10, "main.a.b", 34475589, false), InterruptionHWCounter(1, 11, "main.z", 34075589, false))) + wrappers +:= InterruptionPCWrapper(1, "thread_p", Seq(InterruptionHWCounter(2, 12, "main.e.e.e", 240000000, true))) + wrappers +:= InterruptionPCWrapper(1, "ref_p", Seq(InterruptionHWCounter(2, 12, "main.e.e.e", 15475589, true))) + + val formulas = TestActorRef(Props(classOf[Formulas], eventBus), "formulas") + EventFilter.info(occurrences = 1, start = s"formula is started, class: ${classOf[LibpfmInterruptionCoreCyclesFormula].getName}").intercept({ + startFormula(muid, target, classOf[LibpfmInterruptionCoreCyclesFormula], Seq(eventBus, muid, target, "thread_p", "ref_p", formulae, 250.millis))(eventBus) + }) + subscribeRawPowerReport(muid)(eventBus)(testActor) + + publishInterruptionPCReport(muid, target, wrappers, tick1)(eventBus) + val messages = receiveN(3).asInstanceOf[Seq[RawPowerReport]] + val msg1 = messages.filter(_.tick.asInstanceOf[InterruptionTick].tid == TID(10)).head + val tickMsg1 = msg1.tick.asInstanceOf[InterruptionTick] + msg1.muid should equal(muid) + msg1.target should equal(target) + msg1.power should be > 0.W + msg1.device should equal("cpu") + tickMsg1.cpu should equal(0) + tickMsg1.fullMethodName should equal("main.a.b") + tickMsg1.tid should equal(TID(10)) + tickMsg1.timestamp should equal(tick1.timestamp) + tickMsg1.triggering should equal(false) + val msg2 = messages.filter(_.tick.asInstanceOf[InterruptionTick].tid == TID(11)).head + val tickMsg2 = msg2.tick.asInstanceOf[InterruptionTick] + msg2.muid should equal(muid) + msg2.target should equal(target) + msg2.power should be > 0.W + msg2.device should equal("cpu") + tickMsg2.cpu should equal(1) + tickMsg2.fullMethodName should equal("main.z") + tickMsg2.tid should equal(TID(11)) + tickMsg2.timestamp should equal(tick1.timestamp) + tickMsg2.triggering should equal(false) + val msg3 = messages.filter(_.tick.asInstanceOf[InterruptionTick].tid == TID(12)).head + val tickMsg3 = msg3.tick.asInstanceOf[InterruptionTick] + msg3.muid should equal(muid) + msg3.target should equal(target) + msg3.power should be > 0.W + msg3.device should equal("cpu") + tickMsg3.cpu should equal(2) + tickMsg3.fullMethodName should equal("main.e.e.e") + tickMsg3.tid should equal(TID(12)) + tickMsg3.timestamp should equal(tick1.timestamp) + tickMsg3.triggering should equal(true) + + EventFilter.info(occurrences = 1, start = s"formula is stopped, class: ${classOf[LibpfmInterruptionCoreCyclesFormula].getName}").intercept({ + stopFormula(muid)(eventBus) + }) + + publishInterruptionPCReport(muid, target, wrappers, tick2)(eventBus) + expectNoMsg() + + Await.result(gracefulStop(formulas, timeout.duration), timeout.duration) + } +} diff --git a/project/PowerApiBuild.scala b/project/PowerApiBuild.scala index 4e28dc5..be99473 100644 --- a/project/PowerApiBuild.scala +++ b/project/PowerApiBuild.scala @@ -63,10 +63,14 @@ object PowerApiBuild extends Build { lazy val powerapi: sbt.Project = Project(id = "powerapi", base = file(".")).settings(buildSettings: _*).aggregate(powerapiCore, powerapiCli, powerapiDaemon, powerapiSampling) - lazy val powerapiCore = Project(id = "powerapi-core", base = file("powerapi-core")).settings(buildSettings: _*) + lazy val mwgPlugin = 
Project(id="mwg-plugin",base = file("mwg-plugin")).settings(buildSettings: _*) + lazy val mwgServer = Project(id="mwg-server",base = file("mwg-server")).settings(buildSettings: _*) + + lazy val powerapiCore = Project(id = "powerapi-core", base = file("powerapi-core")).settings(buildSettings: _*).dependsOn(mwgPlugin % "compile -> compile") lazy val powerapiCli = Project(id = "powerapi-cli", base = file("powerapi-cli")).settings(buildSettings: _*).dependsOn(powerapiCore % "compile -> compile; test -> test").enablePlugins(JavaAppPackaging) lazy val powerapiDaemon = Project(id = "powerapi-daemon", base = file("powerapi-daemon")).settings(buildSettings: _*).dependsOn(powerapiCore % "compile -> compile; test -> test").enablePlugins(JavaServerAppPackaging) lazy val powerapiSampling = Project(id = "powerapi-sampling", base = file("powerapi-sampling")).settings(buildSettings: _*).dependsOn(powerapiCore % "compile -> compile; test -> test").enablePlugins(JavaAppPackaging) + lazy val powerapiCodeEnergyAnalysis = Project(id = "powerapi-code-energy-analysis", base = file("powerapi-code-energy-analysis")).settings(buildSettings: _*).dependsOn(powerapiCore % "compile -> compile; test -> test").enablePlugins(JavaAppPackaging) // example of power meters lazy val appMonitorProcsJava = Project(id = "powerapi-example-app-monitor-procfs-java", base = file("powerapi-powermeter/AppMonitorProcFSJava")).settings(buildSettings: _*).dependsOn(powerapiCore % "compile -> compile")
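The following standalone Scala sketch is not part of the patch above; it condenses the cycles-based estimation performed by LibpfmInterruptionCoreCyclesFormula, under the assumption that the caller already scaled both cycle counts to the sampling interval. The object and method names are illustrative only.

// Illustrative only: the ratio of unhalted cycles to reference cycles selects a polynomial
// from `formulae`, and that polynomial is evaluated on the scaled cycle count to obtain
// the core power, mirroring the formula actor shown in the patch.
object CyclesPowerModelSketch {

  def estimateCorePower(formulae: Map[Double, List[Double]], scaledCycles: Long, scaledRefs: Long): Double = {
    // Frequency coefficient = unhalted cycles / reference cycles, clamped to the modeled range.
    var coefficient: Double = math.round(scaledCycles / scaledRefs.toDouble).toDouble
    if (coefficient.isNaN || coefficient < formulae.keys.min) coefficient = formulae.keys.min
    if (coefficient > formulae.keys.max) coefficient = formulae.keys.max
    // When there is no exact entry, fall back to the closest smaller coefficient.
    if (!formulae.contains(coefficient)) coefficient = formulae.keys.filter(_ < coefficient).max

    // Drop the constant term (idle power) and evaluate the polynomial in the scaled cycle count.
    val formula = formulae(coefficient).updated(0, 0d)
    formula.zipWithIndex.foldLeft(0d)((acc, t) => acc + t._1 * math.pow(scaledCycles, t._2))
  }

  def main(args: Array[String]): Unit = {
    // Two made-up coefficient entries (constant, linear, quadratic terms), for demonstration only.
    val formulae = Map(12d -> List(85.75, 1.10e-08, -2.03e-18), 19d -> List(87.20, 1.00e-08, -9.95e-19))
    // E.g. 650e6 unhalted cycles against 34e6 reference cycles over one sampling interval.
    println(estimateCorePower(formulae, 650000000L, 34000000L))
  }
}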