DRILL-4258: Add threads, fragments, and queries system tables #479

Open. Wants to merge 1 commit into base: master.
org/apache/drill/exec/ops/ThreadStatCollector.java (new file)
@@ -0,0 +1,124 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p/>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p/>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.drill.exec.ops;

import com.carrotsearch.hppc.LongObjectHashMap;
import com.carrotsearch.hppc.procedures.LongObjectProcedure;

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadMXBean;
import java.util.AbstractMap.SimpleEntry;
import java.util.Deque;
import java.util.Iterator;
import java.util.Map.Entry;
import java.util.concurrent.ConcurrentLinkedDeque;

public class ThreadStatCollector implements Runnable {
Review comment (Contributor): There is a thread (WorkManager.StatusThread) that does periodic tasks (currently updates fragment statuses); how about we add this as another task?

  private static final long ONE_BILLION = 1000000000;
  private static final long RETAIN_INTERVAL = 5 * ONE_BILLION;   // nanoseconds of history to keep
  private static final int COLLECTION_INTERVAL = 1;              // seconds between samples

  private ThreadMXBean mxBean = ManagementFactory.getThreadMXBean();
  private ThreadStat cpuStat = new ThreadStat();
  private ThreadStat userStat = new ThreadStat();

  @Override
  public void run() {
    while (true) {
      try {
        Thread.sleep(COLLECTION_INTERVAL * 1000);
        addCpuTime();
        addUserTime();
      } catch (InterruptedException e) {
        return;
      }
    }
  }

  public Integer getCpuTrailingAverage(long id, int seconds) {
    return cpuStat.getTrailingAverage(id, seconds);
  }

  public Integer getUserTrailingAverage(long id, int seconds) {
    return userStat.getTrailingAverage(id, seconds);
  }

  private void addCpuTime() {
    for (long id : mxBean.getAllThreadIds()) {
      cpuStat.add(id, System.nanoTime(), mxBean.getThreadCpuTime(id));
    }
  }

  private void addUserTime() {
    for (long id : mxBean.getAllThreadIds()) {
      userStat.add(id, System.nanoTime(), mxBean.getThreadUserTime(id));
    }
  }

  private static class ThreadStat {
    volatile LongObjectHashMap<Deque<Entry<Long,Long>>> data = new LongObjectHashMap<>();

    public void add(long id, long ts, long value) {
      Entry<Long,Long> entry = new SimpleEntry<>(ts, value);
      Deque<Entry<Long,Long>> list = data.get(id);
      if (list == null) {
        list = new ConcurrentLinkedDeque<>();
      }
      list.add(entry);
      // Drop samples that have aged out of the retention window.
      while (ts - list.peekFirst().getKey() > RETAIN_INTERVAL) {
        list.removeFirst();
      }
      data.put(id, list);
    }

    public Integer getTrailingAverage(long id, int seconds) {
      Deque<Entry<Long,Long>> list = data.get(id);
      if (list == null) {
        return null;
      }
      return getTrailingAverage(list, seconds);
    }

    private Integer getTrailingAverage(Deque<Entry<Long, Long>> list, int seconds) {
      Entry<Long,Long> latest = list.peekLast();
      Entry<Long,Long> old = list.peekFirst();
      Iterator<Entry<Long,Long>> iter = list.descendingIterator();
      while (iter.hasNext()) {
        Entry<Long,Long> e = iter.next();
        // Walk back from the newest sample until one falls outside the requested window.
        if (latest.getKey() - e.getKey() > seconds * ONE_BILLION) {
          old = e;
          break;
        }
      }
      try {
        return (int) (100 * (old.getValue() - latest.getValue()) / (old.getKey() - latest.getKey()));
      } catch (Exception e) {
        return null;
      }
    }

    public void print(final int window) {
      data.forEach(new LongObjectProcedure<Deque<Entry<Long,Long>>>() {
        @Override
        public void apply(long l, Deque<Entry<Long,Long>> entries) {
          System.out.println(String.format("%d %d", l, getTrailingAverage(entries, window)));
        }
      });
    }
  }
}
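For orientation, a minimal usage sketch of the collector added above. The wiring below (a dedicated single-thread executor and the 3-second window) is illustrative only and not part of this PR; the collector samples once per second and retains roughly five seconds of history, so trailing averages become meaningful only after a few samples.

import java.lang.management.ManagementFactory;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.apache.drill.exec.ops.ThreadStatCollector;

public class ThreadStatCollectorExample {
  public static void main(String[] args) throws InterruptedException {
    ThreadStatCollector collector = new ThreadStatCollector();
    ExecutorService executor = Executors.newSingleThreadExecutor();
    executor.submit(collector);      // same submit pattern Drillbit.run() uses in this PR

    Thread.sleep(5000);              // let a few one-second samples accumulate

    for (long id : ManagementFactory.getThreadMXBean().getAllThreadIds()) {
      // Trailing averages over the last 3 seconds, as integer percentages;
      // null is returned if no samples exist yet for a thread.
      Integer cpu = collector.getCpuTrailingAverage(id, 3);
      Integer user = collector.getUserTrailingAverage(id, 3);
      System.out.printf("thread %d: cpu=%s%% user=%s%%%n", id, cpu, user);
    }

    executor.shutdownNow();          // the interrupt ends the collector's loop
  }
}

In the PR itself, Drillbit.run() submits the collector to the BootStrapContext executor, as shown in the Drillbit.java hunk below.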
BootStrapContext.java
@@ -31,6 +31,7 @@
import org.apache.drill.exec.memory.BufferAllocator;
import org.apache.drill.exec.memory.RootAllocatorFactory;
import org.apache.drill.exec.metrics.DrillMetrics;
import org.apache.drill.exec.ops.ThreadStatCollector;
import org.apache.drill.exec.rpc.NamedThreadFactory;
import org.apache.drill.exec.rpc.TransportCheck;

@@ -47,6 +48,8 @@ public class BootStrapContext implements AutoCloseable {
  private final ScanResult classpathScan;
  private final ExecutorService executor;

  private final ThreadStatCollector threadStatCollector;

  public BootStrapContext(DrillConfig config, ScanResult classpathScan) {
    this.config = config;
    this.classpathScan = classpathScan;
@@ -65,6 +68,7 @@ protected void afterExecute(final Runnable r, final Throwable t) {
        super.afterExecute(r, t);
      }
    };
    this.threadStatCollector = new ThreadStatCollector();
  }

  public ExecutorService getExecutor() {
@@ -95,6 +99,10 @@ public ScanResult getClasspathScan() {
    return classpathScan;
  }

  public ThreadStatCollector getThreadStatCollector() {
    return threadStatCollector;
  }

  @Override
  public void close() {
    try {
Drillbit.java
@@ -118,6 +118,7 @@ public void run() throws Exception {
    javaPropertiesToSystemOptions();
    registrationHandle = coord.register(md);
    webServer.start();
    context.getExecutor().submit(context.getThreadStatCollector());

    Runtime.getRuntime().addShutdownHook(new ShutdownThread(this, new StackTrace()));
    logger.info("Startup completed ({} ms).", w.elapsed(TimeUnit.MILLISECONDS));
DrillbitContext.java
@@ -30,6 +30,7 @@
import org.apache.drill.exec.coord.ClusterCoordinator;
import org.apache.drill.exec.expr.fn.FunctionImplementationRegistry;
import org.apache.drill.exec.memory.BufferAllocator;
import org.apache.drill.exec.ops.ThreadStatCollector;
import org.apache.drill.exec.physical.impl.OperatorCreatorRegistry;
import org.apache.drill.exec.planner.PhysicalPlanReader;
import org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint;
@@ -42,6 +43,7 @@
import org.apache.drill.exec.store.sys.PersistentStoreProvider;

import com.codahale.metrics.MetricRegistry;
import org.apache.drill.exec.work.WorkManager;

public class DrillbitContext implements AutoCloseable {
  // private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(DrillbitContext.class);
@@ -61,6 +63,7 @@ public class DrillbitContext {
  private final CodeCompiler compiler;
  private final ScanResult classpathScan;
  private final LogicalPlanPersistence lpPersistence;
  private final WorkManager workManager;


  public DrillbitContext(
@@ -70,7 +73,8 @@ public DrillbitContext(
      Controller controller,
      DataConnectionCreator connectionsPool,
      WorkEventBus workBus,
      PersistentStoreProvider provider) {
      PersistentStoreProvider provider,
      WorkManager workManager) {
    this.classpathScan = context.getClasspathScan();
    this.workBus = workBus;
    this.controller = checkNotNull(controller);
@@ -79,6 +83,7 @@ public DrillbitContext(
    this.connectionsPool = checkNotNull(connectionsPool);
    this.endpoint = checkNotNull(endpoint);
    this.provider = provider;
    this.workManager = workManager;
    this.lpPersistence = new LogicalPlanPersistence(context.getConfig(), classpathScan);

    // TODO remove escaping "this".
@@ -172,6 +177,10 @@ public ExecutorService getExecutor() {
    return context.getExecutor();
  }

  public ThreadStatCollector getThreadStatCollector() {
    return context.getThreadStatCollector();
  }

  public LogicalPlanPersistence getLpPersistence() {
    return lpPersistence;
  }
@@ -180,6 +189,10 @@ public ScanResult getClasspathScan() {
    return classpathScan;
  }

  public WorkManager getWorkManager() {
    return workManager;
  }

  @Override
  public void close() throws Exception {
    getOptionManager().close();
org/apache/drill/exec/store/sys/FragmentIterator.java (new file)
@@ -0,0 +1,98 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p/>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p/>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.drill.exec.store.sys;

import com.google.common.collect.ImmutableList;
import org.apache.drill.exec.ops.FragmentContext;
import org.apache.drill.exec.proto.CoordinationProtos;
import org.apache.drill.exec.proto.UserBitShared.MinorFragmentProfile;
import org.apache.drill.exec.proto.UserBitShared.OperatorProfile;
import org.apache.drill.exec.proto.UserBitShared.StreamProfile;
import org.apache.drill.exec.proto.helper.QueryIdHelper;
import org.apache.drill.exec.server.DrillbitContext;
import org.apache.drill.exec.work.WorkManager;
import org.apache.drill.exec.work.fragment.FragmentExecutor;

import java.sql.Timestamp;
import java.util.Collection;
import java.util.Iterator;

/**
 * Iterator which returns a {@link FragmentInfo} for every fragment running in this drillbit.
 */
public class FragmentIterator implements Iterator<Object> {
  private final WorkManager workManager;
  private final Iterator<FragmentExecutor> iter;

  public FragmentIterator(FragmentContext c) {
    this.workManager = c.getDrillbitContext().getWorkManager();
    iter = ImmutableList.copyOf(workManager.getRunningFragments()).iterator();
  }

  @Override
  public boolean hasNext() {
    return iter.hasNext();
  }

  @Override
  public Object next() {
    FragmentExecutor fragmentExecutor = iter.next();
    MinorFragmentProfile profile = fragmentExecutor.getStatus().getProfile();
    FragmentInfo fragmentInfo = new FragmentInfo();
    fragmentInfo.hostname = workManager.getContext().getEndpoint().getAddress();
    fragmentInfo.queryId = QueryIdHelper.getQueryId(fragmentExecutor.getContext().getHandle().getQueryId());
    fragmentInfo.majorFragmentId = fragmentExecutor.getContext().getHandle().getMajorFragmentId();
    fragmentInfo.minorFragmentId = fragmentExecutor.getContext().getHandle().getMinorFragmentId();
    fragmentInfo.rowsProcessed = getRowsProcessed(profile);
    fragmentInfo.memoryUsage = profile.getMemoryUsed();
    fragmentInfo.startTime = new Timestamp(profile.getStartTime());
    return fragmentInfo;
  }

  // Report the largest per-operator sum of input records as the fragment's row count.
  private long getRowsProcessed(MinorFragmentProfile profile) {
    long maxRecords = 0;
    for (OperatorProfile operatorProfile : profile.getOperatorProfileList()) {
      long records = 0;
      for (StreamProfile inputProfile : operatorProfile.getInputProfileList()) {
        if (inputProfile.hasRecords()) {
          records += inputProfile.getRecords();
        }
      }
      maxRecords = Math.max(maxRecords, records);
    }
    return maxRecords;
  }

  @Override
  public void remove() {
    throw new UnsupportedOperationException();
  }

  public static class FragmentInfo {
    public String hostname;
    public String queryId;
    public int majorFragmentId;
    public int minorFragmentId;
    public Long memoryUsage;
    /**
     * The maximum number of input records across all operators in the fragment.
     */
    public Long rowsProcessed;
    public Timestamp startTime;
  }
}
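Since this class is the iterator intended to back a fragments system table (per the PR title), here is a hedged sketch of direct usage. The FragmentContext parameter is assumed to be supplied by an executing query; end users would normally reach this data through the system table rather than by calling the class themselves, and the registration of the table is not part of this excerpt.

import org.apache.drill.exec.ops.FragmentContext;
import org.apache.drill.exec.store.sys.FragmentIterator;
import org.apache.drill.exec.store.sys.FragmentIterator.FragmentInfo;

public class FragmentIteratorExample {
  // Illustrative only: the FragmentContext is assumed to come from a running query.
  public static void printRunningFragments(FragmentContext context) {
    FragmentIterator fragments = new FragmentIterator(context);
    while (fragments.hasNext()) {
      FragmentInfo info = (FragmentInfo) fragments.next();
      System.out.printf("%s %s %d:%d rows=%d mem=%d start=%s%n",
          info.hostname, info.queryId,
          info.majorFragmentId, info.minorFragmentId,
          info.rowsProcessed, info.memoryUsage, info.startTime);
    }
  }
}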