diff --git a/kaas_local_jaas.conf b/kaas_local_jaas.conf new file mode 100644 index 0000000..6dc2616 --- /dev/null +++ b/kaas_local_jaas.conf @@ -0,0 +1,11 @@ +Client { + org.apache.zookeeper.server.auth.DigestLoginModule required + username="xyz" + password="xyz"; +}; + +KafkaClient { + org.apache.kafka.common.security.plain.PlainLoginModule required + username="xyz" + password="xyz"; +}; \ No newline at end of file diff --git a/pom.xml b/pom.xml index 1e73b39..2003600 100644 --- a/pom.xml +++ b/pom.xml @@ -74,6 +74,18 @@ kafka_2.9.2 0.8.2.2 + + + org.projectlombok + lombok + 1.16.6 + + + + org.apache.kafka + kafka-clients + 0.11.0.2 + org.freemarker freemarker @@ -197,7 +209,7 @@ org.apache.maven.plugins maven-compiler-plugin - 2.3.2 + 3.7.0 1.8 1.8 diff --git a/src/main/java/com/homeadvisor/kafdrop/KafDrop.java b/src/main/java/com/homeadvisor/kafdrop/KafDrop.java index 47ed3de..ad66669 100644 --- a/src/main/java/com/homeadvisor/kafdrop/KafDrop.java +++ b/src/main/java/com/homeadvisor/kafdrop/KafDrop.java @@ -21,6 +21,7 @@ import com.google.common.base.Throwables; import com.homeadvisor.kafdrop.config.ini.IniFilePropertySource; import com.homeadvisor.kafdrop.config.ini.IniFileReader; +import joptsimple.internal.Strings; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.boot.Banner; @@ -36,7 +37,12 @@ import org.springframework.web.servlet.config.annotation.ContentNegotiationConfigurer; import org.springframework.web.servlet.config.annotation.WebMvcConfigurerAdapter; -import java.io.*; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.Reader; import java.util.Objects; import java.util.stream.Stream; @@ -83,17 +89,15 @@ public void onApplicationEvent(ApplicationEnvironmentPreparedEvent event) catch (Exception ex) { System.err.println("Unable to set up logging.dir from logging.file " + loggingFile + ": " + - 
Throwables.getStackTraceAsString(ex)); + Throwables.getStackTraceAsString(ex)); } } if (environment.containsProperty("debug") && - !"false".equalsIgnoreCase(environment.getProperty("debug", String.class))) + !"false".equalsIgnoreCase(environment.getProperty("debug", String.class))) { System.setProperty(PROP_SPRING_BOOT_LOG_LEVEL, "DEBUG"); } - } - } private static class EnvironmentSetupListener implements ApplicationListener, Ordered @@ -107,19 +111,41 @@ public int getOrder() return Ordered.HIGHEST_PRECEDENCE + 10; } - @Override - public void onApplicationEvent(ApplicationEnvironmentPreparedEvent event) - { - final ConfigurableEnvironment environment = event.getEnvironment(); - if (environment.containsProperty(SM_CONFIG_DIR)) - { - Stream.of("kafdrop", "global") - .map(name -> readProperties(environment, name)) - .filter(Objects::nonNull) - .forEach(iniPropSource -> environment.getPropertySources() - .addBefore("applicationConfigurationProperties", iniPropSource)); - } - } + @Override + public void onApplicationEvent(ApplicationEnvironmentPreparedEvent event) + { + final ConfigurableEnvironment environment = event.getEnvironment(); + + LOG.info("Initializing jaas config"); + String env = environment.getProperty("kafka.env"); + Boolean isSecured = environment.getProperty("kafka.isSecured", Boolean.class); + LOG.info("env: {} .Issecured kafka: {}", env, isSecured); + if (isSecured && Strings.isNullOrEmpty(env)) { + throw new RuntimeException("'env' cannot be null if connecting to secured kafka."); + } + + LOG.info("ENV: {}", env); + String path; + + if (isSecured) { + if ((env.equalsIgnoreCase("stage") || env.equalsIgnoreCase("prod") || env.equalsIgnoreCase("local"))) { + path = environment.getProperty("user.dir") + "/kaas_" + env.toLowerCase() + "_jaas.conf"; + LOG.info("PATH: {}", path); + System.setProperty("java.security.auth.login.config", path); + } + else { + throw new RuntimeException("unable to identify env. 
set 'env' variable either to 'stage' or 'prod' or local"); + } + } + + if (environment.containsProperty(SM_CONFIG_DIR)) { + Stream.of("kafdrop", "global") + .map(name -> readProperties(environment, name)) + .filter(Objects::nonNull) + .forEach(iniPropSource -> environment.getPropertySources() + .addBefore("applicationConfigurationProperties", iniPropSource)); + } + } private IniFilePropertySource readProperties(Environment environment, String name) { diff --git a/src/main/java/com/homeadvisor/kafdrop/config/KafkaConfiguration.java b/src/main/java/com/homeadvisor/kafdrop/config/KafkaConfiguration.java new file mode 100644 index 0000000..5a78edb --- /dev/null +++ b/src/main/java/com/homeadvisor/kafdrop/config/KafkaConfiguration.java @@ -0,0 +1,22 @@ +package com.homeadvisor.kafdrop.config; + +import lombok.*; +import org.springframework.boot.context.properties.*; +import org.springframework.stereotype.*; + +/** + * Created by Satendra Sahu on 9/26/18 + */ +@Component +@ConfigurationProperties(prefix = "kafka") +@Data +public class KafkaConfiguration +{ + private String env = "local"; + private String brokerConnect; + private Boolean isSecured = false; + private String keyDeserializer; + private String valueDeserializer; + private String saslMechanism; + private String securityProtocol; +} diff --git a/src/main/java/com/homeadvisor/kafdrop/controller/ClusterController.java b/src/main/java/com/homeadvisor/kafdrop/controller/ClusterController.java index 6ed7e4b..7af3a08 100644 --- a/src/main/java/com/homeadvisor/kafdrop/controller/ClusterController.java +++ b/src/main/java/com/homeadvisor/kafdrop/controller/ClusterController.java @@ -28,10 +28,16 @@ import io.swagger.annotations.ApiResponse; import io.swagger.annotations.ApiResponses; import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.http.HttpStatus; import org.springframework.http.MediaType; import org.springframework.stereotype.Controller; import org.springframework.ui.Model; 
-import org.springframework.web.bind.annotation.*; +import org.springframework.web.bind.annotation.ExceptionHandler; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.RequestMethod; +import org.springframework.web.bind.annotation.RequestParam; +import org.springframework.web.bind.annotation.ResponseBody; +import org.springframework.web.bind.annotation.ResponseStatus; import java.util.Collections; import java.util.List; @@ -112,4 +118,10 @@ public static class ClusterInfoVO public List brokers; public List topics; } + + @ResponseStatus(HttpStatus.OK) + @RequestMapping("/health_check") + public void healthCheck() + { + } } diff --git a/src/main/java/com/homeadvisor/kafdrop/model/BrokerVO.java b/src/main/java/com/homeadvisor/kafdrop/model/BrokerVO.java index 7e8ba7b..660a552 100644 --- a/src/main/java/com/homeadvisor/kafdrop/model/BrokerVO.java +++ b/src/main/java/com/homeadvisor/kafdrop/model/BrokerVO.java @@ -18,88 +18,113 @@ package com.homeadvisor.kafdrop.model; +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.annotation.JsonProperty; import java.util.Date; +@JsonIgnoreProperties(ignoreUnknown = true) public class BrokerVO { private int id; private String host; + private String[] endpoints; private int port; private int jmxPort; private int version; private boolean controller; private Date timestamp; + public void setEndpoints(String[] endpoints) + { + this.endpoints = endpoints; + if (host == null) + { + String[] hostPort = endpoints[0].split("://")[1].split(":"); + this.host = hostPort[0]; + this.port = Integer.parseInt(hostPort[1]); + } + } + + public String[] getEndpoints() + { + return this.endpoints; + } + public int getId() { - return id; + return id; } public void setId(int id) { - this.id = id; + this.id = id; } public String getHost() { - return host; + return host; } public void setHost(String host) { - this.host = host; + if (host != null) + { + 
this.host = host; + } } public int getPort() { - return port; + return port; } public void setPort(int port) { - this.port = port; + if (port > 0) + { + this.port = port; + } } public int getJmxPort() { - return jmxPort; + return jmxPort; } @JsonProperty("jmx_port") public void setJmxPort(int jmxPort) { - this.jmxPort = jmxPort; + this.jmxPort = jmxPort; } public int getVersion() { - return version; + return version; } public void setVersion(int version) { - this.version = version; + this.version = version; } public Date getTimestamp() { - return timestamp; + return timestamp; } public void setTimestamp(Date timestamp) { - this.timestamp = timestamp; + this.timestamp = timestamp; } public boolean isController() { - return controller; + return controller; } public void setController(boolean controller) { - this.controller = controller; + this.controller = controller; } } diff --git a/src/main/java/com/homeadvisor/kafdrop/model/TopicVO.java b/src/main/java/com/homeadvisor/kafdrop/model/TopicVO.java index e48f0a5..92b184d 100644 --- a/src/main/java/com/homeadvisor/kafdrop/model/TopicVO.java +++ b/src/main/java/com/homeadvisor/kafdrop/model/TopicVO.java @@ -19,7 +19,7 @@ package com.homeadvisor.kafdrop.model; import java.util.*; -import java.util.stream.Collectors; +import java.util.stream.*; public class TopicVO implements Comparable { @@ -30,119 +30,125 @@ public class TopicVO implements Comparable // partition state // delete supported? 
- public TopicVO(String name) { - this.name = name; + this.name = name; } public String getName() { - return name; + return name; } public void setName(String name) { - this.name = name; + this.name = name; } public Map getConfig() { - return config; + return config; } public void setConfig(Map config) { - this.config = config; + this.config = config; + } + + public Map getPartitionMap() + { + return partitions; } public Collection getPartitions() { - return partitions.values(); + return partitions.values(); } public Optional getPartition(int partitionId) { - return Optional.ofNullable(partitions.get(partitionId)); + return Optional.ofNullable(partitions.get(partitionId)); } public Collection getLeaderPartitions(int brokerId) { - return partitions.values().stream() - .filter(tp -> tp.getLeader() != null && tp.getLeader().getId() == brokerId) - .collect(Collectors.toList()); + return partitions.values().stream() + .filter(tp -> tp.getLeader() != null && tp.getLeader().getId() == brokerId) + .collect(Collectors.toList()); } public Collection getUnderReplicatedPartitions() { - return partitions.values().stream() - .filter(TopicPartitionVO::isUnderReplicated) - .collect(Collectors.toList()); + return partitions.values().stream() + .filter(TopicPartitionVO::isUnderReplicated) + .collect(Collectors.toList()); } public void setPartitions(Map partitions) { - this.partitions = partitions; + this.partitions = partitions; } /** - * Returns the total number of messages published to the topic, ever - * @return - */ + * Returns the total number of messages published to the topic, ever + * @return + */ public long getTotalSize() { - return partitions.values().stream() - .map(TopicPartitionVO::getSize) - .reduce(0L, Long::sum); + return partitions.values().stream() + .map(TopicPartitionVO::getSize) + .reduce(0L, Long::sum); } /** - * Returns the total number of messages available to consume from the topic. 
- * @return - */ + * Returns the total number of messages available to consume from the topic. + * @return + */ public long getAvailableSize() { - return partitions.values().stream() - .map(p -> p.getSize() - p.getFirstOffset()) - .reduce(0L, Long::sum); + return partitions.values().stream() + .map(p -> p.getSize() - p.getFirstOffset()) + .reduce(0L, Long::sum); } public double getPreferredReplicaPercent() { - long preferredLeaderCount = partitions.values().stream() - .filter(TopicPartitionVO::isLeaderPreferred) - .count(); - return ((double) preferredLeaderCount) / ((double)partitions.size()); + long preferredLeaderCount = partitions.values().stream() + .filter(TopicPartitionVO::isLeaderPreferred) + .count(); + return ((double) preferredLeaderCount) / ((double) partitions.size()); } public void addPartition(TopicPartitionVO partition) { - partitions.put(partition.getId(), partition); + partitions.put(partition.getId(), partition); } @Override public int compareTo(TopicVO that) { - return this.name.compareTo(that.name); + return this.name.compareTo(that.name); } @Override public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; + if (this == o) + { return true; } + if (o == null || getClass() != o.getClass()) + { return false; } - TopicVO that = (TopicVO) o; + TopicVO that = (TopicVO) o; - if (!name.equals(that.name)) return false; + if (!name.equals(that.name)) + { return false; } - return true; + return true; } @Override public int hashCode() { - return name.hashCode(); + return name.hashCode(); } - } diff --git a/src/main/java/com/homeadvisor/kafdrop/service/CuratorKafkaMonitor.java b/src/main/java/com/homeadvisor/kafdrop/service/CuratorKafkaMonitor.java index 84df01b..f6e03ab 100644 --- a/src/main/java/com/homeadvisor/kafdrop/service/CuratorKafkaMonitor.java +++ b/src/main/java/com/homeadvisor/kafdrop/service/CuratorKafkaMonitor.java @@ -18,45 +18,44 @@ package com.homeadvisor.kafdrop.service; 
-import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.base.Throwables; -import com.google.common.collect.ImmutableMap; +import com.fasterxml.jackson.databind.*; +import com.google.common.base.*; +import com.google.common.collect.*; import com.homeadvisor.kafdrop.model.*; -import com.homeadvisor.kafdrop.util.BrokerChannel; -import com.homeadvisor.kafdrop.util.Version; -import kafka.api.ConsumerMetadataRequest; -import kafka.api.PartitionOffsetRequestInfo; -import kafka.cluster.Broker; -import kafka.common.ErrorMapping; -import kafka.common.TopicAndPartition; -import kafka.javaapi.*; -import kafka.network.BlockingChannel; -import kafka.utils.ZKGroupDirs; -import kafka.utils.ZKGroupTopicDirs; -import kafka.utils.ZkUtils; -import org.apache.commons.lang.StringUtils; -import org.apache.curator.framework.CuratorFramework; +import com.homeadvisor.kafdrop.util.*; +import javax.annotation.*; +import kafka.api.*; +import kafka.cluster.*; +import kafka.common.*; +import kafka.javaapi.ConsumerMetadataResponse; +import kafka.javaapi.OffsetFetchRequest; +import kafka.javaapi.OffsetFetchResponse; +import kafka.javaapi.OffsetRequest; +import kafka.javaapi.OffsetResponse; +import kafka.javaapi.PartitionMetadata; +import kafka.javaapi.TopicMetadata; +import kafka.network.*; +import kafka.utils.*; +import org.apache.commons.lang.*; +import org.apache.curator.framework.*; import org.apache.curator.framework.recipes.cache.*; -import org.apache.curator.framework.recipes.cache.PathChildrenCache.StartMode; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.retry.backoff.FixedBackOffPolicy; -import org.springframework.retry.policy.SimpleRetryPolicy; -import org.springframework.retry.support.RetryTemplate; -import org.springframework.stereotype.Service; - -import javax.annotation.PostConstruct; -import javax.annotation.PreDestroy; -import java.io.IOException; +import 
org.apache.curator.framework.recipes.cache.PathChildrenCache.*; +import org.apache.kafka.clients.consumer.*; +import org.apache.kafka.common.*; +import org.slf4j.*; +import org.springframework.beans.factory.annotation.*; +import org.springframework.retry.backoff.*; +import org.springframework.retry.policy.*; +import org.springframework.retry.support.*; +import org.springframework.stereotype.*; + +import java.io.*; import java.util.*; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ForkJoinPool; -import java.util.concurrent.ForkJoinTask; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.stream.Collectors; -import java.util.stream.LongStream; -import java.util.stream.Stream; +import java.util.Objects; +import java.util.Optional; +import java.util.concurrent.*; +import java.util.concurrent.atomic.*; +import java.util.stream.*; @Service public class CuratorKafkaMonitor implements KafkaMonitor @@ -85,845 +84,860 @@ public class CuratorKafkaMonitor implements KafkaMonitor @Autowired private CuratorKafkaMonitorProperties properties; + @Autowired + private KafkaHighLevelConsumer kafkaHighLevelConsumer; private Version kafkaVersion; private RetryTemplate retryTemplate; @PostConstruct - public void start() throws Exception - { - try - { - kafkaVersion = new Version(properties.getKafkaVersion()); - } - catch (Exception ex) - { - throw new IllegalStateException("Invalid kafka version: " + properties.getKafkaVersion(), ex); - } - - threadPool = new ForkJoinPool(properties.getThreadPoolSize()); - - FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy(); - backOffPolicy.setBackOffPeriod(properties.getRetry().getBackoffMillis()); - - final SimpleRetryPolicy retryPolicy = - new SimpleRetryPolicy(properties.getRetry().getMaxAttempts(), - ImmutableMap.of(InterruptedException.class, false, - Exception.class, true)); - - retryTemplate = new RetryTemplate(); - retryTemplate.setBackOffPolicy(backOffPolicy); - 
retryTemplate.setRetryPolicy(retryPolicy); - - cacheInitCounter.set(4); - - brokerPathCache = new PathChildrenCache(curatorFramework, ZkUtils.BrokerIdsPath(), true); - brokerPathCache.getListenable().addListener(new BrokerListener()); - brokerPathCache.getListenable().addListener((f, e) -> { - if (e.getType() == PathChildrenCacheEvent.Type.INITIALIZED) - { - cacheInitCounter.decrementAndGet(); - LOG.info("Broker cache initialized"); - } - }); - brokerPathCache.start(StartMode.POST_INITIALIZED_EVENT); - - topicConfigPathCache = new PathChildrenCache(curatorFramework, ZkUtils.TopicConfigPath(), true); - topicConfigPathCache.getListenable().addListener((f, e) -> { - if (e.getType() == PathChildrenCacheEvent.Type.INITIALIZED) - { - cacheInitCounter.decrementAndGet(); - LOG.info("Topic configuration cache initialized"); - } - }); - topicConfigPathCache.start(StartMode.POST_INITIALIZED_EVENT); - - topicTreeCache = new TreeCache(curatorFramework, ZkUtils.BrokerTopicsPath()); - topicTreeCache.getListenable().addListener((client, event) -> { - if (event.getType() == TreeCacheEvent.Type.INITIALIZED) - { - cacheInitCounter.decrementAndGet(); - LOG.info("Topic tree cache initialized"); - } - }); - topicTreeCache.start(); - - consumerTreeCache = new TreeCache(curatorFramework, ZkUtils.ConsumersPath()); - consumerTreeCache.getListenable().addListener((client, event) -> { - if (event.getType() == TreeCacheEvent.Type.INITIALIZED) - { - cacheInitCounter.decrementAndGet(); - LOG.info("Consumer tree cache initialized"); - } - }); - consumerTreeCache.start(); - - controllerNodeCache = new NodeCache(curatorFramework, ZkUtils.ControllerPath()); - controllerNodeCache.getListenable().addListener(this::updateController); - controllerNodeCache.start(true); - updateController(); + public void start() + throws Exception + { + try + { + kafkaVersion = new Version(properties.getKafkaVersion()); + } + catch (Exception ex) + { + throw new IllegalStateException("Invalid kafka version: " + 
properties.getKafkaVersion(), ex); + } + + threadPool = new ForkJoinPool(properties.getThreadPoolSize()); + + FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy(); + backOffPolicy.setBackOffPeriod(properties.getRetry().getBackoffMillis()); + + final SimpleRetryPolicy retryPolicy = + new SimpleRetryPolicy(properties.getRetry().getMaxAttempts(), + ImmutableMap.of(InterruptedException.class, false, + Exception.class, true)); + + retryTemplate = new RetryTemplate(); + retryTemplate.setBackOffPolicy(backOffPolicy); + retryTemplate.setRetryPolicy(retryPolicy); + + cacheInitCounter.set(4); + + brokerPathCache = new PathChildrenCache(curatorFramework, ZkUtils.BrokerIdsPath(), true); + brokerPathCache.getListenable().addListener(new BrokerListener()); + brokerPathCache.getListenable().addListener((f, e) -> { + if (e.getType() == PathChildrenCacheEvent.Type.INITIALIZED) + { + cacheInitCounter.decrementAndGet(); + LOG.info("Broker cache initialized"); + } + }); + brokerPathCache.start(StartMode.POST_INITIALIZED_EVENT); + + topicConfigPathCache = new PathChildrenCache(curatorFramework, ZkUtils.TopicConfigPath(), true); + topicConfigPathCache.getListenable().addListener((f, e) -> { + if (e.getType() == PathChildrenCacheEvent.Type.INITIALIZED) + { + cacheInitCounter.decrementAndGet(); + LOG.info("Topic configuration cache initialized"); + } + }); + topicConfigPathCache.start(StartMode.POST_INITIALIZED_EVENT); + + topicTreeCache = new TreeCache(curatorFramework, ZkUtils.BrokerTopicsPath()); + topicTreeCache.getListenable().addListener((client, event) -> { + if (event.getType() == TreeCacheEvent.Type.INITIALIZED) + { + cacheInitCounter.decrementAndGet(); + LOG.info("Topic tree cache initialized"); + } + }); + topicTreeCache.start(); + + consumerTreeCache = new TreeCache(curatorFramework, ZkUtils.ConsumersPath()); + consumerTreeCache.getListenable().addListener((client, event) -> { + if (event.getType() == TreeCacheEvent.Type.INITIALIZED) + { + 
cacheInitCounter.decrementAndGet(); + LOG.info("Consumer tree cache initialized"); + } + }); + consumerTreeCache.start(); + + controllerNodeCache = new NodeCache(curatorFramework, ZkUtils.ControllerPath()); + controllerNodeCache.getListenable().addListener(this::updateController); + controllerNodeCache.start(true); + updateController(); } private String clientId() { - return properties.getClientId(); + return properties.getClientId(); + } + + @Override + public Version getKafkaVersion() + { + return kafkaVersion; } private void updateController() { - Optional.ofNullable(controllerNodeCache.getCurrentData()) - .map(data -> { - try - { - Map controllerData = objectMapper.reader(Map.class).readValue(data.getData()); - return (Integer) controllerData.get("brokerid"); - } - catch (IOException e) - { - LOG.error("Unable to read controller data", e); - return null; - } - }) - .ifPresent(this::updateController); + Optional.ofNullable(controllerNodeCache.getCurrentData()) + .map(data -> { + try + { + Map controllerData = objectMapper.reader(Map.class).readValue(data.getData()); + return (Integer) controllerData.get("brokerid"); + } + catch (IOException e) + { + LOG.error("Unable to read controller data", e); + return null; + } + }) + .ifPresent(this::updateController); } private void updateController(int brokerId) { - brokerCache.values() - .forEach(broker -> broker.setController(broker.getId() == brokerId)); + brokerCache.values() + .forEach(broker -> broker.setController(broker.getId() == brokerId)); } private void validateInitialized() { - if (cacheInitCounter.get() > 0) - { - throw new NotInitializedException(); - } + if (cacheInitCounter.get() > 0) + { + throw new NotInitializedException(); + } } - @PreDestroy - public void stop() throws IOException + public void stop() + throws IOException { - consumerTreeCache.close(); - topicConfigPathCache.close(); - brokerPathCache.close(); - controllerNodeCache.close(); + consumerTreeCache.close(); + topicConfigPathCache.close(); 
+ brokerPathCache.close(); + controllerNodeCache.close(); } private int brokerId(ChildData input) { - return Integer.parseInt(StringUtils.substringAfter(input.getPath(), ZkUtils.BrokerIdsPath() + "/")); + return Integer.parseInt(StringUtils.substringAfter(input.getPath(), ZkUtils.BrokerIdsPath() + "/")); } private BrokerVO addBroker(BrokerVO broker) { - final BrokerVO oldBroker = brokerCache.put(broker.getId(), broker); - LOG.info("Kafka broker {} was {}", broker.getId(), oldBroker == null ? "added" : "updated"); - return oldBroker; + final BrokerVO oldBroker = brokerCache.put(broker.getId(), broker); + LOG.info("Kafka broker {} was {}", broker.getId(), oldBroker == null ? "added" : "updated"); + return oldBroker; } private BrokerVO removeBroker(int brokerId) { - final BrokerVO broker = brokerCache.remove(brokerId); - LOG.info("Kafka broker {} was removed", broker.getId()); - return broker; + final BrokerVO broker = brokerCache.remove(brokerId); + LOG.info("Kafka broker {} was removed", broker.getId()); + return broker; } @Override public List getBrokers() { - validateInitialized(); - return brokerCache.values().stream().collect(Collectors.toList()); + validateInitialized(); + return brokerCache.values().stream().collect(Collectors.toList()); } @Override public Optional getBroker(int id) { - validateInitialized(); - return Optional.ofNullable(brokerCache.get(id)); + validateInitialized(); + return Optional.ofNullable(brokerCache.get(id)); } private BrokerChannel brokerChannel(Integer brokerId) { - if (brokerId == null) - { - brokerId = randomBroker(); - if (brokerId == null) - { - throw new BrokerNotFoundException("No brokers available to select from"); - } - } + if (brokerId == null) + { + brokerId = randomBroker(); + if (brokerId == null) + { + throw new BrokerNotFoundException("No brokers available to select from"); + } + } - Integer finalBrokerId = brokerId; - BrokerVO broker = getBroker(brokerId) - .orElseThrow(() -> new BrokerNotFoundException("Broker " + 
finalBrokerId + " is not available")); + Integer finalBrokerId = brokerId; + BrokerVO broker = getBroker(brokerId) + .orElseThrow(() -> new BrokerNotFoundException("Broker " + finalBrokerId + " is not available")); - return BrokerChannel.forBroker(broker.getHost(), broker.getPort()); + return BrokerChannel.forBroker(broker.getHost(), broker.getPort()); } private Integer randomBroker() { - if (brokerCache.size() > 0) - { - List brokerIds = brokerCache.keySet().stream().collect(Collectors.toList()); - Collections.shuffle(brokerIds); - return brokerIds.get(0); - } - else - { - return null; - } + if (brokerCache.size() > 0) + { + List brokerIds = brokerCache.keySet().stream().collect(Collectors.toList()); + Collections.shuffle(brokerIds); + return brokerIds.get(0); + } else + { + return null; + } } public ClusterSummaryVO getClusterSummary() { - return getClusterSummary(getTopics()); + return getClusterSummary(getTopics()); } @Override - public ClusterSummaryVO getClusterSummary(Collection topics) { - final ClusterSummaryVO topicSummary = topics.stream() - .map(topic -> { - ClusterSummaryVO summary = new ClusterSummaryVO(); - summary.setPartitionCount(topic.getPartitions().size()); - summary.setUnderReplicatedCount(topic.getUnderReplicatedPartitions().size()); - summary.setPreferredReplicaPercent(topic.getPreferredReplicaPercent()); - topic.getPartitions() - .forEach(partition -> { - if (partition.getLeader() != null) { - summary.addBrokerLeaderPartition(partition.getLeader().getId()); - } - if (partition.getPreferredLeader() != null) { - summary.addBrokerPreferredLeaderPartition(partition.getPreferredLeader().getId()); - } - partition.getReplicas() - .forEach(replica -> summary.addExpectedBrokerId(replica.getId())); - }); - return summary; - }) - .reduce((s1, s2) -> { - s1.setPartitionCount(s1.getPartitionCount() + s2.getPartitionCount()); - s1.setUnderReplicatedCount(s1.getUnderReplicatedCount() + s2.getUnderReplicatedCount()); - 
s1.setPreferredReplicaPercent(s1.getPreferredReplicaPercent() + s2.getPreferredReplicaPercent()); - s2.getBrokerLeaderPartitionCount().forEach(s1::addBrokerLeaderPartition); - s2.getBrokerPreferredLeaderPartitionCount().forEach(s1::addBrokerPreferredLeaderPartition); - return s1; - }) - .orElseGet(ClusterSummaryVO::new); - topicSummary.setTopicCount(topics.size()); - topicSummary.setPreferredReplicaPercent(topicSummary.getPreferredReplicaPercent() / topics.size()); - return topicSummary; + public ClusterSummaryVO getClusterSummary(Collection topics) + { + final ClusterSummaryVO topicSummary = topics.stream() + .map(topic -> { + ClusterSummaryVO summary = new ClusterSummaryVO(); + summary.setPartitionCount(topic.getPartitions().size()); + summary.setUnderReplicatedCount(topic.getUnderReplicatedPartitions().size()); + summary.setPreferredReplicaPercent(topic.getPreferredReplicaPercent()); + topic.getPartitions() + .forEach(partition -> { + if (partition.getLeader() != null) + { + summary.addBrokerLeaderPartition(partition.getLeader().getId()); + } + if (partition.getPreferredLeader() != null) + { + summary.addBrokerPreferredLeaderPartition(partition.getPreferredLeader().getId()); + } + partition.getReplicas() + .forEach(replica -> summary.addExpectedBrokerId(replica.getId())); + }); + return summary; + }) + .reduce((s1, s2) -> { + s1.setPartitionCount(s1.getPartitionCount() + s2.getPartitionCount()); + s1.setUnderReplicatedCount(s1.getUnderReplicatedCount() + s2.getUnderReplicatedCount()); + s1.setPreferredReplicaPercent(s1.getPreferredReplicaPercent() + s2.getPreferredReplicaPercent()); + s2.getBrokerLeaderPartitionCount().forEach(s1::addBrokerLeaderPartition); + s2.getBrokerPreferredLeaderPartitionCount().forEach(s1::addBrokerPreferredLeaderPartition); + return s1; + }) + .orElseGet(ClusterSummaryVO::new); + topicSummary.setTopicCount(topics.size()); + topicSummary.setPreferredReplicaPercent(topicSummary.getPreferredReplicaPercent() / topics.size()); + return 
topicSummary; } @Override public List getTopics() { - validateInitialized(); - return getTopicMetadata().values().stream() - .sorted(Comparator.comparing(TopicVO::getName)) - .collect(Collectors.toList()); + validateInitialized(); + return getTopicMetadata().values().stream() + .sorted(Comparator.comparing(TopicVO::getName)) + .collect(Collectors.toList()); } @Override public Optional getTopic(String topic) { - validateInitialized(); - final Optional topicVO = Optional.ofNullable(getTopicMetadata(topic).get(topic)); - topicVO.ifPresent( - vo -> { - getTopicPartitionSizes(vo, kafka.api.OffsetRequest.LatestTime()) - .entrySet() - .forEach(entry -> vo.getPartition(entry.getKey()).ifPresent(p -> p.setSize(entry.getValue()))); - getTopicPartitionSizes(vo, kafka.api.OffsetRequest.EarliestTime()) - .entrySet() - .forEach(entry -> vo.getPartition(entry.getKey()).ifPresent(p -> p.setFirstOffset(entry.getValue()))); - } - ); - return topicVO; + validateInitialized(); + final Optional topicVO = Optional.ofNullable(getTopicMetadata(topic).get(topic)); + if (kafkaVersion.compareTo(new Version(0, 8, 2)) > 0) + { + topicVO.ifPresent(vo -> { + vo.setPartitions(getTopicPartitionSizes(vo)); + }); + } else + { + topicVO.ifPresent( + vo -> { + getTopicPartitionSizes(vo, kafka.api.OffsetRequest.LatestTime()) + .entrySet() + .forEach(entry -> vo.getPartition(entry.getKey()).ifPresent(p -> p.setSize(entry.getValue()))); + getTopicPartitionSizes(vo, kafka.api.OffsetRequest.EarliestTime()) + .entrySet() + .forEach(entry -> vo.getPartition(entry.getKey()).ifPresent(p -> p.setFirstOffset(entry.getValue()))); + } + ); + } + return topicVO; } private Map getTopicMetadata(String... 
topics) { - if (kafkaVersion.compareTo(new Version(0, 9, 0)) >= 0) - { - return retryTemplate.execute( - context -> brokerChannel(null) - .execute(channel -> getTopicMetadata(channel, topics))); - } - else - { - Stream topicStream; - if (topics == null || topics.length == 0) - { - topicStream = - Optional.ofNullable( - topicTreeCache.getCurrentChildren(ZkUtils.BrokerTopicsPath())) - .map(Map::keySet) - .map(Collection::stream) - .orElse(Stream.empty()); - } - else - { - topicStream = Stream.of(topics); - } - - return topicStream - .map(this::getTopicZkData) - .filter(Objects::nonNull) - .collect(Collectors.toMap(TopicVO::getName, topic -> topic)); - } + if (kafkaVersion.compareTo(new Version(0, 9, 0)) >= 0) + { + return retryTemplate.execute( + context -> brokerChannel(null) + .execute(channel -> getTopicMetadata(channel, topics))); + } else + { + Stream topicStream; + if (topics == null || topics.length == 0) + { + topicStream = + Optional.ofNullable( + topicTreeCache.getCurrentChildren(ZkUtils.BrokerTopicsPath())) + .map(Map::keySet) + .map(Collection::stream) + .orElse(Stream.empty()); + } else + { + topicStream = Stream.of(topics); + } + + return topicStream + .map(this::getTopicZkData) + .filter(Objects::nonNull) + .collect(Collectors.toMap(TopicVO::getName, topic -> topic)); + } } private TopicVO getTopicZkData(String topic) { - return Optional.ofNullable(topicTreeCache.getCurrentData(ZkUtils.getTopicPath(topic))) - .map(this::parseZkTopic) - .orElse(null); + return Optional.ofNullable(topicTreeCache.getCurrentData(ZkUtils.getTopicPath(topic))) + .map(this::parseZkTopic) + .orElse(null); } public TopicVO parseZkTopic(ChildData input) { - try - { - final TopicVO topic = new TopicVO(StringUtils.substringAfterLast(input.getPath(), "/")); + try + { + final TopicVO topic = new TopicVO(StringUtils.substringAfterLast(input.getPath(), "/")); - final TopicRegistrationVO topicRegistration = - objectMapper.reader(TopicRegistrationVO.class).readValue(input.getData()); + 
final TopicRegistrationVO topicRegistration = + objectMapper.reader(TopicRegistrationVO.class).readValue(input.getData()); - topic.setConfig( - Optional.ofNullable(topicConfigPathCache.getCurrentData(ZkUtils.TopicConfigPath() + "/" + topic.getName())) - .map(this::readTopicConfig) - .orElse(Collections.emptyMap())); + topic.setConfig( + Optional.ofNullable(topicConfigPathCache.getCurrentData(ZkUtils.TopicConfigPath() + "/" + topic.getName())) + .map(this::readTopicConfig) + .orElse(Collections.emptyMap())); - for (Map.Entry> entry : topicRegistration.getReplicas().entrySet()) - { - final int partitionId = entry.getKey(); - final List partitionBrokerIds = entry.getValue(); + for (Map.Entry> entry : topicRegistration.getReplicas().entrySet()) + { + final int partitionId = entry.getKey(); + final List partitionBrokerIds = entry.getValue(); - final TopicPartitionVO partition = new TopicPartitionVO(partitionId); + final TopicPartitionVO partition = new TopicPartitionVO(partitionId); - final Optional partitionState = partitionState(topic.getName(), partition.getId()); + final Optional partitionState = partitionState(topic.getName(), partition.getId()); - partitionBrokerIds.stream() - .map(brokerId -> { - TopicPartitionVO.PartitionReplica replica = new TopicPartitionVO.PartitionReplica(); - replica.setId(brokerId); - replica.setInService(partitionState.map(ps -> ps.getIsr().contains(brokerId)).orElse(false)); - replica.setLeader(partitionState.map(ps -> brokerId == ps.getLeader()).orElse(false)); - return replica; - }) - .forEach(partition::addReplica); + partitionBrokerIds.stream() + .map(brokerId -> { + TopicPartitionVO.PartitionReplica replica = new TopicPartitionVO.PartitionReplica(); + replica.setId(brokerId); + replica.setInService(partitionState.map(ps -> ps.getIsr().contains(brokerId)).orElse(false)); + replica.setLeader(partitionState.map(ps -> brokerId == ps.getLeader()).orElse(false)); + return replica; + }) + .forEach(partition::addReplica); - 
topic.addPartition(partition); - } + topic.addPartition(partition); + } - // todo: get partition sizes here as single bulk request? + // todo: get partition sizes here as single bulk request? - return topic; - } - catch (IOException e) - { - throw Throwables.propagate(e); - } + return topic; + } + catch (IOException e) + { + throw Throwables.propagate(e); + } } private Map getTopicMetadata(BlockingChannel channel, String... topics) { - final TopicMetadataRequest request = - new TopicMetadataRequest((short) 0, 0, clientId(), Arrays.asList(topics)); - - LOG.debug("Sending topic metadata request: {}", request); - - channel.send(request); - final kafka.api.TopicMetadataResponse underlyingResponse = - kafka.api.TopicMetadataResponse.readFrom(channel.receive().buffer()); - - LOG.debug("Received topic metadata response: {}", underlyingResponse); - - TopicMetadataResponse response = new TopicMetadataResponse(underlyingResponse); - return response.topicsMetadata().stream() - .filter(tmd -> tmd.errorCode() == ErrorMapping.NoError()) - .map(this::processTopicMetadata) - .collect(Collectors.toMap(TopicVO::getName, t -> t)); + return kafkaHighLevelConsumer.getTopicsInfo(topics); } private TopicVO processTopicMetadata(TopicMetadata tmd) { - TopicVO topic = new TopicVO(tmd.topic()); + TopicVO topic = new TopicVO(tmd.topic()); - topic.setConfig( - Optional.ofNullable(topicConfigPathCache.getCurrentData(ZkUtils.TopicConfigPath() + "/" + topic.getName())) - .map(this::readTopicConfig) - .orElse(Collections.emptyMap())); + topic.setConfig( + Optional.ofNullable(topicConfigPathCache.getCurrentData(ZkUtils.TopicConfigPath() + "/" + topic.getName())) + .map(this::readTopicConfig) + .orElse(Collections.emptyMap())); - topic.setPartitions( - tmd.partitionsMetadata().stream() - .map((pmd) -> parsePartitionMetadata(tmd.topic(), pmd)) - .collect(Collectors.toMap(TopicPartitionVO::getId, p -> p)) - ); - return topic; + topic.setPartitions( + tmd.partitionsMetadata().stream() + .map((pmd) -> 
parsePartitionMetadata(tmd.topic(), pmd)) + .collect(Collectors.toMap(TopicPartitionVO::getId, p -> p)) + ); + return topic; } private TopicPartitionVO parsePartitionMetadata(String topic, PartitionMetadata pmd) { - TopicPartitionVO partition = new TopicPartitionVO(pmd.partitionId()); - if (pmd.leader() != null) - { - partition.addReplica(new TopicPartitionVO.PartitionReplica(pmd.leader().id(), true, true)); - } + TopicPartitionVO partition = new TopicPartitionVO(pmd.partitionId()); + if (pmd.leader() != null) + { + partition.addReplica(new TopicPartitionVO.PartitionReplica(pmd.leader().id(), true, true)); + } - final List isr = getIsr(topic, pmd); - pmd.replicas().stream() - .map(replica -> new TopicPartitionVO.PartitionReplica(replica.id(), isr.contains(replica.id()), false)) - .forEach(partition::addReplica); - return partition; + final List isr = getIsr(topic, pmd); + pmd.replicas().stream() + .map(replica -> new TopicPartitionVO.PartitionReplica(replica.id(), isr.contains(replica.id()), false)) + .forEach(partition::addReplica); + return partition; } private List getIsr(String topic, PartitionMetadata pmd) { - return pmd.isr().stream().map(Broker::id).collect(Collectors.toList()); + return pmd.isr().stream().map(Broker::id).collect(Collectors.toList()); } private Map readTopicConfig(ChildData d) { - try - { - final Map configData = objectMapper.reader(Map.class).readValue(d.getData()); - return (Map) configData.get("config"); - } - catch (IOException e) - { - throw Throwables.propagate(e); - } + try + { + final Map configData = objectMapper.reader(Map.class).readValue(d.getData()); + return (Map) configData.get("config"); + } + catch (IOException e) + { + throw Throwables.propagate(e); + } } - private Optional partitionState(String topicName, int partitionId) - throws IOException + throws IOException { - final Optional partitionData = Optional.ofNullable(topicTreeCache.getCurrentData( - ZkUtils.getTopicPartitionLeaderAndIsrPath(topicName, partitionId))) - 
.map(ChildData::getData); - if (partitionData.isPresent()) - { - return Optional.ofNullable(objectMapper.reader(TopicPartitionStateVO.class).readValue(partitionData.get())); - } - else - { - return Optional.empty(); - } + final Optional partitionData = Optional.ofNullable(topicTreeCache.getCurrentData( + ZkUtils.getTopicPartitionLeaderAndIsrPath(topicName, partitionId))) + .map(ChildData::getData); + if (partitionData.isPresent()) + { + return Optional.ofNullable(objectMapper.reader(TopicPartitionStateVO.class).readValue(partitionData.get())); + } else + { + return Optional.empty(); + } } @Override public List getConsumers() { - validateInitialized(); - return getConsumerStream(null).collect(Collectors.toList()); + validateInitialized(); + return getConsumerStream(null).collect(Collectors.toList()); } @Override public List getConsumers(final TopicVO topic) { - validateInitialized(); - return getConsumerStream(topic) - .filter(consumer -> consumer.getTopic(topic.getName()) != null) - .collect(Collectors.toList()); + validateInitialized(); + return getConsumerStream(topic) + .filter(consumer -> consumer.getTopic(topic.getName()) != null) + .collect(Collectors.toList()); } @Override public List getConsumers(final String topic) { - return getConsumers(getTopic(topic).get()); + return getConsumers(getTopic(topic).get()); } private Stream getConsumerStream(TopicVO topic) { - return consumerTreeCache.getCurrentChildren(ZkUtils.ConsumersPath()).keySet().stream() - .map(g -> getConsumerByTopic(g, topic)) - .filter(Optional::isPresent) - .map(Optional::get) - .sorted(Comparator.comparing(ConsumerVO::getGroupId)); + return consumerTreeCache.getCurrentChildren(ZkUtils.ConsumersPath()).keySet().stream() + .map(g -> getConsumerByTopic(g, topic)) + .filter(Optional::isPresent) + .map(Optional::get) + .sorted(Comparator.comparing(ConsumerVO::getGroupId)); } @Override public Optional getConsumer(String groupId) { - validateInitialized(); - return getConsumerByTopic(groupId, null); 
+ validateInitialized(); + return getConsumerByTopic(groupId, null); } @Override public Optional getConsumerByTopicName(String groupId, String topicName) { - return getConsumerByTopic(groupId, Optional.of(topicName).flatMap(this::getTopic).orElse(null)); + return getConsumerByTopic(groupId, Optional.of(topicName).flatMap(this::getTopic).orElse(null)); } @Override public Optional getConsumerByTopic(String groupId, TopicVO topic) { - final ConsumerVO consumer = new ConsumerVO(groupId); - final ZKGroupDirs groupDirs = new ZKGroupDirs(groupId); - - if (consumerTreeCache.getCurrentData(groupDirs.consumerGroupDir()) == null) return Optional.empty(); - - // todo: get number of threads in each instance (subscription -> topic -> # threads) - Optional.ofNullable(consumerTreeCache.getCurrentChildren(groupDirs.consumerRegistryDir())) - .ifPresent( - children -> - children.keySet().stream() - .map(id -> readConsumerRegistration(groupDirs, id)) - .forEach(consumer::addActiveInstance)); - - Stream topicStream = null; - - if (topic != null) - { - if (consumerTreeCache.getCurrentData(groupDirs.consumerGroupDir() + "/owners/" + topic.getName()) != null) - { - topicStream = Stream.of(topic.getName()); - } - else - { - topicStream = Stream.empty(); - } - } - else - { - topicStream = Optional.ofNullable( - consumerTreeCache.getCurrentChildren(groupDirs.consumerGroupDir() + "/owners")) - .map(Map::keySet) - .map(Collection::stream) - .orElse(Stream.empty()); - } - - topicStream - .map(ConsumerTopicVO::new) - .forEach(consumerTopic -> { - getConsumerPartitionStream(groupId, consumerTopic.getTopic(), topic) - .forEach(consumerTopic::addOffset); - consumer.addTopic(consumerTopic); - }); - - return Optional.of(consumer); + final ConsumerVO consumer = new ConsumerVO(groupId); + final ZKGroupDirs groupDirs = new ZKGroupDirs(groupId); + + if (consumerTreeCache.getCurrentData(groupDirs.consumerGroupDir()) == null) + { return Optional.empty(); } + + // todo: get number of threads in each 
instance (subscription -> topic -> # threads) + Optional.ofNullable(consumerTreeCache.getCurrentChildren(groupDirs.consumerRegistryDir())) + .ifPresent( + children -> + children.keySet().stream() + .map(id -> readConsumerRegistration(groupDirs, id)) + .forEach(consumer::addActiveInstance)); + + Stream topicStream = null; + + if (topic != null) + { + if (consumerTreeCache.getCurrentData(groupDirs.consumerGroupDir() + "/owners/" + topic.getName()) != null) + { + topicStream = Stream.of(topic.getName()); + } else + { + topicStream = Stream.empty(); + } + } else + { + topicStream = Optional.ofNullable( + consumerTreeCache.getCurrentChildren(groupDirs.consumerGroupDir() + "/owners")) + .map(Map::keySet) + .map(Collection::stream) + .orElse(Stream.empty()); + } + + topicStream + .map(ConsumerTopicVO::new) + .forEach(consumerTopic -> { + getConsumerPartitionStream(groupId, consumerTopic.getTopic(), topic) + .forEach(consumerTopic::addOffset); + consumer.addTopic(consumerTopic); + }); + + return Optional.of(consumer); + } + + @Override + public List getMessages(TopicPartition topicPartition, long offset, long count) + { + + List> records = kafkaHighLevelConsumer.getLatestRecords(topicPartition, offset, count); + List messageVOS = Lists.newArrayList(); + for (ConsumerRecord record : records) + { + MessageVO messageVo = new MessageVO(); + messageVo.setKey(record.key()); + messageVo.setMessage(record.value()); + messageVo.setChecksum(record.checksum()); + messageVo.setCompressionCodec(record.headers().toString()); + messageVo.setValid(true); + + messageVOS.add(messageVo); + } + return messageVOS; } private ConsumerRegistrationVO readConsumerRegistration(ZKGroupDirs groupDirs, String id) { - try - { - ChildData data = consumerTreeCache.getCurrentData(groupDirs.consumerRegistryDir() + "/" + id); - final Map consumerData = objectMapper.reader(Map.class).readValue(data.getData()); - Map subscriptions = (Map) consumerData.get("subscription"); + try + { + ChildData data = 
consumerTreeCache.getCurrentData(groupDirs.consumerRegistryDir() + "/" + id); + final Map consumerData = objectMapper.reader(Map.class).readValue(data.getData()); + Map subscriptions = (Map) consumerData.get("subscription"); - ConsumerRegistrationVO vo = new ConsumerRegistrationVO(id); - vo.setSubscriptions(subscriptions); - return vo; - } - catch (IOException ex) - { - throw Throwables.propagate(ex); - } + ConsumerRegistrationVO vo = new ConsumerRegistrationVO(id); + vo.setSubscriptions(subscriptions); + return vo; + } + catch (IOException ex) + { + throw Throwables.propagate(ex); + } } private Stream getConsumerPartitionStream(String groupId, - String topicName, - TopicVO topicOpt) + String topicName, + TopicVO topicOpt) { - ZKGroupTopicDirs groupTopicDirs = new ZKGroupTopicDirs(groupId, topicName); + ZKGroupTopicDirs groupTopicDirs = new ZKGroupTopicDirs(groupId, topicName); - if (topicOpt == null || topicOpt.getName().equals(topicName)) - { - topicOpt = getTopic(topicName).orElse(null); - } + if (topicOpt == null || topicOpt.getName().equals(topicName)) + { + topicOpt = getTopic(topicName).orElse(null); + } - if (topicOpt != null) - { - final TopicVO topic = topicOpt; + if (topicOpt != null) + { + final TopicVO topic = topicOpt; - Map consumerOffsets = getConsumerOffsets(groupId, topic); + Map consumerOffsets = getConsumerOffsets(groupId, topic); - return topic.getPartitions().stream() - .map(partition -> { - int partitionId = partition.getId(); + return topic.getPartitions().stream() + .map(partition -> { + int partitionId = partition.getId(); - final ConsumerPartitionVO consumerPartition = new ConsumerPartitionVO(groupId, topicName, partitionId); - consumerPartition.setOwner( - Optional.ofNullable( - consumerTreeCache.getCurrentData(groupTopicDirs.consumerOwnerDir() + "/" + partitionId)) - .map(data -> new String(data.getData())) - .orElse(null)); + final ConsumerPartitionVO consumerPartition = new ConsumerPartitionVO(groupId, topicName, partitionId); + 
consumerPartition.setOwner( + Optional.ofNullable( + consumerTreeCache.getCurrentData(groupTopicDirs.consumerOwnerDir() + "/" + partitionId)) + .map(data -> new String(data.getData())) + .orElse(null)); - consumerPartition.setOffset(consumerOffsets.getOrDefault(partitionId, -1L)); + consumerPartition.setOffset(consumerOffsets.getOrDefault(partitionId, -1L)); - final Optional topicPartition = topic.getPartition(partitionId); - consumerPartition.setSize(topicPartition.map(TopicPartitionVO::getSize).orElse(-1L)); - consumerPartition.setFirstOffset(topicPartition.map(TopicPartitionVO::getFirstOffset).orElse(-1L)); + final Optional topicPartition = topic.getPartition(partitionId); + consumerPartition.setSize(topicPartition.map(TopicPartitionVO::getSize).orElse(-1L)); + consumerPartition.setFirstOffset(topicPartition.map(TopicPartitionVO::getFirstOffset).orElse(-1L)); - return consumerPartition; - }); - } - else - { - return Stream.empty(); - } + return consumerPartition; + }); + } else + { + return Stream.empty(); + } } private Map getConsumerOffsets(String groupId, TopicVO topic) { - try - { - // Kafka doesn't really give us an indication of whether a consumer is - // using Kafka or Zookeeper based offset tracking. So look up the offsets - // for both and assume that the largest offset is the correct one. 
- - ForkJoinTask> kafkaTask = - threadPool.submit(() -> getConsumerOffsets(groupId, topic, false)); - - ForkJoinTask> zookeeperTask = - threadPool.submit(() -> getConsumerOffsets(groupId, topic, true)); - - Map zookeeperOffsets = zookeeperTask.get(); - Map kafkaOffsets = kafkaTask.get(); - zookeeperOffsets.entrySet() - .forEach(entry -> kafkaOffsets.merge(entry.getKey(), entry.getValue(), Math::max)); - return kafkaOffsets; - } - catch (InterruptedException ex) - { - Thread.currentThread().interrupt(); - throw Throwables.propagate(ex); - } - catch (ExecutionException ex) - { - throw Throwables.propagate(ex.getCause()); - } + try + { + // Kafka doesn't really give us an indication of whether a consumer is + // using Kafka or Zookeeper based offset tracking. So look up the offsets + // for both and assume that the largest offset is the correct one. + + ForkJoinTask> kafkaTask = + threadPool.submit(() -> getConsumerOffsets(groupId, topic, false)); + + ForkJoinTask> zookeeperTask = + threadPool.submit(() -> getConsumerOffsets(groupId, topic, true)); + + Map zookeeperOffsets = zookeeperTask.get(); + Map kafkaOffsets = kafkaTask.get(); + zookeeperOffsets.entrySet() + .forEach(entry -> kafkaOffsets.merge(entry.getKey(), entry.getValue(), Math::max)); + return kafkaOffsets; + } + catch (InterruptedException ex) + { + Thread.currentThread().interrupt(); + throw Throwables.propagate(ex); + } + catch (ExecutionException ex) + { + throw Throwables.propagate(ex.getCause()); + } } private Map getConsumerOffsets(String groupId, - TopicVO topic, - boolean zookeeperOffsets) + TopicVO topic, + boolean zookeeperOffsets) { - return retryTemplate.execute( - context -> brokerChannel(zookeeperOffsets ? null : offsetManagerBroker(groupId)) - .execute(channel -> getConsumerOffsets(channel, groupId, topic, zookeeperOffsets))); + return retryTemplate.execute( + context -> brokerChannel(zookeeperOffsets ? 
null : offsetManagerBroker(groupId)) + .execute(channel -> getConsumerOffsets(channel, groupId, topic, zookeeperOffsets))); } /** - * Returns the map of partitionId to consumer offset for the given group and - * topic. Uses the given blocking channel to execute the offset fetch request. - * - * @param channel The channel to send requests on - * @param groupId Consumer group to use - * @param topic Topic to query - * @param zookeeperOffsets If true, use a version of the API that retrieves - * offsets from Zookeeper. Otherwise use a version - * that pulls the offsets from Kafka itself. - * @return Map where the key is partitionId and the value is the consumer - * offset for that partition. - */ + * Returns the map of partitionId to consumer offset for the given group and + * topic. Uses the given blocking channel to execute the offset fetch request. + * @param channel The channel to send requests on + * @param groupId Consumer group to use + * @param topic Topic to query + * @param zookeeperOffsets If true, use a version of the API that retrieves + * offsets from Zookeeper. Otherwise use a version + * that pulls the offsets from Kafka itself. + * @return Map where the key is partitionId and the value is the consumer + * offset for that partition. + */ private Map getConsumerOffsets(BlockingChannel channel, - String groupId, - TopicVO topic, - boolean zookeeperOffsets) + String groupId, + TopicVO topic, + boolean zookeeperOffsets) { - final OffsetFetchRequest request = new OffsetFetchRequest( - groupId, - topic.getPartitions().stream() - .map(p -> new TopicAndPartition(topic.getName(), p.getId())) - .collect(Collectors.toList()), - (short) (zookeeperOffsets ? 0 : 1), 0, // version 0 = zookeeper offsets, 1 = kafka offsets - clientId()); + final OffsetFetchRequest request = new OffsetFetchRequest( + groupId, + topic.getPartitions().stream() + .map(p -> new TopicAndPartition(topic.getName(), p.getId())) + .collect(Collectors.toList()), + (short) (zookeeperOffsets ? 
0 : 1), 0, // version 0 = zookeeper offsets, 1 = kafka offsets + clientId()); - LOG.debug("Sending consumer offset request: {}", request); + LOG.debug("Sending consumer offset request: {}", request); - channel.send(request.underlying()); + channel.send(request.underlying()); - final kafka.api.OffsetFetchResponse underlyingResponse = - kafka.api.OffsetFetchResponse.readFrom(channel.receive().buffer()); + final kafka.api.OffsetFetchResponse underlyingResponse = + kafka.api.OffsetFetchResponse.readFrom(channel.receive().buffer()); - LOG.debug("Received consumer offset response: {}", underlyingResponse); + LOG.debug("Received consumer offset response: {}", underlyingResponse); - OffsetFetchResponse response = new OffsetFetchResponse(underlyingResponse); + OffsetFetchResponse response = new OffsetFetchResponse(underlyingResponse); - return response.offsets().entrySet().stream() - .filter(entry -> entry.getValue().error() == ErrorMapping.NoError()) - .collect(Collectors.toMap(entry -> entry.getKey().partition(), entry -> entry.getValue().offset())); + return response.offsets().entrySet().stream() + .filter(entry -> entry.getValue().error() == ErrorMapping.NoError()) + .collect(Collectors.toMap(entry -> entry.getKey().partition(), entry -> entry.getValue().offset())); } /** - * Returns the broker Id that is the offset coordinator for the given group id. If not found, returns null - */ + * Returns the broker Id that is the offset coordinator for the given group id. 
If not found, returns null + */ private Integer offsetManagerBroker(String groupId) { - return retryTemplate.execute( - context -> - brokerChannel(null) - .execute(channel -> offsetManagerBroker(channel, groupId)) - ); + return retryTemplate.execute( + context -> + brokerChannel(null) + .execute(channel -> offsetManagerBroker(channel, groupId)) + ); } private Integer offsetManagerBroker(BlockingChannel channel, String groupId) { - final ConsumerMetadataRequest request = - new ConsumerMetadataRequest(groupId, (short) 0, 0, clientId()); + final ConsumerMetadataRequest request = + new ConsumerMetadataRequest(groupId, (short) 0, 0, clientId()); - LOG.debug("Sending consumer metadata request: {}", request); + LOG.debug("Sending consumer metadata request: {}", request); - channel.send(request); - ConsumerMetadataResponse response = - ConsumerMetadataResponse.readFrom(channel.receive().buffer()); + channel.send(request); + ConsumerMetadataResponse response = + ConsumerMetadataResponse.readFrom(channel.receive().buffer()); - LOG.debug("Received consumer metadata response: {}", response); + LOG.debug("Received consumer metadata response: {}", response); - return (response.errorCode() == ErrorMapping.NoError()) ? response.coordinator().id() : null; + return (response.errorCode() == ErrorMapping.NoError()) ? 
response.coordinator().id() : null; } - private Map getTopicPartitionSizes(TopicVO topic) + private Map getTopicPartitionSizes(TopicVO topic) { - return getTopicPartitionSizes(topic, kafka.api.OffsetRequest.LatestTime()); + return kafkaHighLevelConsumer.getPartitionSize(topic.getName()); } private Map getTopicPartitionSizes(TopicVO topic, long time) { - try - { - PartitionOffsetRequestInfo requestInfo = new PartitionOffsetRequestInfo(time, 1); - - return threadPool.submit(() -> - topic.getPartitions().parallelStream() - .filter(p -> p.getLeader() != null) - .collect(Collectors.groupingBy(p -> p.getLeader().getId())) // Group partitions by leader broker id - .entrySet().parallelStream() - .map(entry -> { - final Integer brokerId = entry.getKey(); - final List brokerPartitions = entry.getValue(); - try - { - // Get the size of the partitions for a topic from the leader. - final OffsetResponse offsetResponse = - sendOffsetRequest(brokerId, topic, requestInfo, brokerPartitions); - - - // Build a map of partitionId -> topic size from the response - return brokerPartitions.stream() - .collect(Collectors.toMap(TopicPartitionVO::getId, - partition -> Optional.ofNullable( - offsetResponse.offsets(topic.getName(), partition.getId())) - .map(Arrays::stream) - .orElse(LongStream.empty()) - .findFirst() - .orElse(-1L))); - } - catch (Exception ex) - { - LOG.error("Unable to get partition log size for topic {} partitions ({})", - topic.getName(), - brokerPartitions.stream() - .map(TopicPartitionVO::getId) - .map(String::valueOf) - .collect(Collectors.joining(",")), - ex); - - // Map each partition to -1, indicating we got an error - return brokerPartitions.stream().collect(Collectors.toMap(TopicPartitionVO::getId, tp -> -1L)); - } - }) - .map(Map::entrySet) - .flatMap(Collection::stream) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue))) - .get(); - } - catch (InterruptedException e) - { - Thread.currentThread().interrupt(); - throw Throwables.propagate(e); - 
} - catch (ExecutionException e) - { - throw Throwables.propagate(e.getCause()); - } + try + { + PartitionOffsetRequestInfo requestInfo = new PartitionOffsetRequestInfo(time, 1); + + return threadPool.submit(() -> + topic.getPartitions().parallelStream() + .filter(p -> p.getLeader() != null) + .collect(Collectors.groupingBy(p -> p.getLeader().getId())) // Group partitions by leader broker id + .entrySet().parallelStream() + .map(entry -> { + final Integer brokerId = entry.getKey(); + final List brokerPartitions = entry.getValue(); + try + { + // Get the size of the partitions for a topic from the leader. + final OffsetResponse offsetResponse = + sendOffsetRequest(brokerId, topic, requestInfo, brokerPartitions); + + // Build a map of partitionId -> topic size from the response + return brokerPartitions.stream() + .collect(Collectors.toMap(TopicPartitionVO::getId, + partition -> Optional.ofNullable( + offsetResponse.offsets(topic.getName(), partition.getId())) + .map(Arrays::stream) + .orElse(LongStream.empty()) + .findFirst() + .orElse(-1L))); + } + catch (Exception ex) + { + LOG.error("Unable to get partition log size for topic {} partitions ({})", + topic.getName(), + brokerPartitions.stream() + .map(TopicPartitionVO::getId) + .map(String::valueOf) + .collect(Collectors.joining(",")), + ex); + + // Map each partition to -1, indicating we got an error + return brokerPartitions.stream().collect(Collectors.toMap(TopicPartitionVO::getId, tp -> -1L)); + } + }) + .map(Map::entrySet) + .flatMap(Collection::stream) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue))) + .get(); + } + catch (InterruptedException e) + { + Thread.currentThread().interrupt(); + throw Throwables.propagate(e); + } + catch (ExecutionException e) + { + throw Throwables.propagate(e.getCause()); + } } private OffsetResponse sendOffsetRequest(Integer brokerId, TopicVO topic, - PartitionOffsetRequestInfo requestInfo, - List brokerPartitions) + PartitionOffsetRequestInfo requestInfo, + 
List brokerPartitions) { - final OffsetRequest offsetRequest = new OffsetRequest( - brokerPartitions.stream() - .collect(Collectors.toMap( - partition -> new TopicAndPartition(topic.getName(), partition.getId()), - partition -> requestInfo)), - (short) 0, clientId()); + final OffsetRequest offsetRequest = new OffsetRequest( + brokerPartitions.stream() + .collect(Collectors.toMap( + partition -> new TopicAndPartition(topic.getName(), partition.getId()), + partition -> requestInfo)), + (short) 0, clientId()); - LOG.debug("Sending offset request: {}", offsetRequest); + LOG.debug("Sending offset request: {}", offsetRequest); - return retryTemplate.execute( - context -> - brokerChannel(brokerId) - .execute(channel -> - { - channel.send(offsetRequest.underlying()); - final kafka.api.OffsetResponse underlyingResponse = - kafka.api.OffsetResponse.readFrom(channel.receive().buffer()); + return retryTemplate.execute( + context -> + brokerChannel(brokerId) + .execute(channel -> + { + channel.send(offsetRequest.underlying()); + final kafka.api.OffsetResponse underlyingResponse = + kafka.api.OffsetResponse.readFrom(channel.receive().buffer()); - LOG.debug("Received offset response: {}", underlyingResponse); + LOG.debug("Received offset response: {}", underlyingResponse); - return new OffsetResponse(underlyingResponse); - })); + return new OffsetResponse(underlyingResponse); + })); } private class BrokerListener implements PathChildrenCacheListener { - @Override - public void childEvent(CuratorFramework framework, PathChildrenCacheEvent event) throws Exception - { - switch (event.getType()) - { - case CHILD_REMOVED: - { - BrokerVO broker = removeBroker(brokerId(event.getData())); - break; - } - - case CHILD_ADDED: - case CHILD_UPDATED: - { - addBroker(parseBroker(event.getData())); - break; - } - - case INITIALIZED: - { - brokerPathCache.getCurrentData().stream() - .map(BrokerListener.this::parseBroker) - .forEach(CuratorKafkaMonitor.this::addBroker); - break; - } - } - 
updateController(); - } - - private int brokerId(ChildData input) - { - return Integer.parseInt(StringUtils.substringAfter(input.getPath(), ZkUtils.BrokerIdsPath() + "/")); - } - - - private BrokerVO parseBroker(ChildData input) - { - try - { - final BrokerVO broker = objectMapper.reader(BrokerVO.class).readValue(input.getData()); - broker.setId(brokerId(input)); - return broker; - } - catch (IOException e) - { - throw Throwables.propagate(e); - } - } + @Override + public void childEvent(CuratorFramework framework, PathChildrenCacheEvent event) + throws Exception + { + switch (event.getType()) + { + case CHILD_REMOVED: + { + BrokerVO broker = removeBroker(brokerId(event.getData())); + break; + } + + case CHILD_ADDED: + case CHILD_UPDATED: + { + addBroker(parseBroker(event.getData())); + break; + } + + case INITIALIZED: + { + brokerPathCache.getCurrentData().stream() + .map(BrokerListener.this::parseBroker) + .forEach(CuratorKafkaMonitor.this::addBroker); + break; + } + } + updateController(); + } + + private int brokerId(ChildData input) + { + return Integer.parseInt(StringUtils.substringAfter(input.getPath(), ZkUtils.BrokerIdsPath() + "/")); + } + + private BrokerVO parseBroker(ChildData input) + { + try + { + final BrokerVO broker = objectMapper.reader(BrokerVO.class).readValue(input.getData()); + broker.setId(brokerId(input)); + return broker; + } + catch (IOException e) + { + throw Throwables.propagate(e); + } + } } - } diff --git a/src/main/java/com/homeadvisor/kafdrop/service/KafkaHighLevelConsumer.java b/src/main/java/com/homeadvisor/kafdrop/service/KafkaHighLevelConsumer.java new file mode 100644 index 0000000..9196884 --- /dev/null +++ b/src/main/java/com/homeadvisor/kafdrop/service/KafkaHighLevelConsumer.java @@ -0,0 +1,158 @@ +package com.homeadvisor.kafdrop.service; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.Maps; +import com.homeadvisor.kafdrop.config.KafkaConfiguration; +import 
com.homeadvisor.kafdrop.model.TopicPartitionVO; +import com.homeadvisor.kafdrop.model.TopicVO; +import org.apache.kafka.clients.CommonClientConfigs; +import org.apache.kafka.clients.consumer.ConsumerConfig; +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.clients.consumer.ConsumerRecords; +import org.apache.kafka.clients.consumer.KafkaConsumer; +import org.apache.kafka.common.Node; +import org.apache.kafka.common.PartitionInfo; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.config.SaslConfigs; +import org.apache.kafka.common.serialization.StringDeserializer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Service; + +import javax.annotation.PostConstruct; + +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.Set; +import java.util.TreeMap; +import java.util.stream.Collectors; + +/** + * Created by Satendra Sahu on 9/20/18 + */ +@Service +public class KafkaHighLevelConsumer +{ + private final Logger LOG = LoggerFactory.getLogger(getClass()); + @Autowired + private ObjectMapper objectMapper; + private KafkaConsumer kafkaConsumer; + + @Autowired + private KafkaConfiguration kafkaConfiguration; + + public KafkaHighLevelConsumer() {} + + @PostConstruct + private void initializeClient() + { + if (kafkaConsumer == null) { + + Properties properties = new Properties(); + properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false); + properties.put(ConsumerConfig.GROUP_ID_CONFIG, "kafka-drop-consumer-group"); + properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, kafkaConfiguration.getKeyDeserializer()); + properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, kafkaConfiguration.getValueDeserializer()); + properties.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 100); + 
properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest"); + properties.put(ConsumerConfig.CLIENT_ID_CONFIG, "kafka-drop-client"); + properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaConfiguration.getBrokerConnect()); + + if (kafkaConfiguration.getIsSecured() == true) { + properties.put(SaslConfigs.SASL_MECHANISM, kafkaConfiguration.getSaslMechanism()); + properties.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, kafkaConfiguration.getSecurityProtocol()); + } + + kafkaConsumer = new KafkaConsumer(properties); + } + } + + public synchronized Map getPartitionSize(String topic) + { + initializeClient(); + + List partitionInfoSet = kafkaConsumer.partitionsFor(topic); + kafkaConsumer.assign(partitionInfoSet.stream().map(partitionInfo -> { + return new TopicPartition(partitionInfo.topic(), partitionInfo.partition()); + }).collect(Collectors.toList()) + ); + + kafkaConsumer.poll(0); + Set assignedPartitionList = kafkaConsumer.assignment(); + TopicVO topicVO = getTopicInfo(topic); + Map partitionsVo = topicVO.getPartitionMap(); + + kafkaConsumer.seekToBeginning(assignedPartitionList); + assignedPartitionList.stream().forEach(topicPartition -> { + TopicPartitionVO topicPartitionVO = partitionsVo.get(topicPartition.partition()); + long startOffset = kafkaConsumer.position(topicPartition); + LOG.debug("topic: {}, partition: {}, startOffset: {}", topicPartition.topic(), topicPartition.partition(), startOffset); + topicPartitionVO.setFirstOffset(startOffset); + }); + + kafkaConsumer.seekToEnd(assignedPartitionList); + assignedPartitionList.stream().forEach(topicPartition -> { + long latestOffset = kafkaConsumer.position(topicPartition); + LOG.debug("topic: {}, partition: {}, latestOffset: {}", topicPartition.topic(), topicPartition.partition(), latestOffset); + TopicPartitionVO partitionVO = partitionsVo.get(topicPartition.partition()); + partitionVO.setSize(latestOffset); + }); + return partitionsVo; + } + + public synchronized List> 
getLatestRecords(TopicPartition topicPartition, long offset, Long count) + { + initializeClient(); + kafkaConsumer.assign(Arrays.asList(topicPartition)); + kafkaConsumer.seek(topicPartition, offset); + + ConsumerRecords records = null; + + records = kafkaConsumer.poll(10); + if (records.count() > 0) { + return records.records(topicPartition).subList(0, count.intValue()); + } + return null; + } + + public synchronized Map getTopicsInfo(String[] topics) + { + initializeClient(); + if (topics.length == 0) { + Set topicSet = kafkaConsumer.listTopics().keySet(); + topics = Arrays.copyOf(topicSet.toArray(), topicSet.size(), String[].class); + } + Map topicVOMap = Maps.newHashMap(); + + for (String topic : topics) { + topicVOMap.put(topic, getTopicInfo(topic)); + } + + return topicVOMap; + } + + private TopicVO getTopicInfo(String topic) + { + List partitionInfoList = kafkaConsumer.partitionsFor(topic); + TopicVO topicVO = new TopicVO(topic); + Map partitions = new TreeMap<>(); + + for (PartitionInfo partitionInfo : partitionInfoList) { + TopicPartitionVO topicPartitionVO = new TopicPartitionVO(partitionInfo.partition()); + + Node leader = partitionInfo.leader(); + topicPartitionVO.addReplica(new TopicPartitionVO.PartitionReplica(leader.id(), true, true)); + + for (Node node : partitionInfo.replicas()) { + topicPartitionVO.addReplica(new TopicPartitionVO.PartitionReplica(node.id(), true, false)); + } + partitions.put(partitionInfo.partition(), topicPartitionVO); + } + + topicVO.setPartitions(partitions); + return topicVO; + } +} diff --git a/src/main/java/com/homeadvisor/kafdrop/service/KafkaMonitor.java b/src/main/java/com/homeadvisor/kafdrop/service/KafkaMonitor.java index 5346b53..53794c9 100644 --- a/src/main/java/com/homeadvisor/kafdrop/service/KafkaMonitor.java +++ b/src/main/java/com/homeadvisor/kafdrop/service/KafkaMonitor.java @@ -19,9 +19,12 @@ package com.homeadvisor.kafdrop.service; import com.homeadvisor.kafdrop.model.BrokerVO; -import 
com.homeadvisor.kafdrop.model.ConsumerVO; import com.homeadvisor.kafdrop.model.ClusterSummaryVO; +import com.homeadvisor.kafdrop.model.ConsumerVO; +import com.homeadvisor.kafdrop.model.MessageVO; import com.homeadvisor.kafdrop.model.TopicVO; +import com.homeadvisor.kafdrop.util.*; +import org.apache.kafka.common.TopicPartition; import java.util.Collection; import java.util.List; @@ -34,9 +37,12 @@ public interface KafkaMonitor Optional getBroker(int id); + Version getKafkaVersion(); + List getTopics(); - Optional getTopic(String topic); + List getMessages(TopicPartition topicPartition, long offset, long count); + Optional getTopic(String topic); ClusterSummaryVO getClusterSummary(); diff --git a/src/main/java/com/homeadvisor/kafdrop/service/MessageInspector.java b/src/main/java/com/homeadvisor/kafdrop/service/MessageInspector.java index a2d19a2..3922ffb 100644 --- a/src/main/java/com/homeadvisor/kafdrop/service/MessageInspector.java +++ b/src/main/java/com/homeadvisor/kafdrop/service/MessageInspector.java @@ -21,10 +21,13 @@ import com.homeadvisor.kafdrop.model.MessageVO; import com.homeadvisor.kafdrop.model.TopicPartitionVO; import com.homeadvisor.kafdrop.model.TopicVO; -import com.homeadvisor.kafdrop.util.BrokerChannel; -import com.homeadvisor.kafdrop.util.ByteUtils; -import com.homeadvisor.kafdrop.util.MessageDeserializer; - +import com.homeadvisor.kafdrop.util.Version; +import java.io.UnsupportedEncodingException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.stream.StreamSupport; import kafka.api.FetchRequest; import kafka.api.FetchRequestBuilder; import kafka.javaapi.FetchResponse; @@ -32,98 +35,113 @@ import kafka.javaapi.message.ByteBufferMessageSet; import kafka.message.Message; import kafka.message.MessageAndOffset; - +import org.apache.kafka.common.TopicPartition; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import 
org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; -import java.io.UnsupportedEncodingException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.stream.Collectors; -import java.util.stream.StreamSupport; +@Service +public class MessageInspector { -import io.confluent.kafka.serializers.KafkaAvroDeserializer; -import io.confluent.kafka.serializers.AbstractKafkaAvroSerDeConfig; + private final Logger LOG = LoggerFactory.getLogger(getClass()); + @Autowired + private KafkaMonitor kafkaMonitor; -@Service -public class MessageInspector -{ - private final Logger LOG = LoggerFactory.getLogger(getClass()); - - @Autowired - private KafkaMonitor kafkaMonitor; - - public List getMessages( - String topicName, - int partitionId, - long offset, - long count, - MessageDeserializer deserializer) - { - final TopicVO topic = kafkaMonitor.getTopic(topicName).orElseThrow(TopicNotFoundException::new); - final TopicPartitionVO partition = topic.getPartition(partitionId).orElseThrow(PartitionNotFoundException::new); + public List getMessages(String topicName, int partitionId, long offset, long count) { + if (kafkaMonitor.getKafkaVersion().compareTo(new Version(0, 8, 2)) > 0) { + final TopicVO topic = kafkaMonitor.getTopic(topicName) + .orElseThrow(TopicNotFoundException::new); + final TopicPartitionVO partition = topic.getPartition(partitionId) + .orElseThrow(PartitionNotFoundException::new); + + TopicPartition topicPartition = new TopicPartition(topicName, partitionId); + return kafkaMonitor.getMessages(topicPartition, offset, count); + } else { + final TopicVO topic = kafkaMonitor.getTopic(topicName) + .orElseThrow(TopicNotFoundException::new); + final TopicPartitionVO partition = topic.getPartition(partitionId) + .orElseThrow(PartitionNotFoundException::new); return kafkaMonitor.getBroker(partition.getLeader().getId()) - .map(broker -> { - SimpleConsumer 
consumer = new SimpleConsumer(broker.getHost(), broker.getPort(), 10000, 100000, ""); + .map(broker -> { + SimpleConsumer consumer = new SimpleConsumer(broker.getHost(), broker.getPort(), 10000, + 100000, ""); final FetchRequestBuilder fetchRequestBuilder = new FetchRequestBuilder() - .clientId("KafDrop") - .maxWait(5000) // todo: make configurable - .minBytes(1); + .clientId("KafDrop") + .maxWait(5000) // todo: make configurable + .minBytes(1); List messages = new ArrayList<>(); long currentOffset = offset; - while (messages.size() < count) - { - final FetchRequest fetchRequest = + while (messages.size() < count) { + final FetchRequest fetchRequest = fetchRequestBuilder - .addFetch(topicName, partitionId, currentOffset, 1024 * 1024) - .build(); + .addFetch(topicName, partitionId, currentOffset, 1024 * 1024) + .build(); - FetchResponse fetchResponse = consumer.fetch(fetchRequest); + FetchResponse fetchResponse = consumer.fetch(fetchRequest); - final ByteBufferMessageSet messageSet = fetchResponse.messageSet(topicName, partitionId); - if (messageSet.validBytes() <= 0) break; + final ByteBufferMessageSet messageSet = fetchResponse + .messageSet(topicName, partitionId); + if (messageSet.validBytes() <= 0) { + break; + } - - int oldSize = messages.size(); - StreamSupport.stream(messageSet.spliterator(), false) + int oldSize = messages.size(); + StreamSupport.stream(messageSet.spliterator(), false) .limit(count - messages.size()) .map(MessageAndOffset::message) - .map(m -> createMessage(m, deserializer)) + .map(this::createMessage) .forEach(messages::add); - currentOffset += messages.size() - oldSize; + currentOffset += messages.size() - oldSize; } return messages; - }) - .orElseGet(Collections::emptyList); - } - - private MessageVO createMessage(Message message, MessageDeserializer deserializer) - { - MessageVO vo = new MessageVO(); - if (message.hasKey()) - { - vo.setKey(ByteUtils.readString(message.key())); - } - if (!message.isNull()) - { - final String 
messageString = deserializer.deserializeMessage(message.payload()); - vo.setMessage(messageString); - } - - vo.setValid(message.isValid()); - vo.setCompressionCodec(message.compressionCodec().name()); - vo.setChecksum(message.checksum()); - vo.setComputedChecksum(message.computeChecksum()); - - return vo; - } - + }) + .orElseGet(Collections::emptyList); + } + } + + private MessageVO createMessage(Message message) { + MessageVO vo = new MessageVO(); + if (message.hasKey()) { + vo.setKey(readString(message.key())); + } + if (!message.isNull()) { + vo.setMessage(readString(message.payload())); + } + + vo.setValid(message.isValid()); + vo.setCompressionCodec(message.compressionCodec().name()); + vo.setChecksum(message.checksum()); + vo.setComputedChecksum(message.computeChecksum()); + + return vo; + } + + private String readString(ByteBuffer buffer) { + try { + return new String(readBytes(buffer), "UTF-8"); + } catch (UnsupportedEncodingException e) { + return ""; + } + } + + private byte[] readBytes(ByteBuffer buffer) { + return readBytes(buffer, 0, buffer.limit()); + } + + private byte[] readBytes(ByteBuffer buffer, int offset, int size) { + byte[] dest = new byte[size]; + if (buffer.hasArray()) { + System.arraycopy(buffer.array(), buffer.arrayOffset() + offset, dest, 0, size); + } else { + buffer.mark(); + buffer.get(dest); + buffer.reset(); + } + return dest; + } } diff --git a/src/main/resources/application.yml b/src/main/resources/application.yml index 0a65a8e..47c1727 100644 --- a/src/main/resources/application.yml +++ b/src/main/resources/application.yml @@ -15,11 +15,11 @@ metrics.jmx.domain: ${spring.jmx.default_domain}-metrics management.contextPath: /debug kafdrop.monitor: - kafkaVersion: "0.8.2.2" + kafkaVersion: "0.11.0.2" threadPoolSize: 10 retry: - maxAttempts: 3 - backoffMillis: 1000 + maxAttempts: 5 + backoffMillis: 2000 curator.discovery: @@ -39,3 +39,16 @@ project: name: KafDropr (DEV) description: ${project.name} version: DEV + +# env can be 
'local', 'stage', 'prod' +kafka: + env: local + brokerConnect: localhost:9092 + keyDeserializer: "org.apache.kafka.common.serialization.StringDeserializer" + valueDeserializer: "org.apache.kafka.common.serialization.StringDeserializer" + isSecured: false + saslMechanism: "PLAIN" + securityProtocol: "SASL_PLAINTEXT" + +zookeeper: + connect: localhost:2181 \ No newline at end of file diff --git a/src/main/resources/static/css/baseless.min.css b/src/main/resources/static/css/baseless.min.css new file mode 100644 index 0000000..cb6556a --- /dev/null +++ b/src/main/resources/static/css/baseless.min.css @@ -0,0 +1 @@ +/*! normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */hr,legend{border:0;padding:0}body,h1,h2,h3,h4,h5,h6{font-family:"PT Sans",Helvetica,'Helvetica Neue',Arial,sans-serif}hr,legend,td,th{padding:0}pre,textarea{overflow:auto}.bs-btn,a,ins{text-decoration:none}.row .col.eightcol.no-collapse,.row .col.elevencol.no-collapse,.row .col.fivecol.no-collapse,.row .col.fourcol.no-collapse,.row .col.ninecol.no-collapse,.row .col.no-collapse,.row .col.onecol.no-collapse,.row .col.sixcol.no-collapse,.row .col.tencol.no-collapse,.row .col.threecol.no-collapse,.row .col.twelvecol.no-collapse,.row .col.twocol.no-collapse{min-height:1px}.bs-form-group:after,.bs-list:after,.bs-panel:after,.cf:after,.clearfix:after,.container:after,.row:after,h1:after,h2:after,h3:after,h4:after,h5:after,h6:after{clear:both}.bs-btn,.bs-label,.bs-shape{-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none}html{font-family:sans-serif;-ms-text-size-adjust:100%;-webkit-text-size-adjust:100%;box-sizing:border-box}body{margin:0}article,aside,details,figcaption,figure,footer,header,hgroup,main,menu,nav,section,summary{display:block}audio,canvas,progress,video{display:inline-block;vertical-align:baseline}audio:not([controls]){display:none;height:0}[hidden],template{display:none}abbr[title]{border-bottom:1px 
dotted}b,optgroup,strong{font-weight:700}dfn{font-style:italic}small{font-size:80%}svg:not(:root){overflow:hidden}figure{margin:1em 40px}button,input,optgroup,select,textarea{color:inherit;font:inherit;margin:0}.bs-alert .alert-subtext,.bs-alert .alert-title,.bs-btn,.bs-label,.bs-panel .panel-heading,h4,h5,h6,mark{font-weight:700}button{overflow:visible}button,select{text-transform:none}button,html input[type=button],input[type=reset],input[type=submit]{-webkit-appearance:button;cursor:pointer}button[disabled],html input[disabled]{cursor:default}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}input{line-height:normal}input[type=checkbox],input[type=radio]{box-sizing:border-box;padding:0}input[type=number]::-webkit-inner-spin-button,input[type=number]::-webkit-outer-spin-button{height:auto}input[type=search]{-webkit-appearance:textfield;box-sizing:content-box}input[type=search]::-webkit-search-cancel-button,input[type=search]::-webkit-search-decoration{-webkit-appearance:none}fieldset{border:1px solid silver;margin:0 2px;padding:.35em .625em .75em}.cf:after,.cf:before,.clearfix:after,.clearfix:before{content:" ";display:table}.pr,.pull-right{float:right}.pl,.pull-left{float:left}.text-primary{color:#2371b1}.text-success{color:#9ebf24}.text-warning{color:#e6781e}.text-notice{color:#cc0c39}.text-info{color:#e3dfba}.bs-btn,.bs-btn:active,.bs-btn:hover,.bs-btn:visited,.text-inverted,blockquote,body,ins,mark{color:#333}a,a:active,a:focus,a:hover,a:visited{color:#2371b1}.text-right{text-align:right}.text-left{text-align:left}.bs-label,.bs-shape,.text-center{text-align:center}.center-block{display:block;margin-left:auto;margin-right:auto}h1,h2,h3,h4,h5,h6,p{margin:17.5px 0 0}hr,ol,ul{margin:17.5px 0}*,:after,:before{box-sizing:inherit}:focus{outline:#b3b3b3 dotted 
thin}.bs-btn:focus,a:active,a:hover{outline:0}body{font-size:14px}a{background-color:transparent;transition-property:color;transition-duration:.5s}h1,h2,h3,h4,h5,h6{color:14px;line-height:1.15em}h1:after,h1:before,h2:after,h2:before,h3:after,h3:before,h4:after,h4:before,h5:after,h5:before,h6:after,h6:before{content:" ";display:table}h1{font-size:35px}h2{font-size:31.5px}h3{font-size:29.75px}h4{font-size:28px}h5{font-size:26.25px}h6{font-size:24.5px}blockquote{margin:0;padding-left:17.5px;border-left:.5em #ccc solid}hr{box-sizing:content-box;display:block;height:2px;border-top:none;border-bottom:1px solid #e6e6e6}code,kbd,pre,samp{font-family:"PT Mono",Menlo,'Ubuntu Mono','Lucida Console','Courier New',Courier,monospace;color:#999;font-size:1em}pre{white-space:pre}ins{background:#ff9}mark{background:#ff0}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sup{top:-.5em}sub{bottom:-.25em}ol,ul{padding:0 0 0 17.5px}li p:last-child{margin:0}dd{margin:0 0 0 17.5px}img{max-width:100%;border:0;-ms-interpolation-mode:bicubic;vertical-align:middle}table{border-collapse:collapse;border-spacing:0}td{vertical-align:top}.bs-btn{cursor:pointer;margin:0;padding:3px 10px;border:0;display:inline-block;font-family:"PT Sans",Helvetica,'Helvetica Neue',Arial,sans-serif;font-size:14px;-webkit-appearance:none;-moz-appearance:none;appearance:none;border-radius:3px;background:repeat-x #f2f2f2;background-image:-moz-linear-gradient(top,#fff,#f2f2f2);background-image:-webkit-gradient(linear,0 0,0 100%,from(#fff),to(#f2f2f2));background-image:-webkit-linear-gradient(top,#fff,#f2f2f2);background-image:-o-linear-gradient(top,#fff,#f2f2f2);background-image:linear-gradient(to bottom,#fff,#f2f2f2);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#fff2f2f2', GradientType=0)}.bs-btn:hover{background-color:#ededed;background-image:-moz-linear-gradient(top,#fff,#ededed);background-image:-webkit-gradient(linear,0 0,0 
100%,from(#fff),to(#ededed));background-image:-webkit-linear-gradient(top,#fff,#ededed);background-image:-o-linear-gradient(top,#fff,#ededed);background-image:linear-gradient(to bottom,#fff,#ededed);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffededed', GradientType=0)}.bs-btn:active{background:#e0e0e0}.bs-btn.notice,.bs-btn.notice:visited,.bs-btn.primary:visited,.bs-btn.success,.bs-btn.success:visited,.bs-btn.warning,.bs-btn.warning:visited{color:#fff}.bs-btn.disabled,.bs-btn:disabled{cursor:not-allowed;background:#e0e0e0}.bs-btn.large{font-size:17.5px}.bs-btn.small{font-size:11.9px}.bs-btn.mini{font-size:10.5px}.bs-btn.primary{background:repeat-x #1f639c;background-image:-moz-linear-gradient(top,#2371b1,#1f639c);background-image:-webkit-gradient(linear,0 0,0 100%,from(#2371b1),to(#1f639c));background-image:-webkit-linear-gradient(top,#2371b1,#1f639c);background-image:-o-linear-gradient(top,#2371b1,#1f639c);background-image:linear-gradient(to bottom,#2371b1,#1f639c);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff2371b1', endColorstr='#ff1f639c', GradientType=0);color:#fff}.bs-btn.primary:hover{background-color:#1d5e93;background-image:-moz-linear-gradient(top,#2371b1,#1d5e93);background-image:-webkit-gradient(linear,0 0,0 100%,from(#2371b1),to(#1d5e93));background-image:-webkit-linear-gradient(top,#2371b1,#1d5e93);background-image:-o-linear-gradient(top,#2371b1,#1d5e93);background-image:linear-gradient(to bottom,#2371b1,#1d5e93);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff2371b1', endColorstr='#ff1d5e93', GradientType=0)}.bs-btn.primary:active{background:#19507e}.bs-btn.primary.disabled,.bs-btn.primary:disabled{cursor:not-allowed;background:#19507e}.bs-btn.success{background:repeat-x #8caa20;background-image:-moz-linear-gradient(top,#9ebf24,#8caa20);background-image:-webkit-gradient(linear,0 0,0 
100%,from(#9ebf24),to(#8caa20));background-image:-webkit-linear-gradient(top,#9ebf24,#8caa20);background-image:-o-linear-gradient(top,#9ebf24,#8caa20);background-image:linear-gradient(to bottom,#9ebf24,#8caa20);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff9ebf24', endColorstr='#ff8caa20', GradientType=0)}.bs-btn.success:hover{background-color:#85a11e;background-image:-moz-linear-gradient(top,#9ebf24,#85a11e);background-image:-webkit-gradient(linear,0 0,0 100%,from(#9ebf24),to(#85a11e));background-image:-webkit-linear-gradient(top,#9ebf24,#85a11e);background-image:-o-linear-gradient(top,#9ebf24,#85a11e);background-image:linear-gradient(to bottom,#9ebf24,#85a11e);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff9ebf24', endColorstr='#ff85a11e', GradientType=0)}.bs-btn.success:active{background:#738c1a}.bs-btn.success.disabled,.bs-btn.success:disabled{cursor:not-allowed;background:#738c1a}.bs-btn.warning{background:repeat-x #d36c17;background-image:-moz-linear-gradient(top,#e6781e,#d36c17);background-image:-webkit-gradient(linear,0 0,0 100%,from(#e6781e),to(#d36c17));background-image:-webkit-linear-gradient(top,#e6781e,#d36c17);background-image:-o-linear-gradient(top,#e6781e,#d36c17);background-image:linear-gradient(to bottom,#e6781e,#d36c17);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe6781e', endColorstr='#ffd36c17', GradientType=0)}.bs-btn.warning:hover{background-color:#ca6716;background-image:-moz-linear-gradient(top,#e6781e,#ca6716);background-image:-webkit-gradient(linear,0 0,0 100%,from(#e6781e),to(#ca6716));background-image:-webkit-linear-gradient(top,#e6781e,#ca6716);background-image:-o-linear-gradient(top,#e6781e,#ca6716);background-image:linear-gradient(to bottom,#e6781e,#ca6716);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe6781e', endColorstr='#ffca6716', 
GradientType=0)}.bs-btn.warning:active{background:#b35b14}.bs-btn.warning.disabled,.bs-btn.warning:disabled{cursor:not-allowed;background:#b35b14}.bs-btn.notice{background:repeat-x #b40b32;background-image:-moz-linear-gradient(top,#cc0c39,#b40b32);background-image:-webkit-gradient(linear,0 0,0 100%,from(#cc0c39),to(#b40b32));background-image:-webkit-linear-gradient(top,#cc0c39,#b40b32);background-image:-o-linear-gradient(top,#cc0c39,#b40b32);background-image:linear-gradient(to bottom,#cc0c39,#b40b32);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffcc0c39', endColorstr='#ffb40b32', GradientType=0)}.bs-btn.notice:hover{background-color:#aa0a30;background-image:-moz-linear-gradient(top,#cc0c39,#aa0a30);background-image:-webkit-gradient(linear,0 0,0 100%,from(#cc0c39),to(#aa0a30));background-image:-webkit-linear-gradient(top,#cc0c39,#aa0a30);background-image:-o-linear-gradient(top,#cc0c39,#aa0a30);background-image:linear-gradient(to bottom,#cc0c39,#aa0a30);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffcc0c39', endColorstr='#ffaa0a30', GradientType=0)}.bs-btn.notice:active{background:#920929}.bs-btn.notice.disabled,.bs-btn.notice:disabled{cursor:not-allowed;background:#920929}.bs-btn.info{background:repeat-x #e6e3cc;background-image:-moz-linear-gradient(top,#eeecdd,#e6e3cc);background-image:-webkit-gradient(linear,0 0,0 100%,from(#eeecdd),to(#e6e3cc));background-image:-webkit-linear-gradient(top,#eeecdd,#e6e3cc);background-image:-o-linear-gradient(top,#eeecdd,#e6e3cc);background-image:linear-gradient(to bottom,#eeecdd,#e6e3cc);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffeeecdd', endColorstr='#ffe6e3cc', GradientType=0);color:#333}.bs-btn.info:hover{background-color:#e2e0c5;background-image:-moz-linear-gradient(top,#eeecdd,#e2e0c5);background-image:-webkit-gradient(linear,0 0,0 
100%,from(#eeecdd),to(#e2e0c5));background-image:-webkit-linear-gradient(top,#eeecdd,#e2e0c5);background-image:-o-linear-gradient(top,#eeecdd,#e2e0c5);background-image:linear-gradient(to bottom,#eeecdd,#e2e0c5);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffeeecdd', endColorstr='#ffe2e0c5', GradientType=0)}.bs-btn.info:active{background:#dad6b4}.bs-btn.info:visited{color:#333}.bs-btn.info.disabled,.bs-btn.info:disabled{cursor:not-allowed;background:#dad6b4}.bs-btn.inverted{background:repeat-x #262626;background-image:-moz-linear-gradient(top,#333,#262626);background-image:-webkit-gradient(linear,0 0,0 100%,from(#333),to(#262626));background-image:-webkit-linear-gradient(top,#333,#262626);background-image:-o-linear-gradient(top,#333,#262626);background-image:linear-gradient(to bottom,#333,#262626);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff333333', endColorstr='#ff262626', GradientType=0);color:#fff}.bs-btn.inverted:hover{background-color:#212121;background-image:-moz-linear-gradient(top,#333,#212121);background-image:-webkit-gradient(linear,0 0,0 100%,from(#333),to(#212121));background-image:-webkit-linear-gradient(top,#333,#212121);background-image:-o-linear-gradient(top,#333,#212121);background-image:linear-gradient(to bottom,#333,#212121);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff333333', endColorstr='#ff212121', GradientType=0)}.bs-btn.inverted:active{background:#141414}.bs-btn.inverted:visited{color:#fff}.bs-btn.inverted.disabled,.bs-btn.inverted:disabled{cursor:not-allowed;background:#141414}.bs-alert{border-radius:3px;margin:15px 0;padding:0;background:#f2f0de;border:1px solid #dcd7a8;color:#333}.bs-alert .alert-text{margin:0;padding:10px}.bs-alert .alert-title{margin:0;padding:6px;color:#333;text-shadow:1px 1px 0 #d4ce96}.bs-alert .alert-subtext{margin:0;padding:0 10px;color:#b7ad4d}.bs-alert 
.alert-notice{margin:0;padding:10px;text-shadow:-1px -1px 0 #7d7c6b;background:#8b8976;border-top:1px solid #d4ce96}.bs-alert.primary{background:#318cd6;border:1px solid #1f639c;color:#fff}.bs-alert.primary .alert-title{color:#fff;text-shadow:1px 1px 0 #1b5686}.bs-alert.primary .alert-subtext{color:#0a1f31}.bs-alert.primary .alert-notice{text-shadow:-1px -1px 0 #7eacd1;background:#91b8d8;border-top:1px solid #1b5686}.bs-alert.success{background:#b8da3c;border:1px solid #8caa20;color:#fff}.bs-alert.success .alert-title{color:#fff;text-shadow:1px 1px 0 #7b941c}.bs-alert.success .alert-subtext{color:#343e0c}.bs-alert.success .alert-notice{text-shadow:-1px -1px 0 #c6d97e;background:#cfdf92;border-top:1px solid #7b941c}.bs-alert.warning{background:#eb944c;border:1px solid #d36c17;color:#fff}.bs-alert.warning .alert-title{color:#fff;text-shadow:1px 1px 0 #bc6015}.bs-alert.warning .alert-subtext{color:#60310b}.bs-alert.warning .alert-notice{text-shadow:-1px -1px 0 #f0ae78;background:#f3bb8f;border-top:1px solid #bc6015}.bs-alert.notice{background:#f21a4c;border:1px solid #b40b32;color:#fff}.bs-alert.notice .alert-title{color:#fff;text-shadow:1px 1px 0 #9c092c}.bs-alert.notice .alert-subtext{color:#3c0411}.bs-alert.notice .alert-notice{text-shadow:-1px -1px 0 #e1708b;background:#e6859c;border-top:1px solid #9c092c}.bs-alert.info{background:#f2f0de;border:1px solid #dcd7a8;color:#333}.bs-alert.info .alert-title{color:#333;text-shadow:1px 1px 0 #d4ce96}.bs-alert.info .alert-subtext{color:#b7ad4d}.bs-alert.info .alert-notice{text-shadow:-1px -1px 0 #7d7c6b;background:#8b8976;border-top:1px solid #d4ce96}.bs-alert.inverted{background:#4d4d4d;border:1px solid #262626;color:#fff}.bs-alert.inverted .alert-title{color:#fff;text-shadow:1px 1px 0 #1a1a1a}.bs-alert.inverted .alert-subtext{color:#000}.bs-alert.inverted .alert-notice{text-shadow:-1px -1px 0 #8c8c8c;background:#999;border-top:1px solid #1a1a1a}.bs-form{margin:0;padding:0}.bs-form-group{margin:3px 
0;padding:0}.bs-form-group:after,.bs-form-group:before{content:" ";display:table}.bs-form-group label{font-weight:700;display:block}.bs-form-group.inline label{display:inline-block}.bs-form-elem{margin:0;padding:3px 7px;border-radius:3px;border:1px solid #ccc;box-shadow:inset 1px 1px 0 #eee}.bs-form-elem.full-width{width:100%}.bs-form-control{margin:10px 0 0;padding:10px 0 0;border-top:1px solid #ccc}.row .col,.row .col.twelvecol{padding-left:10px;padding-right:10px;-webkit-box-sizing:border-box;-moz-box-sizing:border-box}.container{max-width:1140px;display:block;margin-left:auto;margin-right:auto}.container:after,.container:before,.row:after,.row:before{display:table;content:" "}.row{margin-left:-10px;margin-right:-10px}.row .col{box-sizing:border-box}.row .col.twelvecol{box-sizing:border-box}.row .col.elevencol,.row .col.tencol{padding-left:10px;padding-right:10px;-webkit-box-sizing:border-box;-moz-box-sizing:border-box}.row .col.elevencol{box-sizing:border-box}.row .col.tencol{box-sizing:border-box}.row .col.eightcol,.row .col.ninecol{padding-left:10px;padding-right:10px;-webkit-box-sizing:border-box;-moz-box-sizing:border-box}.row .col.ninecol{box-sizing:border-box}.row .col.eightcol{box-sizing:border-box}.row .col.sevencol,.row .col.sixcol{padding-left:10px;padding-right:10px;-webkit-box-sizing:border-box;-moz-box-sizing:border-box}.row .col.sevencol{box-sizing:border-box}.row .col.sevencol.no-collapse{min-height:1px}.row .col.sixcol{box-sizing:border-box}.row .col.fivecol,.row .col.fourcol{padding-left:10px;padding-right:10px;-webkit-box-sizing:border-box;-moz-box-sizing:border-box}.row .col.fivecol{box-sizing:border-box}.row .col.fourcol{box-sizing:border-box}.row .col.threecol,.row .col.twocol{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;padding-left:10px;padding-right:10px}.row .col.threecol{box-sizing:border-box}.row .col.twocol{box-sizing:border-box}.row 
.col.onecol{padding-left:10px;padding-right:10px;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}@media (min-width:768px){.row .col,.row .col.twelvecol{float:left;width:100%}.row .col.elevencol{float:left;width:91.66666667%}.row .col.tencol{float:left;width:83.33333333%}.row .col.ninecol{float:left;width:75%}.row .col.eightcol{float:left;width:66.66666667%}.row .col.sevencol{float:left;width:58.33333333%}.row .col.sixcol{float:left;width:50%}.row .col.fivecol{float:left;width:41.66666667%}.row .col.fourcol{float:left;width:33.33333333%}.row .col.threecol{float:left;width:25%}.row .col.twocol{float:left;width:16.66666667%}.row .col.onecol{float:left;width:8.33333333%}}.bs-label{margin:0;padding:0 7px 2px;border-radius:15px;display:inline-block;font-size:14px;cursor:default;background:#eee;color:#333}.bs-list.flat,.bs-list.flat li{margin:0;padding:0}.bs-label.small{font-size:11.9px}.bs-label.large{font-size:17.5px}.bs-label.mini{font-size:10.5px}.bs-label.box{border-radius:3px}.bs-label.primary{background:#2371b1;color:#fff}.bs-label.success{background:#9ebf24;color:#fff}.bs-label.warning{background:#e6781e;color:#fff}.bs-label.notice{background:#cc0c39;color:#fff}.bs-label.info{background:#e3dfba;color:#333}.bs-label.inverted{background:#333;color:#fff}.bs-list:after,.bs-list:before{content:" ";display:table}.bs-list.flat{list-style:none}.bs-list.inline li{display:inline-block}.bs-list.nav li{max-width:100%;border-radius:15px;margin:7px 2px 7px 0;padding:0 10px;line-height:21px;background:#2371b1;color:#fff}.bs-list.nav li:hover{cursor:pointer;background:#2984cf}.bs-list.nav li a{color:#fff}.bs-list.nav.primary li{background:#2371b1;color:#fff}.bs-list.nav.primary li:hover{background:#2984cf}.bs-list.nav.primary li a{color:#fff}.bs-list.nav.success li{background:#9ebf24;color:#fff}.bs-list.nav.success li:hover{background:#b4d82f}.bs-list.nav.success li a{color:#fff}.bs-list.nav.warning 
li{background:#e6781e;color:#fff}.bs-list.nav.warning li:hover{background:#ea8b3e}.bs-list.nav.warning li a{color:#fff}.bs-list.nav.notice li{background:#cc0c39;color:#fff}.bs-list.nav.notice li:hover{background:#ee0e42}.bs-list.nav.notice li a{color:#fff}.bs-list.nav.info li{background:#e3dfba;color:#fff}.bs-list.nav.info li:hover{background:#edebd3}.bs-list.nav.info li a{color:#fff}.bs-list.nav.inverted li{background:#333;color:#fff}.bs-list.nav.inverted li:hover{background:#454545}.bs-list.nav.inverted li a{color:#fff}.bs-panel .panel-heading,.bs-panel.default .panel-heading{color:#333;background:#eee;border-bottom:1px solid #ccc;text-shadow:1px 1px 0 #d5d5d5}.bs-panel{margin:0 0 10px;padding:0;border:1px solid #ccc}.bs-panel:after,.bs-panel:before{content:" ";display:table}.bs-panel .panel-body,.bs-panel .panel-heading,.bs-panel .panel-list-item,.bs-panel .panel-notice{margin:0;padding:10px}.bs-panel .panel-foot{margin:0;padding:5px 10px;font-size:75%}.bs-panel.round{border-radius:3px}.bs-panel .panel-list .panel-list-item.first,.bs-panel .panel-list .panel-list-item:first-child{border-top:none}.bs-panel .panel-list .panel-list-item,.bs-panel.default .panel-list .panel-list-item{border-top:1px solid #ccc}.bs-panel.default{border:1px solid #ccc}.bs-panel.primary{border:1px solid #1b5686}.bs-panel.primary .panel-heading{color:#fff;background:#2371b1;border-bottom:1px solid #1b5686;text-shadow:1px 1px 0 #1b5686}.bs-panel.primary .panel-list .panel-list-item{border-top:1px solid #1b5686}.bs-panel.success{border:1px solid #7b941c}.bs-panel.success .panel-heading{color:#fff;background:#9ebf24;border-bottom:1px solid #7b941c;text-shadow:1px 1px 0 #7b941c}.bs-panel.success .panel-list .panel-list-item{border-top:1px solid #7b941c}.bs-panel.warning{border:1px solid #bc6015}.bs-panel.warning .panel-heading{color:#fff;background:#e6781e;border-bottom:1px solid #bc6015;text-shadow:1px 1px 0 #bc6015}.bs-panel.warning .panel-list .panel-list-item{border-top:1px solid 
#bc6015}.bs-panel.notice{border:1px solid #9c092c}.bs-panel.notice .panel-heading{color:#fff;background:#cc0c39;border-bottom:1px solid #9c092c;text-shadow:1px 1px 0 #9c092c}.bs-panel.notice .panel-list .panel-list-item{border-top:1px solid #9c092c}.bs-panel.info{border:1px solid #d4ce96}.bs-panel.info .panel-heading{color:#333;background:#e3dfba;border-bottom:1px solid #d4ce96;text-shadow:1px 1px 0 #d4ce96}.bs-panel.info .panel-list .panel-list-item{border-top:1px solid #d4ce96}.bs-panel.inverted{border:1px solid #1a1a1a}.bs-panel.inverted .panel-heading{color:#fff;background:#333;border-bottom:1px solid #1a1a1a;text-shadow:1px 1px 0 #1a1a1a}.bs-panel.inverted .panel-list .panel-list-item{border-top:1px solid #1a1a1a}.bs-shape{margin:3px auto;padding:0;display:block;cursor:default;width:75px;height:75px;line-height:75px;background:#eee}.bs-shape.small{width:51px;height:51px;line-height:51px}.bs-shape.mini{width:33px;height:33px;line-height:33px}.bs-shape.large{width:100px;height:100px;line-height:100px}.bs-shape.circle{border-radius:100%}.bs-shape.primary{background:#2371b1}.bs-shape.success{background:#9ebf24}.bs-shape.warning{background:#e6781e}.bs-shape.notice{background:#cc0c39}.bs-shape.info{background:#e3dfba}.bs-shape.inverted{background:#333}.bs-shape:after{content:" "}.bs-table{width:100%}.bs-table td,.bs-table th{margin:0;padding:5px;border:1px solid #ccc}.bs-table thead tr td,.bs-table thead tr th{background:#e6e6e6}.bs-table.striped tbody tr{background:#f2f2f2}.bs-table.striped tbody tr:nth-child(odd){background:#fff}.bs-table.hover td:hover,.bs-table.hover th:hover{background:#e6e6e6}.bs-table.info td,.bs-table.info th{border:1px solid #e3dfba}.bs-table.info thead tr td,.bs-table.info thead tr th{background:#f2f0de}.bs-table.info.striped tbody tr{background:#f9f8f0}.bs-table.info.striped tbody tr:nth-child(odd){background:#fff}.bs-table.info.hover td:hover,.bs-table.info.hover th:hover{background:#f2f0de}.bs-table.small td,.bs-table.small 
th{font-size:11.9px}.bs-table.large td,.bs-table.large th{margin:0;padding:7px 5px;font-size:17.5px}.bs-well{margin:0;padding:15px;background:#eee;border:1px solid #d5d5d5;color:#333}.bs-well.small{margin:0;padding:10px}.bs-well.large{margin:0;padding:25px}.bs-well.round{border-radius:3px}.bs-well.info{background:#e3dfba;border:1px solid #c6bd71;color:#4c481f}@media print{blockquote,img,pre,tr{page-break-inside:avoid}a,a:visited{text-decoration:underline}hr{height:1px;border:0;border-bottom:1px solid #000}a[href]:after{content:" (" attr(href) ")"}a[href^="javascript:"]:after,a[href^="#"]:after{content:""}abbr[title]:after{content:" (" attr(title) ")"}blockquote,pre{border:1px solid #999;padding-right:1em}img{max-width:100%!important}h2,h3,p{orphans:3;widows:3}h2,h3{page-break-after:avoid}} \ No newline at end of file diff --git a/src/main/resources/static/css/global.css b/src/main/resources/static/css/global.css index c42eb0e..42cbb5a 100644 --- a/src/main/resources/static/css/global.css +++ b/src/main/resources/static/css/global.css @@ -16,42 +16,31 @@ * */ -.page-header { - margin-top: 0; +.l-content { + padding-left: 20px; + padding-right: 20px; + margin: 0 auto; } -.page-header h1 { - font-family: "PT Sans", Helvetica, 'Helvetica Neue', Arial, sans-serif; - font-weight: bold; +.clearing { + height: 1px; + line-height: 1px; + font-size: 1px; + clear: both; } -.page-header .small { - font-size: 50% -} - -.page-header a { - text-decoration: none; -} - -.table.overview { +table.overview { width: 33%; } -.container, .container-fluid { - padding-left: 0; - padding-right: 0; +.hidden { + display: none; } -.l-container { - padding-bottom: 25px; +.error { + color: red; } -.table th { - text-align: left; - background-color: #eee; +input[type=text].error { + border-color: red; } - -.table td.profile { - padding-left: 3px; - vertical-align: bottom; -} \ No newline at end of file diff --git a/src/main/resources/static/js/global.js b/src/main/resources/static/js/global.js 
index f53bd9b..c42eb0e 100644 --- a/src/main/resources/static/js/global.js +++ b/src/main/resources/static/js/global.js @@ -16,14 +16,42 @@ * */ -jQuery(document).ready(function(){ - jQuery(document).on('click', '.toggle-link', function(e) { - var self = jQuery(this), - linkText = self.find("i"), - target = jQuery(document).find(self.data('toggle-target')); - - e.preventDefault(); - target.slideToggle(); - linkText.toggleClass('fa-chevron-circle-down fa-chevron-circle-right'); - }); -}); \ No newline at end of file +.page-header { + margin-top: 0; +} + +.page-header h1 { + font-family: "PT Sans", Helvetica, 'Helvetica Neue', Arial, sans-serif; + font-weight: bold; +} + +.page-header .small { + font-size: 50% +} + +.page-header a { + text-decoration: none; +} + +.table.overview { + width: 33%; +} + +.container, .container-fluid { + padding-left: 0; + padding-right: 0; +} + +.l-container { + padding-bottom: 25px; +} + +.table th { + text-align: left; + background-color: #eee; +} + +.table td.profile { + padding-left: 3px; + vertical-align: bottom; +} \ No newline at end of file diff --git a/src/main/resources/templates/message-inspector.ftl b/src/main/resources/templates/message-inspector.ftl index ed96755..849d75c 100644 --- a/src/main/resources/templates/message-inspector.ftl +++ b/src/main/resources/templates/message-inspector.ftl @@ -80,7 +80,6 @@ -