
Commit

Merge branch 'master' into fix/assembly-log-config
wu-sheng committed Nov 28, 2017
2 parents 2adfcc0 + a489583, commit 2e44eee
Showing 11 changed files with 168 additions and 94 deletions.
@@ -52,6 +52,12 @@ public static class Agent {
* memory cost estimated.
*/
public static int SPAN_LIMIT_PER_SEGMENT = 300;

/**
* If true, skywalking agent will save all instrumented class files in the `/debugging` folder.
* The skywalking team may ask for these files in order to resolve compatibility problems.
*/
public static boolean IS_OPEN_DEBUGGING_CLASS = false;
}

public static class Collector {
@@ -0,0 +1,71 @@
/*
* Copyright 2017, OpenSkywalking Organization All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Project repository: https://github.com/OpenSkywalking/skywalking
*/

package org.skywalking.apm.agent;

import java.io.File;
import java.io.IOException;
import net.bytebuddy.description.type.TypeDescription;
import net.bytebuddy.dynamic.DynamicType;
import org.skywalking.apm.agent.core.boot.AgentPackageNotFoundException;
import org.skywalking.apm.agent.core.boot.AgentPackagePath;
import org.skywalking.apm.agent.core.conf.Config;
import org.skywalking.apm.agent.core.logging.api.ILog;
import org.skywalking.apm.agent.core.logging.api.LogManager;

/**
* @author wu-sheng
*/
public enum InstrumentDebuggingClass {
    INSTANCE;

    private static final ILog logger = LogManager.getLogger(InstrumentDebuggingClass.class);
    private File debuggingClassesRootPath;

    public void log(TypeDescription typeDescription, DynamicType dynamicType) {
        if (!Config.Agent.IS_OPEN_DEBUGGING_CLASS) {
            return;
        }

        /*
         * Do the I/O in a synchronized way, to avoid unexpected situations.
         */
        synchronized (INSTANCE) {
            try {
                if (debuggingClassesRootPath == null) {
                    try {
                        debuggingClassesRootPath = new File(AgentPackagePath.getPath(), "/debugging");
                        if (!debuggingClassesRootPath.exists()) {
                            debuggingClassesRootPath.mkdir();
                        }
                    } catch (AgentPackageNotFoundException e) {
                        logger.error(e, "Can't find the root path for creating the /debugging folder.");
                    }
                }

                try {
                    dynamicType.saveIn(debuggingClassesRootPath);
                } catch (IOException e) {
                    logger.error(e, "Can't save class {} to file.", typeDescription.getActualName());
                }
            } catch (Throwable t) {
                logger.error(t, "Failed to save the debugging classes.");
            }
        }
    }
}
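For context on what `saveIn` produces: Byte Buddy writes each generated type as an ordinary `.class` file under package-named subdirectories of the target folder. The sketch below is a minimal standalone illustration, not part of this commit; the generated class name and output directory are made up.

```java
import java.io.File;
import net.bytebuddy.ByteBuddy;
import net.bytebuddy.dynamic.DynamicType;

public class SaveInDemo {
    public static void main(String[] args) throws Exception {
        // Build a trivial generated type, then save it the same way the agent does above.
        DynamicType.Unloaded<Object> dynamicType = new ByteBuddy()
            .subclass(Object.class)
            .name("sample.debugging.GeneratedSample")   // hypothetical class name
            .make();

        File debuggingRoot = new File("/tmp/debugging"); // stand-in for <agent-path>/debugging
        debuggingRoot.mkdirs();
        dynamicType.saveIn(debuggingRoot);
        // Expected output file: /tmp/debugging/sample/debugging/GeneratedSample.class
    }
}
```

With `IS_OPEN_DEBUGGING_CLASS` enabled, the agent makes the equivalent `saveIn` call for every instrumented type, so the `/debugging` folder ends up mirroring the package structure of the enhanced classes.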
@@ -18,6 +18,8 @@

package org.skywalking.apm.agent;

import java.lang.instrument.Instrumentation;
import java.util.List;
import net.bytebuddy.agent.builder.AgentBuilder;
import net.bytebuddy.description.type.TypeDescription;
import net.bytebuddy.dynamic.DynamicType;
@@ -26,10 +28,11 @@
import org.skywalking.apm.agent.core.conf.SnifferConfigInitializer;
import org.skywalking.apm.agent.core.logging.api.ILog;
import org.skywalking.apm.agent.core.logging.api.LogManager;
import org.skywalking.apm.agent.core.plugin.*;

import java.lang.instrument.Instrumentation;
import java.util.List;
import org.skywalking.apm.agent.core.plugin.AbstractClassEnhancePluginDefine;
import org.skywalking.apm.agent.core.plugin.EnhanceContext;
import org.skywalking.apm.agent.core.plugin.PluginBootstrap;
import org.skywalking.apm.agent.core.plugin.PluginException;
import org.skywalking.apm.agent.core.plugin.PluginFinder;

/**
* The main entrance of sky-walking agent,
@@ -103,6 +106,8 @@ public void onTransformation(TypeDescription typeDescription, ClassLoader classL
if (logger.isDebugEnable()) {
logger.debug("On Transformation class {}.", typeDescription.getName());
}

InstrumentDebuggingClass.INSTANCE.log(typeDescription, dynamicType);
}

@Override
@@ -113,7 +118,7 @@ public void onIgnored(TypeDescription typeDescription, ClassLoader classLoader,

@Override public void onError(String typeName, ClassLoader classLoader, JavaModule module, boolean loaded,
Throwable throwable) {
logger.error("Failed to enhance class " + typeName, throwable);
logger.error("Enhance class " + typeName + " error.", throwable);
}

@Override
4 changes: 4 additions & 0 deletions apm-sniffer/config/agent.config
@@ -12,6 +12,10 @@ agent.application_code=Your_ApplicationName
# Ignore the segments if their operation names end with these suffixes.
# agent.ignore_suffix=.jpg,.jpeg,.js,.css,.png,.bmp,.gif,.ico,.mp3,.mp4,.html,.svg

# If true, skywalking agent will save all instrumented class files in the `/debugging` folder.
# The skywalking team may ask for these files in order to resolve compatibility problems.
# agent.is_open_debugging_class = true

# Server addresses.
# Mapping to `agent_server/jetty/port` in `config/application.yml` of Collector.
# Examples:
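The `agent.is_open_debugging_class` entry above ends up driving the `Config.Agent.IS_OPEN_DEBUGGING_CLASS` field added in this commit. The sketch below is a rough, simplified illustration of how a flat properties-style file can be mapped onto such a static field via reflection; it is not the agent's actual `SnifferConfigInitializer`, and the file path and lookup logic are assumptions.

```java
import java.io.FileInputStream;
import java.io.IOException;
import java.lang.reflect.Field;
import java.util.Properties;

public class AgentConfigSketch {

    // Stand-in for org.skywalking.apm.agent.core.conf.Config.Agent (assumed shape, see the diff above).
    public static class Agent {
        public static boolean IS_OPEN_DEBUGGING_CLASS = false;
    }

    public static void main(String[] args) throws IOException, ReflectiveOperationException {
        Properties props = new Properties();
        // Hypothetical path; the real agent.config ships in the agent's config directory.
        try (FileInputStream in = new FileInputStream("agent.config")) {
            props.load(in);
        }

        // "agent.is_open_debugging_class=true"  ->  Agent.IS_OPEN_DEBUGGING_CLASS = true
        String value = props.getProperty("agent.is_open_debugging_class");
        if (value != null) {
            Field field = Agent.class.getField("IS_OPEN_DEBUGGING_CLASS");
            field.setBoolean(null, Boolean.parseBoolean(value.trim()));
        }
        System.out.println("IS_OPEN_DEBUGGING_CLASS = " + Agent.IS_OPEN_DEBUGGING_CLASS);
    }
}
```

The point is only that the commented-out line becomes a boolean switch once uncommented and set to `true`.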
1 change: 0 additions & 1 deletion docs/README.md
@@ -1,5 +1,4 @@
## Documents
[![version](https://img.shields.io/badge/document--version-3.2.5--2017-green.svg)]()
[![cn doc](https://img.shields.io/badge/document-中文-blue.svg)](README_ZH.md)

* Getting Started
1 change: 0 additions & 1 deletion docs/README_ZH.md
@@ -1,5 +1,4 @@
## Chinese Documents
[![version](https://img.shields.io/badge/document--version-3.2.5--2017-green.svg)]()
[![EN doc](https://img.shields.io/badge/document-English-blue.svg)](README.md)

* [Project Introduction](/README_ZH.md)
39 changes: 24 additions & 15 deletions docs/cn/Deploy-collector-in-cluster-mode-CN.md
@@ -23,49 +23,58 @@ thread_pool.bulk.queue_size: 1000

### Deploy the collector
1. Unpack the package with `tar -xvf skywalking-collector.tar.gz`; Windows users can use the zip package instead.
1. Run `bin/startup.sh` to start. Windows users use the .bat file.
2. Configure the Collector for cluster mode.

Cluster mode relies mainly on Zookeeper's registration and application-discovery capabilities. So you only need to adjust the host and port settings in `config/application.yml`, replacing the defaults with the real IPs and ports.
Then uncomment the storage section and set it to the node addresses of your Elasticsearch cluster.


- `config/application.yml`
```
cluster:
# Zookeeper address config
# Zookeeper cluster settings
zookeeper:
hostPort: localhost:2181
sessionTimeout: 100000
# The IPs configured under agent_server, agent_stream, ui and collector_inside are the addresses used by the Collector
agent_server:
naming:
# Host and port used by the agents
jetty:
host: localhost
# The port used
port: 10800
context_path: /
agent_stream:
grpc:
remote:
gRPC:
host: localhost
port: 11800
agent_gRPC:
gRPC:
host: localhost
port: 11800
agent_jetty:
jetty:
host: localhost
port: 12800
context_path: /
agent_stream:
default:
buffer_file_path: ../buffer/
buffer_offset_max_file_size: 10M
buffer_segment_max_file_size: 500M
ui:
jetty:
host: localhost
port: 12800
context_path: /
collector_inside:
grpc:
host: localhost
port: 11800
# Elasticsearch cluster connection settings
storage:
elasticsearch:
cluster_name: CollectorDBCluster
cluster_transport_sniffer: true
# Elasticsearch node addresses
cluster_nodes: localhost:9300
index_shards_number: 2
index_replicas_number: 0
ttl: 7
```

## Starting the Collector in cluster mode
Cluster mode relies mainly on Zookeeper's registration and application-discovery capabilities. So you only need to adjust the IP settings of agent_server, agent_stream, ui and collector_inside in `config/application.yml`, using real IP addresses or hostnames, and the Collector will run in cluster mode.
Then uncomment the elasticsearch section and set the cluster node addresses.

3. Run `bin/startup.sh` to start. Windows users use the .bat file.
53 changes: 5 additions & 48 deletions docs/cn/Deploy-collector-in-standalone-mode-CN.md
@@ -1,5 +1,5 @@
## Purpose
Standalone mode uses a local H2 database and does not support cluster deployment. It is mainly intended for previews, functional tests, demos and low-load systems.
# Purpose
Standalone mode uses a local H2 database and does not support cluster deployment. It is mainly intended for previews, functional tests, demos and low-load systems. You can optionally use Elasticsearch as the storage instead.

## Required third-party software
- JDK8+
@@ -10,55 +10,12 @@
## Quick Start
The standalone Collector is easy to start and provides the same features as cluster mode. Unless its ports are already taken, you can simply start it with the default config.

### Deploy the collector
## Deploy the collector
1. Unpack the package with `tar -xvf skywalking-collector.tar.gz`; Windows users can use the zip package instead.
1. Run `bin/startup.sh` to start. Windows users use the .bat file.

- `config/application.yml`
```
# No cluster-related settings are needed in standalone mode
#cluster:
# zookeeper:
# hostPort: localhost:2181
# sessionTimeout: 100000
# The IPs configured under agent_server, agent_stream, ui and collector_inside are the addresses used by the Collector
agent_server:
jetty:
host: localhost
# The port used
port: 10800
context_path: /
agent_stream:
grpc:
host: localhost
port: 11800
jetty:
host: localhost
port: 12800
context_path: /
ui:
jetty:
host: localhost
port: 12800
context_path: /
collector_inside:
grpc:
host: localhost
port: 11800
#storage:
# elasticsearch:
# cluster_name: CollectorDBCluster
# cluster_transport_sniffer: true
# Elasticsearch node addresses
# cluster_nodes: localhost:9300
# index_shards_number: 2
# index_replicas_number: 0
```

## Using Elasticsearch instead of H2 storage
Because of the performance limits of the H2 database, standalone mode also supports Elasticsearch 5.3 as the storage. You need to install the matching Elasticsearch 5.3 and uncomment the storage config.
- Besides the built-in H2 database, standalone mode also supports other storage backends (currently Elasticsearch 5.3). Just uncomment the storage config and adjust the settings. Uncomment the Storage-related config section.
- Besides the built-in H2 database, standalone mode also supports other storage backends (currently Elasticsearch 5.3). Uncomment the Storage-related config section and adjust the settings.
```yaml
#storage:
# elasticsearch:
@@ -69,7 +26,7 @@ collector_inside:
# index_replicas_number: 0
```

### Deploy Elasticsearch
## Deploy Elasticsearch
- Edit the `elasticsearch.yml` file
- Set `cluster.name: CollectorDBCluster`. This name must match the one in the collector config file.
- Set `node.name: anyname`; any name will do. If Elasticsearch runs in cluster mode, each node needs a distinct name. (A minimal reachability check is sketched after this file's diff.)
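Before pointing the collector at Elasticsearch, it can help to confirm that the cluster is reachable and reports the expected `cluster.name`. The sketch below is an optional aid, not part of the documentation change; it assumes Elasticsearch's default REST port 9200 on localhost (the 9300 in the collector config is the transport port).

```java
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class EsHealthCheck {
    public static void main(String[] args) throws Exception {
        // 9300 in application.yml is the transport port; the REST API listens on 9200 by default.
        URL url = new URL("http://localhost:9200/_cluster/health?pretty");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setConnectTimeout(3000);
        conn.setReadTimeout(3000);
        try (BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()))) {
            String line;
            while ((line = reader.readLine()) != null) {
                System.out.println(line); // expect "cluster_name" : "CollectorDBCluster" and a green/yellow status
            }
        }
    }
}
```

If the cluster name or status does not match, fix the Elasticsearch deployment before starting the collector.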
4 changes: 4 additions & 0 deletions docs/cn/Deploy-skywalking-agent-CN.md
@@ -38,6 +38,10 @@ agent.application_code=Your_ApplicationName
# The default config is as follows
# agent.ignore_suffix=.jpg,.jpeg,.js,.css,.png,.bmp,.gif,.ico,.mp3,.mp4,.html,.svg

# Agent debugging switch: if set to true, the agent writes every class whose bytecode it instruments to the /debugging directory.
# The skywalking team may need these files when debugging.
# agent.is_open_debugging_class = true

# Corresponds to the agent_server/jetty/port setting in the Collector's config/application.yml
# For example:
# Single-node config: SERVERS="127.0.0.1:8080"
41 changes: 23 additions & 18 deletions docs/en/Deploy-collector-in-cluster-mode.md
@@ -12,7 +12,7 @@
- Set `cluster.name: CollectorDBCluster`
- Set `node.name: anyname`. The name can be anything; it is only used by Elasticsearch.
- Add the following configuration to `elasticsearch.yml`

```
# The ip used for listening
network.host: 0.0.0.0
Expand All @@ -21,53 +21,58 @@ thread_pool.bulk.queue_size: 1000

- Start Elasticsearch

## Single Node Mode Collector
A single-node collector is easy to deploy and provides the same features as cluster mode. You can run it with almost all default settings. Note that the single-node defaults assume the collector, the traced application, Elasticsearch and Zookeeper all run on the same machine.

### Deploy collector servers
1. Run `tar -xvf skywalking-collector.tar.gz`
1. Run `bin/startup.sh`
2. Configure the collector for cluster mode.

Cluster mode relies on Zookeeper's registration and application-discovery capabilities. So you just need to adjust the IP config items in `config/application.yml`: change the IP and port settings of agent_server, agent_stream, ui and collector_inside, replacing them with the real IP or hostname you want to use for the cluster.

- `config/application.yml`
```
cluster:
# The address of Zookeeper
# The Zookeeper cluster for collector cluster management.
zookeeper:
hostPort: localhost:2181
sessionTimeout: 100000
# The IPs under agent_server, agent_stream, ui and collector_inside are the addresses of the Collector
agent_server:
naming:
# Host and port used for agent config
jetty:
host: localhost
# The port used
port: 10800
context_path: /
agent_stream:
grpc:
remote:
gRPC:
host: localhost
port: 11800
agent_gRPC:
gRPC:
host: localhost
port: 11800
agent_jetty:
jetty:
host: localhost
port: 12800
context_path: /
agent_stream:
default:
buffer_file_path: ../buffer/
buffer_offset_max_file_size: 10M
buffer_segment_max_file_size: 500M
ui:
jetty:
host: localhost
port: 12800
context_path: /
collector_inside:
grpc:
host: localhost
port: 11800
# Config Elasticsearch cluster connection info.
storage:
elasticsearch:
cluster_name: CollectorDBCluster
cluster_transport_sniffer: true
# The address of Elasticsearch
cluster_nodes: localhost:9300
index_shards_number: 2
index_replicas_number: 0
ttl: 7
```

## Cluster Mode Collector
Cluster mode relies on Zookeeper's registration and application-discovery capabilities. So you just need to adjust the IP config items in `config/application.yml`: change the IP and port settings of agent_server, agent_stream, ui and collector_inside, replacing them with the real IP or hostname you want to use for the cluster. (A quick Zookeeper connectivity check is sketched below.)

3. Run `bin/startup.sh`
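For the connectivity check mentioned above: the sketch below uses the plain Apache ZooKeeper Java client with the same host, port and session timeout as the sample config. It is an optional troubleshooting aid, not part of this commit; adjust the connect string for your environment.

```java
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZookeeperCheck {
    public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        // Same values as cluster/zookeeper in application.yml; replace localhost:2181 with your real hostPort.
        ZooKeeper zk = new ZooKeeper("localhost:2181", 100000, event -> {
            if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
                connected.countDown();
            }
        });
        if (connected.await(10, TimeUnit.SECONDS)) {
            System.out.println("Zookeeper reachable, client state: " + zk.getState());
        } else {
            System.out.println("No connection within 10s; check the hostPort value in config/application.yml");
        }
        zk.close();
    }
}
```

If the latch never counts down, cluster registration will fail as well, so fix the `hostPort` value or the Zookeeper deployment first.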
