Problem description
When files are uploaded from Windows to Linux over FTP, Chinese filenames show up garbled on the Linux side.
The cause: Windows encodes Chinese filenames in GBK by default, and the names stay GBK-encoded after compression or upload, while Linux filenames default to UTF-8. This encoding mismatch produces the garbled names, so the fix is to convert the filename encoding.
Solution
# Install
yum install convmv
# Convert recursively from GBK to UTF-8 (--notest performs the rename; without it convmv only does a dry run)
convmv -f gbk -t utf-8 -r --notest /home/test
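If convmv is unavailable, the same conversion can be scripted. A minimal Java sketch of what convmv does conceptually to each name (the filename literal here is a made-up example): decode the raw GBK bytes, then re-encode them as UTF-8.
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;

public class FilenameRecode {
    public static void main(String[] args) {
        // Simulate the raw GBK bytes of a Chinese filename as uploaded from Windows.
        byte[] gbkBytes = "测试文件.txt".getBytes(Charset.forName("GBK"));
        // What convmv does conceptually: decode the bytes as GBK...
        String name = new String(gbkBytes, Charset.forName("GBK"));
        // ...then re-encode them as UTF-8 for the Linux filesystem.
        byte[] utf8Bytes = name.getBytes(StandardCharsets.UTF_8);
        System.out.println(new String(utf8Bytes, StandardCharsets.UTF_8));
    }
}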
Problem description: we want to pass request parameters into a service method, and the service method is asynchronous (@Async). The parameter list obtained via request.getParameterMap() is only populated on the first call after the application starts; from the second call on it comes back empty. If the @Async annotation is removed, the parameters are retrieved correctly every time.
// Controller layer
@GetMapping("/call")
public String callback(HttpServletRequest request) {
    // service.call() is an asynchronous method
    service.call(request.getParameterMap());
    return "ok";
}
// Service layer
@Async
public String call(Map<String, String[]> extras) {
    // TODO: process extras
    return "ok";
}
// Controller layer (fixed)
@GetMapping("/call")
public String callback(HttpServletRequest request) {
    // Create a new map, copy the request parameters into it, then pass the copy to the async method
    Map<String, String[]> extras = new HashMap<>();
    extras.putAll(request.getParameterMap());
    service.call(extras);
    return "ok";
}
request.getParameterMap() returns a Map that records the mapping between the request parameters submitted by the client and their values. This return value has one peculiarity: it is read-only. Unlike an ordinary Map it cannot be modified; the servlet container imposes this restriction as a safety measure. In addition, the container reuses (recycles) the underlying request object once the response is complete, which is why an async thread still holding the original map can later see it empty. If you need to modify or retain the values, create a new map, copy the returned entries into it, and use the copy in place of the original return value. (Note this is a shallow copy: the String[] values are still shared with the original map.)
Source code analysis
// Collections.unmodifiableMap() makes the returned map read-only.
public Map getParameterMap() {
    Map result = new HashMap();
    if (!this.paramsSnapshot.isEmpty()) {
        result.putAll(this.paramsSnapshot);
    }
    result.putAll(super.getParameterMap());
    return Collections.unmodifiableMap(result);
}
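The read-only behavior is easy to reproduce with the plain JDK, outside any servlet container. A small self-contained sketch: a map wrapped by Collections.unmodifiableMap() throws UnsupportedOperationException on any mutation, which is exactly why the copy-then-modify pattern above is required.
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

public class UnmodifiableMapDemo {
    public static void main(String[] args) {
        Map<String, String[]> params = new HashMap<>();
        params.put("id", new String[]{"42"});
        Map<String, String[]> readOnly = Collections.unmodifiableMap(params);
        try {
            readOnly.put("name", new String[]{"test"}); // throws
        } catch (UnsupportedOperationException e) {
            System.out.println("read-only map: " + e);
        }
        // Copy first, then modify the copy.
        Map<String, String[]> copy = new HashMap<>(readOnly);
        copy.put("name", new String[]{"test"});
        System.out.println(copy.keySet());
    }
}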
Java: dynamically loading plugins from jar files and running them.
public interface PluginService {
    /**
     * Plugin entry point.
     */
    void process();
}
@Data
public class Plugin {
    /**
     * Plugin name
     */
    private String pluginName;
    /**
     * Full path to the jar file
     */
    private String jarPath;
    /**
     * Fully qualified class name
     */
    private String className;
}
public class PluginException extends Exception {
public PluginException(String message) {
super(message);
}
}
import lombok.Data;
import lombok.NoArgsConstructor;

import java.io.File;
import java.net.URL;
import java.net.URLClassLoader;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

@Data
@NoArgsConstructor
public class PluginManager {
    /** Loaded plugin classes, keyed by fully qualified class name. */
    private Map<String, Class<?>> clazzMap = new HashMap<>();

    public PluginManager(List<Plugin> plugins) throws PluginException {
        initPlugins(plugins);
    }

    public void initPlugin(Plugin plugin) throws PluginException {
        try {
            // Build a file: URL for the jar and load the plugin class from it.
            // The loader is intentionally kept open, since the class stays in use.
            URL url = new File(plugin.getJarPath()).toURI().toURL();
            URLClassLoader classLoader = new URLClassLoader(new URL[]{url});
            Class<?> clazz = classLoader.loadClass(plugin.getClassName());
            clazzMap.put(plugin.getClassName(), clazz);
        } catch (Exception e) {
            throw new PluginException("plugin " + plugin.getPluginName() + " init error," + e.getMessage());
        }
    }

    public void initPlugins(List<Plugin> plugins) throws PluginException {
        for (Plugin plugin : plugins) {
            initPlugin(plugin);
        }
    }

    public PluginService getInstance(String className) throws PluginException {
        Class<?> clazz = clazzMap.get(className);
        if (clazz == null) {
            throw new PluginException("plugin " + className + " not loaded");
        }
        try {
            // Class.newInstance() is deprecated; invoke the no-arg constructor reflectively.
            return (PluginService) clazz.getDeclaredConstructor().newInstance();
        } catch (Exception e) {
            throw new PluginException("plugin " + className + " instantiate error," + e.getMessage());
        }
    }
}
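A caveat not covered above: URLClassLoader keeps the jar file open, so a loaded plugin jar cannot be replaced on disk. If unloading is ever needed, the loader reference must be retained and closed; a minimal sketch under that assumption (PluginUnloader and its methods are hypothetical, not part of the code above):
import java.io.IOException;
import java.net.URLClassLoader;
import java.util.HashMap;
import java.util.Map;

// Hypothetical extension: track loaders so plugins can be unloaded later.
public class PluginUnloader {
    private final Map<String, URLClassLoader> loaders = new HashMap<>();

    public void register(String className, URLClassLoader loader) {
        loaders.put(className, loader);
    }

    public void unloadPlugin(String className) throws IOException {
        URLClassLoader loader = loaders.remove(className);
        if (loader != null) {
            // Releases the jar file handle; the classes become eligible for GC
            // once no instances or other references remain.
            loader.close();
        }
    }
}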
<!-- Configuration is stored as XML and parsed with dom4j -->
<dependency>
<groupId>org.dom4j</groupId>
<artifactId>dom4j</artifactId>
<version>2.1.1</version>
</dependency>
import org.dom4j.Document;
import org.dom4j.Element;
import org.dom4j.io.SAXReader;

import java.io.File;
import java.util.ArrayList;
import java.util.List;

public class PluginXmlParser {
    public static List<Plugin> getPluginList() throws PluginException {
        List<Plugin> list = new ArrayList<>();
        SAXReader saxReader = new SAXReader();
        Document document;
        try {
            // plugin.xml is expected in the working directory
            document = saxReader.read(new File("plugin.xml"));
        } catch (Exception e) {
            throw new PluginException("read plugin.xml error," + e.getMessage());
        }
        Element root = document.getRootElement();
        List<?> plugins = root.elements("plugin");
        for (Object pluginObj : plugins) {
            Element pluginEle = (Element) pluginObj;
            Plugin plugin = new Plugin();
            plugin.setPluginName(pluginEle.elementText("name"));
            plugin.setJarPath(pluginEle.elementText("jar"));
            plugin.setClassName(pluginEle.elementText("class"));
            list.add(plugin);
        }
        return list;
    }
}
public class Main {
    public static void main(String[] args) throws PluginException {
        // Load the plugins declared in the configuration file
        List<Plugin> pluginList = PluginXmlParser.getPluginList();
        PluginManager pluginManager = new PluginManager(pluginList);
        for (Plugin plugin : pluginList) {
            PluginService pluginService = pluginManager.getInstance(plugin.getClassName());
            System.out.println("Starting plugin [" + plugin.getPluginName() + "]...");
            // Invoke the plugin
            pluginService.process();
            System.out.println("Plugin [" + plugin.getPluginName() + "] finished");
        }
        // Load an additional plugin dynamically at runtime
        Plugin plugin = new Plugin();
        plugin.setPluginName("Plugin B");
        plugin.setJarPath("D:\\flinkDemo\\java8\\out\\artifacts\\test\\test.jar");
        plugin.setClassName("com.deri.stream.plugina.PluginB");
        pluginManager.initPlugin(plugin);
        PluginService pluginService = pluginManager.getInstance("com.deri.stream.plugina.PluginB");
        pluginService.process();
    }
}
public class PluginA implements PluginService {
@Override
public void process() {
System.out.println("Plugin A.");
}
}
public class PluginB implements PluginService {
@Override
public void process() {
System.out.println("Plugin B.");
}
}
public class PluginC implements PluginService {
@Override
public void process() {
System.out.println("Plugin C.");
}
}
<?xml version="1.0" encoding="UTF-8"?>
<plugins>
    <plugin>
        <name>Plugin A</name>
        <jar>D:\flinkDemo\java8\out\artifacts\test\test.jar</jar>
        <class>com.deri.stream.plugina.PluginA</class>
    </plugin>
</plugins>
After deployment, InfluxDB's memory usage keeps climbing as queries and other operations run, and it is not released proactively.
When the Go runtime returns freed memory to the kernel on Linux, it originally used MADV_DONTNEED. That call is relatively inefficient, but it makes RSS (resident set size) drop quickly. Go 1.12 optimized this path: memory is now released with the more efficient MADV_FREE instead of MADV_DONTNEED. The benefits are lower allocation latency after a GC and a runtime that more aggressively returns freed memory to the operating system, which helps when large allocations cannot reuse existing heap space. The side effect is that RSS no longer drops immediately; it only falls later, once the system actually comes under memory pressure.
Because InfluxDB uses MADV_FREE, real reclamation only happens when system memory is under pressure, and only then does RSS drop. You can force the old MADV_DONTNEED behavior by starting the process as GODEBUG=madvdontneed=1 ./influxdb.
MADV_FREE requires Linux kernel 4.5 or later; on older kernels the runtime keeps using the original MADV_DONTNEED approach.
// By default the Java client waits only briefly for the server to return data; when that window is exceeded it fails with the following error
org.influxdb.InfluxDBIOException: java.net.SocketTimeoutException: timeout
// Java: raise the OkHttp timeouts used by the InfluxDB client
OkHttpClient.Builder client = new OkHttpClient.Builder()
        .connectTimeout(1, TimeUnit.MINUTES)
        .readTimeout(1, TimeUnit.MINUTES)
        .writeTimeout(1, TimeUnit.MINUTES)
        .retryOnConnectionFailure(true);
InfluxDB influxdb = InfluxDBFactory.connect("http://localhost:8086", client);
// Kotlin equivalent
val client = OkHttpClient.Builder()
    .connectTimeout(1, TimeUnit.MINUTES)
    .readTimeout(1, TimeUnit.MINUTES)
    .writeTimeout(1, TimeUnit.MINUTES)
    .retryOnConnectionFailure(true)
val influxConnection = InfluxDBFactory.connect("http://localhost:8086", client)
public static void main(String[] args) {
    InfluxDB influxDB = InfluxDBFactory.connect("http://192.168.41.128:8086", "root", "root");
    String dbName = "mydb";
    influxDB.query(new Query("CREATE DATABASE " + dbName));
    long time = 1600327592000L;
    Random random = new Random();
    // 50,000 points per measurement
    int count = 50000;
    while (count > 0) {
        System.out.println(count);
        count--;
        // Points are spaced 5 seconds apart
        time = time + 5000;
        BatchPoints batchPoints = BatchPoints
                .database(dbName)
                .tag("async", "true")
                .consistency(InfluxDB.ConsistencyLevel.ALL)
                .build();
        for (int i = 0; i < 10000; i++) {
            // Measurements node-0 through node-9999 (10,000 in total), values randomly generated
            Point point1 = Point.measurement("node-" + i)
                    .time(time, TimeUnit.MILLISECONDS)
                    .addField("idle", random.nextInt(100))
                    .addField("user", random.nextInt(100))
                    .addField("system", random.nextInt(100))
                    .build();
            batchPoints.point(point1);
        }
        influxDB.write(batchPoints);
    }
    influxDB.close();
}
InfluxDB currently supports regular expressions in the following places:
- field keys and tag keys in the SELECT clause
- measurements in the FROM clause
- tag values and string field values in the WHERE clause
- tag keys in the GROUP BY clause
-- Query format
-- Supported operators
-- =~ matches against; !~ doesn't match against
SELECT /<regular_expression_field_key>/ FROM /<regular_expression_measurement>/ WHERE [<tag_key> <operator> /<regular_expression_tag_value>/ | <field_key> <operator> /<regular_expression_field_value>/] GROUP BY /<regular_expression_tag_key>/
-- Test cases
SELECT /l/ FROM "h2o_feet" LIMIT 1
SELECT MEAN("degrees") FROM /temperature/
SELECT MEAN(water_level) FROM "h2o_feet" WHERE "location" =~ /[m]/ AND "water_level" > 3
SELECT * FROM "h2o_feet" WHERE "location" !~ /./
SELECT MEAN("water_level") FROM "h2o_feet" WHERE "location" =~ /./
SELECT MEAN("water_level") FROM "h2o_feet" WHERE "location" = 'santa_monica' AND "level description" =~ /between/
SELECT FIRST("index") FROM "h2o_quality" GROUP BY /l/
Testing cross-measurement (cross-section) queries with regular expressions
-- Query all measurements at one point in time (a cross-section query)
SELECT * FROM /node-/ WHERE time='2020-09-18T01:43:27Z'
-- Query data for node-1001 through node-1005
SELECT * FROM /(node-100)[1-5]/ WHERE time='2020-09-18T01:43:27Z'
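For completeness, a sketch of running one of these regex queries from the Java client used earlier (this assumes the mydb database and node-* measurements created above; the class name RegexQueryDemo is made up for illustration):
import org.influxdb.InfluxDB;
import org.influxdb.InfluxDBFactory;
import org.influxdb.dto.Query;
import org.influxdb.dto.QueryResult;

public class RegexQueryDemo {
    public static void main(String[] args) {
        InfluxDB influxDB = InfluxDBFactory.connect("http://192.168.41.128:8086", "root", "root");
        // Cross-section query: one timestamp across every measurement matching /node-/
        QueryResult result = influxDB.query(
                new Query("SELECT * FROM /node-/ WHERE time='2020-09-18T01:43:27Z'", "mydb"));
        result.getResults().forEach(r -> {
            if (r.getSeries() != null) {
                r.getSeries().forEach(s ->
                        System.out.println(s.getName() + " -> " + s.getValues()));
            }
        });
        influxDB.close();
    }
}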