fix endpoint
This commit is contained in:
parent 6cf33f2549
commit 6abfbbc03a
@@ -14,7 +14,6 @@
 -export([start_link/1]).
 -export([get_name/1, get_pid/1, forward/3, reload/2, clean_up/1]).
 -export([get_alias_pid/1]).
--export([config_equals/2]).
 
 %%%===================================================================
 %%% API
@@ -59,16 +58,4 @@ reload(Pid, NEndpoint = #endpoint{}) when is_pid(Pid) ->
 
 -spec clean_up(Pid :: pid()) -> ok.
 clean_up(Pid) when is_pid(Pid) ->
   gen_server:call(Pid, clean_up, 5000).
-
--spec config_equals(any(), any()) -> boolean().
-config_equals(#http_endpoint{url = Url}, #http_endpoint{url = Url}) ->
-  true;
-config_equals(#kafka_endpoint{username = Username, password = Password, bootstrap_servers = BootstrapServers, topic = Topic},
-  #kafka_endpoint{username = Username, password = Password, bootstrap_servers = BootstrapServers, topic = Topic}) ->
-  true;
-config_equals(#mqtt_endpoint{host = Host, port = Port, username = Username, password = Password, topic = Topic, qos = Qos},
-  #mqtt_endpoint{host = Host, port = Port, username = Username, password = Password, topic = Topic, qos = Qos}) ->
-  true;
-config_equals(_, _) ->
-  false.
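Note that the removed config_equals/2 only compared a subset of each record's fields: two #http_endpoint{} values were treated as equal whenever their url fields matched, whatever the remaining fields contained. The sketch below restates that semantics in isolation; the local record definition is a stand-in for the real one in the project's header, and this diff does not show how callers were adjusted after the removal.

```erlang
-module(endpoint_compare_sketch).
-export([same_http_target/2]).

%% Stand-in record; the real #http_endpoint{} in the project likely has more fields.
-record(http_endpoint, {url, headers = []}).

%% Equality in the spirit of the removed config_equals/2: only the url matters,
%% so two records that differ in other fields still count as the same target.
same_http_target(#http_endpoint{url = Url}, #http_endpoint{url = Url}) -> true;
same_http_target(#http_endpoint{}, #http_endpoint{}) -> false.
```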
@@ -52,10 +52,12 @@ kafka_test() ->
     title = <<"kafka测试"/utf8>>,
     %% Config item, format: #{<<"protocol">> => <<"http|https|ws|kafka|mqtt">>, <<"args">> => #{}}
     config = #kafka_endpoint{
-      enable_sasl = true,
-      username = <<"admin">>,
-      password = <<"lz4rP5UavRTiGZEZK8G51mxHcM5iPC">>,
-      mechanism = scram_sha_256,
+      sasl_config = {
+        scram_sha_256,
+        <<"admin">>,
+        <<"lz4rP5UavRTiGZEZK8G51mxHcM5iPC">>
+      },
+
      bootstrap_servers = [
        {"127.0.0.1", 19092}
      ],
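The test record now carries the SASL credentials as a single sasl_config tuple of {Mechanism, Username, Password} instead of separate enable_sasl/username/password/mechanism fields. A minimal sketch of how such a tuple could be fed to a Kafka client is shown below; it assumes a brod-based producer, which this commit does not show, so the function and client name are hypothetical.

```erlang
-module(kafka_sasl_sketch).
-export([start_kafka_client/2]).

%% Hypothetical glue between #kafka_endpoint.sasl_config and a brod client.
%% brod accepts {sasl, {Mechanism, Username, Password}} in its client config,
%% where Mechanism is plain | scram_sha_256 | scram_sha_512.
start_kafka_client(BootstrapServers, SaslConfig) ->
    ClientConfig =
        case SaslConfig of
            undefined ->
                [];
            {_Mechanism, _User, _Pass} = Sasl ->
                [{sasl, Sasl}]
        end,
    %% e.g. BootstrapServers = [{"127.0.0.1", 19092}], as in the test above.
    brod:start_client(BootstrapServers, kafka_endpoint_client, ClientConfig).
```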
@@ -17,6 +17,7 @@
    hackney,
    poolboy,
    mysql,
+   gproc,
    % gpb,
    esockd,
    mnesia,
@@ -1,174 +0,0 @@
-%%%-------------------------------------------------------------------
-%%% @author aresei
-%%% @copyright (C) 2023, <COMPANY>
-%%% @doc
-%%% 1. Cluster deployment has to be considered: upstream data may be shared across the cluster
-%%% 2. Host processes must not subscribe to topics directly, since that raises many problems when hosts are added or taken offline
-%%% @end
-%%% Created : 12. Mar 2023 21:27
-%%%-------------------------------------------------------------------
--module(iot_mqtt_subscriber).
--author("aresei").
--include("iot.hrl").
-
--behaviour(gen_server).
-
-%% API
--export([start_link/0]).
-
-%% gen_server callbacks
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]).
-
--define(SERVER, ?MODULE).
-
-%% Topics that need to be subscribed to
--define(Topics,[
-  {<<"CET/NX/upload">>, 2}
-]).
-
--record(state, {
-  conn_pid :: pid()
-}).
-
-%%%===================================================================
-%%% API
-%%%===================================================================
-
-%% @doc Spawns the server and registers the local name (unique)
--spec(start_link() ->
-  {ok, Pid :: pid()} | ignore | {error, Reason :: term()}).
-start_link() ->
-  gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-%%%===================================================================
-%%% gen_server callbacks
-%%%===================================================================
-
-%% @private
-%% @doc Initializes the server
--spec(init(Args :: term()) ->
-  {ok, State :: #state{}} | {ok, State :: #state{}, timeout() | hibernate} |
-  {stop, Reason :: term()} | ignore).
-init([]) ->
-  %% Establish the connection to the emqx server
-  Opts = emqt_opts(<<"host-subscriber">>),
-  lager:debug("[opts] is: ~p", [Opts]),
-  case emqtt:start_link(Opts) of
-    {ok, ConnPid} ->
-      %% Listen for all host-related events
-      lager:debug("[iot_mqtt_subscriber] start conntecting, pid: ~p", [ConnPid]),
-      {ok, _} = emqtt:connect(ConnPid),
-      lager:debug("[iot_mqtt_subscriber] connect success, pid: ~p", [ConnPid]),
-      SubscribeResult = emqtt:subscribe(ConnPid, ?Topics),
-
-      lager:debug("[iot_mqtt_subscriber] subscribe topics: ~p, result is: ~p", [?Topics, SubscribeResult]),
-
-      {ok, #state{conn_pid = ConnPid}};
-    ignore ->
-      lager:debug("[iot_mqtt_subscriber] connect emqx get ignore"),
-      {stop, ignore};
-    {error, Reason} ->
-      lager:debug("[iot_mqtt_subscriber] connect emqx get error: ~p", [Reason]),
-      {stop, Reason}
-  end.
-
-%% @private
-%% @doc Handling call messages
--spec(handle_call(Request :: term(), From :: {pid(), Tag :: term()},
-    State :: #state{}) ->
-  {reply, Reply :: term(), NewState :: #state{}} |
-  {reply, Reply :: term(), NewState :: #state{}, timeout() | hibernate} |
-  {noreply, NewState :: #state{}} |
-  {noreply, NewState :: #state{}, timeout() | hibernate} |
-  {stop, Reason :: term(), Reply :: term(), NewState :: #state{}} |
-  {stop, Reason :: term(), NewState :: #state{}}).
-handle_call(_Info, _From, State = #state{conn_pid = _ConnPid}) ->
-  {reply, ok, State}.
-
-%% @private
-%% @doc Handling cast messages
--spec(handle_cast(Request :: term(), State :: #state{}) ->
-  {noreply, NewState :: #state{}} |
-  {noreply, NewState :: #state{}, timeout() | hibernate} |
-  {stop, Reason :: term(), NewState :: #state{}}).
-handle_cast(_Request, State = #state{}) ->
-  {noreply, State}.
-
-%% @private
-%% @doc Handling all non call/cast messages
--spec(handle_info(Info :: timeout() | term(), State :: #state{}) ->
-  {noreply, NewState :: #state{}} |
-  {noreply, NewState :: #state{}, timeout() | hibernate} |
-  {stop, Reason :: term(), NewState :: #state{}}).
-handle_info({disconnect, ReasonCode, Properties}, State = #state{}) ->
-  lager:debug("[iot_mqtt_subscriber] Recv a DISONNECT packet - ReasonCode: ~p, Properties: ~p", [ReasonCode, Properties]),
-  {stop, disconnected, State};
-%% Messages must be dispatched quickly; json deserialization of the payload has to happen in the host process
-handle_info({publish, #{packet_id := _PacketId, payload := Payload, qos := Qos, topic := Topic}}, State = #state{conn_pid = _ConnPid}) ->
-  lager:debug("[iot_mqtt_subscriber] Recv a topic: ~p, publish packet: ~p, qos: ~p", [Topic, Payload, Qos]),
-  %% Dispatch the message to the corresponding host process for handling
-  {noreply, State};
-handle_info({puback, Packet = #{packet_id := _PacketId}}, State = #state{}) ->
-  lager:debug("[iot_mqtt_subscriber] receive puback packet: ~p", [Packet]),
-  {noreply, State};
-
-handle_info(Info, State = #state{}) ->
-  lager:debug("[iot_mqtt_subscriber] get info: ~p", [Info]),
-  {noreply, State}.
-
-%% @private
-%% @doc This function is called by a gen_server when it is about to
-%% terminate. It should be the opposite of Module:init/1 and do any
-%% necessary cleaning up. When it returns, the gen_server terminates
-%% with Reason. The return value is ignored.
--spec(terminate(Reason :: (normal | shutdown | {shutdown, term()} | term()),
-    State :: #state{}) -> term()).
-terminate(Reason, _State = #state{conn_pid = ConnPid}) when is_pid(ConnPid) ->
-  %% Unsubscribe from the topics
-  TopicNames = lists:map(fun({Name, _}) -> Name end, ?Topics),
-  {ok, _Props, _ReasonCode} = emqtt:unsubscribe(ConnPid, #{}, TopicNames),
-
-  ok = emqtt:disconnect(ConnPid),
-  lager:debug("[iot_mqtt_subscriber] terminate with reason: ~p", [Reason]),
-  ok;
-terminate(Reason, _State) ->
-  lager:debug("[iot_mqtt_subscriber] terminate with reason: ~p", [Reason]),
-  ok.
-
-%% @private
-%% @doc Convert process state when code is changed
--spec(code_change(OldVsn :: term() | {down, term()}, State :: #state{},
-    Extra :: term()) ->
-  {ok, NewState :: #state{}} | {error, Reason :: term()}).
-code_change(_OldVsn, State = #state{}, _Extra) ->
-  {ok, State}.
-
-%%%===================================================================
-%%% Internal functions
-%%%===================================================================
-
-emqt_opts(ClientSuffix) when is_binary(ClientSuffix) ->
-  %% Establish the connection to the emqx server
-  {ok, Props} = application:get_env(iot, emqx_server),
-  EMQXHost = proplists:get_value(host, Props),
-  EMQXPort = proplists:get_value(port, Props, 18080),
-  Username = proplists:get_value(username, Props),
-  Password = proplists:get_value(password, Props),
-  RetryInterval = proplists:get_value(retry_interval, Props, 5),
-  Keepalive = proplists:get_value(keepalive, Props, 86400),
-
-  Node = atom_to_binary(node()),
-  ClientId = <<"mqtt-client-", Node/binary, "-", ClientSuffix/binary>>,
-  [
-    {clientid, ClientId},
-    {host, EMQXHost},
-    {port, EMQXPort},
-    {owner, self()},
-    {tcp_opts, []},
-    {username, Username},
-    {password, Password},
-    {keepalive, Keepalive},
-    {auto_ack, true},
-    {proto_ver, v5},
-    {retry_interval, RetryInterval}
-  ].
@@ -1,4 +1,4 @@
--name iot
+-sname iot
 
 -setcookie iot_cookie
 
@@ -11,6 +11,7 @@
   {eredis, ".*", {git, "https://github.com/wooga/eredis.git", {tag, "v1.2.0"}}},
   {gpb, ".*", {git, "https://github.com/tomas-abrahamsson/gpb.git", {tag, "4.20.0"}}},
   {emqtt, ".*", {git, "https://gitea.s5s8.com/anlicheng/emqtt.git", {branch, "main"}}},
+  {gproc, ".*", {git, "https://github.com/uwiger/gproc.git", {tag, "0.9.1"}}},
   {parse_trans, ".*", {git, "https://github.com/uwiger/parse_trans", {tag, "3.0.0"}}},
   {lager, ".*", {git,"https://github.com/erlang-lager/lager.git", {tag, "3.9.2"}}}
 ]}.
@@ -31,6 +31,10 @@
      {git,"https://github.com/tomas-abrahamsson/gpb.git",
          {ref,"edda1006d863a09509673778c455d33d88e6edbc"}},
      0},
+ {<<"gproc">>,
+     {git,"https://github.com/uwiger/gproc.git",
+         {ref,"4ca45e0a97722a418a31eb1753f4e3b953f7fb1d"}},
+     0},
  {<<"hackney">>,
      {git,"https://github.com/benoitc/hackney.git",
          {ref,"f3e9292db22c807e73f57a8422402d6b423ddf5f"}},
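gproc appears in three places in this commit: the application list, the rebar deps, and the lock file. The endpoint module above exports get_alias_pid/1, so one plausible use is registering endpoint processes under symbolic aliases; the sketch below shows that pattern with gproc's locally scoped unique names. The key shape and function names are assumptions, since the diff does not include the code that actually calls gproc.

```erlang
-module(endpoint_alias_sketch).
-export([register_alias/1, whereis_alias/1]).

%% Register the calling process under a locally unique gproc name.
%% {n, l, Key} means: unique name, local scope.
register_alias(Alias) ->
    true = gproc:reg({n, l, {endpoint_alias, Alias}}),
    ok.

%% Resolve an alias to a pid; returns undefined if nothing is registered.
whereis_alias(Alias) ->
    gproc:where({n, l, {endpoint_alias, Alias}}).
```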
@@ -1,26 +1,27 @@
 ## Overall Architecture
+The system can be understood as deployment management of microservices plus collection of their data; each microservice has a globally unique uuid, its service_id.
 
-## Communication Model
-1. Microservice (tcp + json) => efka (ssl + protobuf) => iot
-2. SSL encryption/decryption is handled by an nginx reverse proxy, using nginx to improve the throughput of encrypting/decrypting the submitted data
+## Changes to the Communication Model
+1. Microservice (tcp|websocket + json) => efka (ssl + protobuf) => iot (efka and iot were exchanging json, which put too much parsing load on the server side)
+2. SSL encryption/decryption is handled by an nginx reverse proxy, using nginx to improve the throughput of encrypting/decrypting the submitted data (done)
 
-## Data Model
-
 ## Completed
-1. Microservice deploy/start/stop functionality
+1. Microservice deploy/start/stop functionality; microservices must be started in foreground mode
 2. The tcp-based communication protocol between microservices and efka is in place
 3. When the cloud service is offline, efka caches the files produced by microservices
 4. Deployment logging: the admin backend can fetch all logs related to a deployment by taskId
 
 ## To Do
 1. Add websocket-based communication + json format between microservices and efka => with the protocol documented, no sdk needs to be provided for microservices
-2. Multi-tenancy
+2. Scenario orchestration
+3. Multi-tenancy (the admin backend has to cooperate as well)
 
 ## Implementation Changes
 1. Merge the configuration items and the collection items into a single configuration item, managed as json
-2. Data used to be aes-encrypted in transit; changed to encrypting all traffic at the connection level (based on: the efka side based on ssl, the cloud side on an nginx reverse proxy)
+2. Data used to be aes-encrypted in transit; changed to encrypting all traffic at the connection level (ssl on the efka side, an nginx reverse proxy on the cloud side)
 3. Remove the restriction on the format of uploaded data; the only constraint along the whole data path is that data is binary, which makes every data type compatible (efka and iot do not actually care about the payload; they only forward and route)
 4. todo: when a microservice starts, it should associate itself with efka via the register call, and the response should return config_json directly, so the service does not have to fetch it manually after startup
 ## Newly Added
 1. pub/sub mechanism: microservices can listen for the messages they care about by topic (unifying the one-to-one and one-to-many messaging mechanisms)
 2. Added endpoints, so data can be routed to http|kafka|mqtt; added a routing mechanism where reported data is routed to the matching endpoint by its route_key
+3. Added remote invocation
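Item 2 under "Newly Added" describes routing reported data to an endpoint by route_key. A minimal sketch of such a dispatch step is shown below; the Routes map, the module name iot_endpoint, and the argument order of forward/3 are assumptions, since the routing code itself is not part of this commit.

```erlang
-module(route_dispatch_sketch).
-export([dispatch/3]).

%% Look up the endpoint process registered for a route key and hand the
%% binary payload over via the endpoint's forward/3 API (exported above).
dispatch(Routes, RouteKey, Payload) when is_map(Routes), is_binary(Payload) ->
    case maps:find(RouteKey, Routes) of
        {ok, EndpointPid} when is_pid(EndpointPid) ->
            %% The argument order is assumed; only the arity is visible in the diff.
            iot_endpoint:forward(EndpointPid, RouteKey, Payload);
        error ->
            {error, {no_route, RouteKey}}
    end.
```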