lager-3.8.0/0000755000232200023220000000000013523436621013167 5ustar debalancedebalancelager-3.8.0/priv/0000755000232200023220000000000013523436621014147 5ustar debalancedebalancelager-3.8.0/priv/edoc.css0000644000232200023220000000422013523436621015571 0ustar debalancedebalance/* Baseline rhythm */ body { font-size: 16px; font-family: Helvetica, sans-serif; margin: 8px; } p { font-size: 1em; /* 16px */ line-height: 1.5em; /* 24px */ margin: 0 0 1.5em 0; } h1 { font-size: 1.5em; /* 24px */ line-height: 1em; /* 24px */ margin-top: 1em; margin-bottom: 0em; } h2 { font-size: 1.375em; /* 22px */ line-height: 1.0909em; /* 24px */ margin-top: 1.0909em; margin-bottom: 0em; } h3 { font-size: 1.25em; /* 20px */ line-height: 1.2em; /* 24px */ margin-top: 1.2em; margin-bottom: 0em; } h4 { font-size: 1.125em; /* 18px */ line-height: 1.3333em; /* 24px */ margin-top: 1.3333em; margin-bottom: 0em; } .class-for-16px { font-size: 1em; /* 16px */ line-height: 1.5em; /* 24px */ margin-top: 1.5em; margin-bottom: 0em; } .class-for-14px { font-size: 0.875em; /* 14px */ line-height: 1.7143em; /* 24px */ margin-top: 1.7143em; margin-bottom: 0em; } ul { margin: 0 0 1.5em 0; } /* Customizations */ body { color: #333; } tt, code, pre { font-family: "Andale Mono", "Inconsolata", "Monaco", "DejaVu Sans Mono", monospaced; } tt, code { font-size: 0.875em } pre { font-size: 0.875em; /* 14px */ line-height: 1.7143em; /* 24px */ margin: 0 1em 1.7143em; padding: 0 1em; background: #eee; } .navbar img, hr { display: none } table { border-collapse: collapse; } h1 { border-left: 0.5em solid #fa0; padding-left: 0.5em; } h2.indextitle { font-size: 1.25em; /* 20px */ line-height: 1.2em; /* 24px */ margin: -8px -8px 0.6em; background-color: #fa0; color: white; padding: 0.3em; } ul.index { list-style: none; margin-left: 0em; padding-left: 0; } ul.index li { display: inline; padding-right: 0.75em } div.spec p { margin-bottom: 0; padding-left: 1.25em; background-color: #eee; } h3.function { border-left: 0.5em solid #fa0; padding-left: 0.5em; background: #fc9; } a, a:visited, a:hover, a:active { color: #C60 } h2 a, h3 a { color: #333 } i { font-size: 0.875em; /* 14px */ line-height: 1.7143em; /* 24px */ margin-top: 1.7143em; margin-bottom: 0em; font-style: normal; } lager-3.8.0/test/0000755000232200023220000000000013523436621014146 5ustar debalancedebalancelager-3.8.0/test/special_process.erl0000644000232200023220000000125013523436621020026 0ustar debalancedebalance-module(special_process). -export([start/0, init/1]). start() -> proc_lib:start_link(?MODULE, init, [self()]). init(Parent) -> proc_lib:init_ack(Parent, {ok, self()}), loop(). loop() -> receive function_clause -> foo(bar), loop(); exit -> exit(byebye), loop(); error -> erlang:error(mybad), loop(); {case_clause, X} -> case X of notgonnamatch -> ok; notthiseither -> error end, loop(); _ -> loop() end. foo(baz) -> ok. lager-3.8.0/test/pr_composite_test.erl0000644000232200023220000000304513523436621020416 0ustar debalancedebalance-module(pr_composite_test). -compile([{parse_transform, lager_transform}]). -record(a, {field1 :: term(), field2 :: term()}). -record(b, {field1 :: term() , field2 :: term()}). -include_lib("eunit/include/eunit.hrl"). nested_record_test() -> A = #a{field1 = x, field2 = y}, B = #b{field1 = A, field2 = {}}, Pr_B = lager:pr(B, ?MODULE), ?assertEqual({'$lager_record', b, [{field1, {'$lager_record', a, [{field1, x},{field2, y}]}}, {field2, {}}]}, Pr_B). 
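%% Illustrative note (not part of the original archive): lager:pr/2 is
%% normally used inline in a log call, relying on the lager_transform
%% parse transform enabled above, so that records defined in the calling
%% module are expanded by field name in the formatted output, e.g.
%%
%%     lager:info("b record: ~p", [lager:pr(#b{field1 = x, field2 = y}, ?MODULE)]).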
list_field_test() -> As = [#a{field1 = 1, field2 = a2}, #a{field1 = 2, field2 = a2}], B = #b{field1 = As, field2 = b2}, Pr_B = lager:pr(B, ?MODULE), ?assertEqual({'$lager_record', b, [{field1, [{'$lager_record', a, [{field1, 1},{field2, a2}]}, {'$lager_record', a, [{field1, 2},{field2, a2}]}]}, {field2, b2}]}, Pr_B). list_of_records_test() -> As = [#a{field1 = 1, field2 = a2}, #a{field1 = 2, field2 = a2}], Pr_As = lager:pr(As, ?MODULE), ?assertEqual([{'$lager_record', a, [{field1, 1},{field2, a2}]}, {'$lager_record', a, [{field1, 2},{field2, a2}]}], Pr_As). improper_list_test() -> A = #a{field1 = [1|2], field2 = a2}, Pr_A = lager:pr(A, ?MODULE), ?assertEqual({'$lager_record',a, [{field1,[1|2]},{field2,a2}]}, Pr_A). lager-3.8.0/test/pr_stacktrace_test.erl0000644000232200023220000000412513523436621020540 0ustar debalancedebalance-module(pr_stacktrace_test). -compile([{parse_transform, lager_transform}]). -ifdef(OTP_RELEASE). %% this implies 21 or higher -define(EXCEPTION(Class, Reason, Stacktrace), Class:Reason:Stacktrace). -define(GET_STACK(Stacktrace), Stacktrace). -else. -define(EXCEPTION(Class, Reason, _), Class:Reason). -define(GET_STACK(_), erlang:get_stacktrace()). -endif. -include_lib("eunit/include/eunit.hrl"). make_throw() -> throw({test, exception}). bad_arity() -> lists:concat([], []). bad_arg() -> integer_to_list(1.0). pr_stacktrace_throw_test() -> Got = try make_throw() catch ?EXCEPTION(Class, Reason, Stacktrace) -> lager:pr_stacktrace(?GET_STACK(Stacktrace), {Class, Reason}) end, Want = "pr_stacktrace_test:pr_stacktrace_throw_test/0 line 26\n pr_stacktrace_test:make_throw/0 line 16\nthrow:{test,exception}", ?assertNotEqual(nomatch, string:find(Got, Want)). pr_stacktrace_bad_arg_test() -> Got = try bad_arg() catch ?EXCEPTION(Class, Reason, Stacktrace) -> lager:pr_stacktrace(?GET_STACK(Stacktrace), {Class, Reason}) end, Want = "pr_stacktrace_test:pr_stacktrace_bad_arg_test/0 line 36\n pr_stacktrace_test:bad_arg/0 line 22\nerror:badarg", ?assertNotEqual(nomatch, string:find(Got, Want)). pr_stacktrace_bad_arity_test() -> Got = try bad_arity() catch ?EXCEPTION(Class, Reason, Stacktrace) -> lager:pr_stacktrace(?GET_STACK(Stacktrace), {Class, Reason}) end, Want = "pr_stacktrace_test:pr_stacktrace_bad_arity_test/0 line 46\n lists:concat([], [])\nerror:undef", ?assertNotEqual(nomatch, string:find(Got, Want)). pr_stacktrace_no_reverse_test() -> application:set_env(lager, reverse_pretty_stacktrace, false), Got = try bad_arity() catch ?EXCEPTION(Class, Reason, Stacktrace) -> lager:pr_stacktrace(?GET_STACK(Stacktrace), {Class, Reason}) end, Want = "error:undef\n lists:concat([], [])\n pr_stacktrace_test:pr_stacktrace_bad_arity_test/0 line 57", ?assertEqual(nomatch, string:find(Got, Want)). lager-3.8.0/test/crash_fsm.erl0000644000232200023220000000167313523436621016626 0ustar debalancedebalance-module(crash_fsm). -behaviour(gen_fsm). -compile([{nowarn_deprecated_function, [{gen_fsm, start, 4}, {gen_fsm, sync_send_event, 2}]}]). -export([start/0, crash/0, state1/2]). %% gen_fsm callbacks -export([init/1, handle_event/3, handle_sync_event/4, handle_info/3, terminate/3, code_change/4]). -record(state, {}). start() -> gen_fsm:start({local, ?MODULE}, ?MODULE, [], []). crash() -> gen_fsm:sync_send_event(?MODULE, crash). %% gen_fsm callbacks init([]) -> {ok, state1, #state{}}. handle_event(_Event, StateName, State) -> {next_state, StateName, State}. handle_sync_event(_Event, _From, StateName, State) -> Reply = ok, {reply, Reply, StateName, State}. 
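%% Illustrative note (not part of the original archive): crash/0 above
%% issues a synchronous event via gen_fsm:sync_send_event/2, but only the
%% asynchronous callback state1/2 is defined below, so the gen_fsm dies
%% with an undef of crash_fsm:state1/3. lager_test_backend's
%% crash_fsm_test_/0 relies on exactly this to exercise the error_logger
%% redirect, e.g.
%%
%%     spawn_fsm_crash(crash_fsm, crash, []).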
handle_info(_Info, StateName, State) -> {next_state, StateName, State}. terminate(_Reason, _StateName, _State) -> ok. code_change(_OldVersion, StateName, State, _Extra) -> {ok, StateName, State}. state1(_Event, S) -> {next_state, state1, S}. lager-3.8.0/test/lager_rotate.erl0000644000232200023220000001605213523436621017326 0ustar debalancedebalance%% ------------------------------------------------------------------- %% %% Copyright (c) 2016-2017 Basho Technologies, Inc. %% %% This file is provided to you under the Apache License, %% Version 2.0 (the "License"); you may not use this file %% except in compliance with the License. You may obtain %% a copy of the License at %% %% http://www.apache.org/licenses/LICENSE-2.0 %% %% Unless required by applicable law or agreed to in writing, %% software distributed under the License is distributed on an %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY %% KIND, either express or implied. See the License for the %% specific language governing permissions and limitations %% under the License. %% %% ------------------------------------------------------------------- -module(lager_rotate). -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). -endif. -record(state, { dir :: string(), log1 :: string(), log1r :: string(), log2 :: string(), log2r :: string(), sink :: string(), sinkr :: string() }). rotate_test_() -> {foreach, fun() -> {ok, Dir} = lager_util:create_test_dir(), Log1 = filename:join(Dir, "test1.log"), Log2 = filename:join(Dir, "test2.log"), Sink = filename:join(Dir, "sink.log"), State = #state{ dir = Dir, log1 = Log1, log1r = Log1 ++ ".0", log2 = Log2, log2r = Log2 ++ ".0", sink = Sink, sinkr = Sink ++ ".0" }, file:write_file(Log1, []), file:write_file(Log2, []), file:write_file(Sink, []), error_logger:tty(false), application:load(lager), application:set_env(lager, handlers, [ {lager_file_backend, [{file, Log1}, {level, info}]}, {lager_file_backend, [{file, Log2}, {level, info}]} ]), application:set_env(lager, extra_sinks, [ {sink_event, [{handlers, [{lager_file_backend, [{file, Sink}, {level, info}]}]} ]}]), application:set_env(lager, error_logger_redirect, false), application:set_env(lager, async_threshold, undefined), lager:start(), timer:sleep(1000), State end, fun(#state{}) -> ok = application:stop(lager), ok = application:stop(goldrush), ok = lager_util:delete_test_dir(), ok = error_logger:tty(true) end, [ fun(State) -> {"Rotate single file", fun() -> lager:log(error, self(), "Test message 1"), lager:log(sink_event, error, self(), "Sink test message 1", []), lager:rotate_handler({lager_file_backend, State#state.log1}), ok = wait_until(fun() -> filelib:is_regular(State#state.log1r) end, 10), lager:log(error, self(), "Test message 2"), lager:log(sink_event, error, self(), "Sink test message 2", []), {ok, File1} = file:read_file(State#state.log1), {ok, File2} = file:read_file(State#state.log2), {ok, SinkFile} = file:read_file(State#state.sink), {ok, File1Old} = file:read_file(State#state.log1r), have_no_log(File1, <<"Test message 1">>), have_log(File1, <<"Test message 2">>), have_log(File2, <<"Test message 1">>), have_log(File2, <<"Test message 2">>), have_log(File1Old, <<"Test message 1">>), have_no_log(File1Old, <<"Test message 2">>), have_log(SinkFile, <<"Sink test message 1">>), have_log(SinkFile, <<"Sink test message 2">>) end} end, fun(State) -> {"Rotate sink", fun() -> lager:log(error, self(), "Test message 1"), lager:log(sink_event, error, self(), "Sink test message 1", []), lager:rotate_sink(sink_event), ok = 
wait_until(fun() -> filelib:is_regular(State#state.sinkr) end, 10), lager:log(error, self(), "Test message 2"), lager:log(sink_event, error, self(), "Sink test message 2", []), {ok, File1} = file:read_file(State#state.log1), {ok, File2} = file:read_file(State#state.log2), {ok, SinkFile} = file:read_file(State#state.sink), {ok, SinkFileOld} = file:read_file(State#state.sinkr), have_log(File1, <<"Test message 1">>), have_log(File1, <<"Test message 2">>), have_log(File2, <<"Test message 1">>), have_log(File2, <<"Test message 2">>), have_log(SinkFileOld, <<"Sink test message 1">>), have_no_log(SinkFileOld, <<"Sink test message 2">>), have_no_log(SinkFile, <<"Sink test message 1">>), have_log(SinkFile, <<"Sink test message 2">>) end} end, fun(State) -> {"Rotate all", fun() -> lager:log(error, self(), "Test message 1"), lager:log(sink_event, error, self(), "Sink test message 1", []), lager:rotate_all(), ok = wait_until(fun() -> filelib:is_regular(State#state.sinkr) end, 10), lager:log(error, self(), "Test message 2"), lager:log(sink_event, error, self(), "Sink test message 2", []), {ok, File1} = file:read_file(State#state.log1), {ok, File2} = file:read_file(State#state.log2), {ok, SinkFile} = file:read_file(State#state.sink), {ok, File1Old} = file:read_file(State#state.log1r), {ok, File2Old} = file:read_file(State#state.log2r), {ok, SinkFileOld} = file:read_file(State#state.sinkr), have_no_log(File1, <<"Test message 1">>), have_log(File1, <<"Test message 2">>), have_no_log(File2, <<"Test message 1">>), have_log(File2, <<"Test message 2">>), have_no_log(SinkFile, <<"Sink test message 1">>), have_log(SinkFile, <<"Sink test message 2">>), have_log(SinkFileOld, <<"Sink test message 1">>), have_no_log(SinkFileOld, <<"Sink test message 2">>), have_log(File1Old, <<"Test message 1">>), have_no_log(File1Old, <<"Test message 2">>), have_log(File2Old, <<"Test message 1">>), have_no_log(File2Old, <<"Test message 2">>) end} end ]}. have_log(Data, Log) -> {_,_} = binary:match(Data, Log). have_no_log(Data, Log) -> nomatch = binary:match(Data, Log). wait_until(_Fun, 0) -> {error, too_many_retries}; wait_until(Fun, Retry) -> case Fun() of true -> ok; false -> timer:sleep(500), wait_until(Fun, Retry-1) end. lager-3.8.0/test/sync_error_logger.erl0000644000232200023220000000540113523436621020376 0ustar debalancedebalance%% %% %CopyrightBegin% %% %% Copyright Ericsson AB 1996-2009. All Rights Reserved. %% %% The contents of this file are subject to the Erlang Public License, %% Version 1.1, (the "License"); you may not use this file except in %% compliance with the License. You should have received a copy of the %% Erlang Public License along with this software. If not, it can be %% retrieved online at http://www.erlang.org/. %% %% Software distributed under the License is distributed on an "AS IS" %% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See %% the License for the specific language governing rights and limitations %% under the License. %% %% %CopyrightEnd% %% -module(sync_error_logger). %% The error_logger API, but synchronous! %% This is helpful for tests, otherwise you need lots of nasty timer:sleep. %% Additionally, the warning map can be set on a per-process level, for %% convienience, via the process dictionary value `warning_map'. -export([ info_msg/1, info_msg/2, warning_msg/1, warning_msg/2, error_msg/1,error_msg/2 ]). -export([ info_report/1, info_report/2, warning_report/1, warning_report/2, error_report/1, error_report/2 ]). info_msg(Format) -> info_msg(Format, []). 
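%% Illustrative note (not part of the original archive): these wrappers
%% mirror the error_logger API but post through gen_event:sync_notify/2,
%% so a test can emit a legacy error_logger message and immediately
%% assert on it without sleeping, e.g.
%%
%%     sync_error_logger:info_msg("doom, doom has come upon you all"),
%%     _ = gen_event:which_handlers(error_logger),
%%     {Level, _, Msg, _Md} = lager_test_backend:pop().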
info_msg(Format, Args) -> gen_event:sync_notify(error_logger, {info_msg, group_leader(), {self(), Format, Args}}). warning_msg(Format) -> warning_msg(Format, []). warning_msg(Format, Args) -> gen_event:sync_notify(error_logger, {warning_msg_tag(), group_leader(), {self(), Format, Args}}). error_msg(Format) -> error_msg(Format, []). error_msg(Format, Args) -> gen_event:sync_notify(error_logger, {error, group_leader(), {self(), Format, Args}}). info_report(Report) -> info_report(std_info, Report). info_report(Type, Report) -> gen_event:sync_notify(error_logger, {info_report, group_leader(), {self(), Type, Report}}). warning_report(Report) -> warning_report(std_warning, Report). warning_report(Type, Report) -> {Tag, NType} = warning_report_tag(Type), gen_event:sync_notify(error_logger, {Tag, group_leader(), {self(), NType, Report}}). error_report(Report) -> error_report(std_error, Report). error_report(Type, Report) -> gen_event:sync_notify(error_logger, {error_report, group_leader(), {self(), Type, Report}}). warning_msg_tag() -> case get(warning_map) of warning -> warning_msg; info -> info_msg; _ -> error end. warning_report_tag(Type) -> case {get(warning_map), Type == std_warning} of {warning, _} -> {warning_report, Type}; {info, true} -> {info_report, std_info}; {info, false} -> {info_report, Type}; {_, true} -> {error_report, std_error}; {_, false} -> {error_report, Type} end. lager-3.8.0/test/lager_trace_test.erl0000644000232200023220000000746313523436621020173 0ustar debalancedebalance-module(lager_trace_test). -compile([{parse_transform, lager_transform}]). -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). % Our expectation is that the first log entry will appear so we won't actually % wait out ?FIRST_LOG_ENTRY_TIMEOUT. On the other hand, the second log entry is % expected never to arrive, so the test will wait out ?SECOND_LOG_ENTRY_TIMEOUT; % that's why it is shorter. -define(FIRST_LOG_ENTRY_TIMEOUT, (10 * 1000)). % 10 seconds -define(SECOND_LOG_ENTRY_TIMEOUT, 1000). % 1 second -define(FNAME, "test/test1.log"). trace_test_() -> {timeout, 10, {foreach, fun() -> file:write_file(?FNAME, ""), error_logger:tty(false), application:load(lager), application:set_env(lager, log_root, "test"), application:set_env(lager, handlers, [{lager_file_backend, [{file, "test1.log"}, {level, none}, {formatter, lager_default_formatter}, {formatter_config, [message, "\n"]} ]}]), application:set_env(lager, traces, [{{lager_file_backend, "test1.log"}, [{tag, mytag}], info}]), application:set_env(lager, error_logger_redirect, false), application:set_env(lager, async_threshold, undefined), lager:start() end, fun(_) -> file:delete(?FNAME), application:stop(lager), application:stop(goldrush), application:unset_env(lager, log_root), application:unset_env(lager, handlers), application:unset_env(lager, traces), application:unset_env(lager, error_logger_redirect), application:unset_env(lager, async_threshold), error_logger:tty(true) end, [{"Trace combined with log_root", fun() -> lager:info([{tag, mytag}], "Test message"), % Wait until we have the expected log entry in the log file. case wait_until(fun() -> count_lines(?FNAME) >= 1 end, ?FIRST_LOG_ENTRY_TIMEOUT) of ok -> ok; {error, timeout} -> throw({file_empty, file:read_file(?FNAME)}) end, % Let's wait a little to see that we don't get a duplicate log % entry. case wait_until(fun() -> count_lines(?FNAME) >= 2 end, ?SECOND_LOG_ENTRY_TIMEOUT) of ok -> throw({too_many_entries, file:read_file(?FNAME)}); {error, timeout} -> ok end end} ]}}. 
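% Illustrative note (not part of the original archive): the `traces'
% application variable used in the setup above follows the shape
% {Handler, Filter, Level}; the equivalent sys.config entry would look
% roughly like
%
%     {lager, [{traces, [{{lager_file_backend, "test1.log"},
%                         [{tag, mytag}], info}]}]}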
% Wait until Fun() returns true. wait_until(Fun, Timeout) -> wait_until(Fun, Timeout, {8, 13}). wait_until(_Fun, Timeout, {T1, _}) when T1 > Timeout -> {error, timeout}; wait_until(Fun, Timeout, {T1, T2}) -> case Fun() of true -> ok; false -> timer:sleep(T1), wait_until(Fun, Timeout, {T2, T1 + T2}) end. % Return the number of lines in a file. Return 0 for a non-existent file. count_lines(Filename) -> case file:read_file(Filename) of {ok, Content} -> Lines = binary:split(Content, <<"\n">>, [global, trim]), length(Lines); {error, _} -> 0 end. -endif. lager-3.8.0/test/compress_pr_record_test.erl0000644000232200023220000000125013523436621021601 0ustar debalancedebalance-module(compress_pr_record_test). -compile([{parse_transform, lager_transform}]). -record(a, {field1 :: term(), field2 :: term(), foo :: term(), bar :: term(), baz :: term(), zyu :: term(), zix :: term()}). -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). -endif. nested_record_test() -> A = #a{field1 = "Notice me senpai"}, Pr_A = lager:pr(A, ?MODULE), Pr_A_Comp = lager:pr(A, ?MODULE, [compress]), ?assertMatch({'$lager_record', a, [{field1, "Notice me senpai"}, {field2, undefined} | _]}, Pr_A), ?assertEqual({'$lager_record', a, [{field1, "Notice me senpai"}]}, Pr_A_Comp). lager-3.8.0/test/lager_test_backend.erl0000644000232200023220000030323213523436621020455 0ustar debalancedebalance%% ------------------------------------------------------------------- %% %% Copyright (c) 2011-2017 Basho Technologies, Inc. %% %% This file is provided to you under the Apache License, %% Version 2.0 (the "License"); you may not use this file %% except in compliance with the License. You may obtain %% a copy of the License at %% %% http://www.apache.org/licenses/LICENSE-2.0 %% %% Unless required by applicable law or agreed to in writing, %% software distributed under the License is distributed on an %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY %% KIND, either express or implied. See the License for the %% specific language governing permissions and limitations %% under the License. %% %% ------------------------------------------------------------------- -module(lager_test_backend). -behaviour(gen_event). -export([init/1, handle_call/2, handle_event/2, handle_info/2, terminate/2, code_change/3]). -include("lager.hrl"). -define(TEST_SINK_NAME, '__lager_test_sink'). %% <-- used by parse transform -define(TEST_SINK_EVENT, '__lager_test_sink_lager_event'). %% <-- used by lager API calls and internals for gen_event -record(state, { level :: list(), buffer :: list(), ignored :: term() }). -compile({parse_transform, lager_transform}). -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). -record(test, { attrs :: list(), format :: list(), args :: list() }). -export([ count/0, count_ignored/0, flush/0, message_stuffer/3, pop/0, pop_ignored/0, print_state/0, get_buffer/0 ]). -endif. init(Level) -> {ok, #state{level=lager_util:config_to_mask(Level), buffer=[], ignored=[]}}. 
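%% Illustrative note (not part of the original archive): this module is a
%% gen_event handler that the test suites install in place of a real
%% backend. Loggable messages are appended to `buffer' and everything
%% below the configured level to `ignored'; tests then inspect them with
%% the pop/0, count/0, count_ignored/0 and flush/0 helpers defined below.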
handle_call(count, #state{buffer=Buffer} = State) -> {ok, length(Buffer), State}; handle_call(count_ignored, #state{ignored=Ignored} = State) -> {ok, length(Ignored), State}; handle_call(flush, State) -> {ok, ok, State#state{buffer=[], ignored=[]}}; handle_call(pop, #state{buffer=Buffer} = State) -> case Buffer of [] -> {ok, undefined, State}; [H|T] -> {ok, H, State#state{buffer=T}} end; handle_call(pop_ignored, #state{ignored=Ignored} = State) -> case Ignored of [] -> {ok, undefined, State}; [H|T] -> {ok, H, State#state{ignored=T}} end; handle_call(get_buffer, #state{buffer=Buffer} = State) -> {ok, Buffer, State}; handle_call(get_loglevel, #state{level=Level} = State) -> {ok, Level, State}; handle_call({set_loglevel, Level}, State) -> {ok, ok, State#state{level=lager_util:config_to_mask(Level)}}; handle_call(print_state, State) -> spawn(fun() -> lager:info("State ~p", [lager:pr(State, ?MODULE)]) end), timer:sleep(100), {ok, ok, State}; handle_call(print_bad_state, State) -> spawn(fun() -> lager:info("State ~p", [lager:pr({state, 1}, ?MODULE)]) end), timer:sleep(100), {ok, ok, State}; handle_call(_Request, State) -> {ok, ok, State}. handle_event({log, Msg}, #state{level=LogLevel,buffer=Buffer,ignored=Ignored} = State) -> case lager_util:is_loggable(Msg, LogLevel, ?MODULE) of true -> {ok, State#state{buffer=Buffer ++ [{lager_msg:severity_as_int(Msg), lager_msg:datetime(Msg), lager_msg:message(Msg), lager_msg:metadata(Msg)}]}}; _ -> {ok, State#state{ignored=Ignored ++ [Msg]}} end; handle_event(_Event, State) -> {ok, State}. handle_info(_Info, State) -> {ok, State}. terminate(_Reason, _State) -> ok. code_change(_OldVsn, State, _Extra) -> {ok, State}. -ifdef(TEST). pop() -> pop(lager_event). pop_ignored() -> pop_ignored(lager_event). get_buffer() -> get_buffer(lager_event). count() -> count(lager_event). count_ignored() -> count_ignored(lager_event). flush() -> flush(lager_event). print_state() -> print_state(lager_event). print_bad_state() -> print_bad_state(lager_event). pop(Sink) -> gen_event:call(Sink, ?MODULE, pop). pop_ignored(Sink) -> gen_event:call(Sink, ?MODULE, pop_ignored). get_buffer(Sink) -> gen_event:call(Sink, ?MODULE, get_buffer). count(Sink) -> gen_event:call(Sink, ?MODULE, count). count_ignored(Sink) -> gen_event:call(Sink, ?MODULE, count_ignored). flush(Sink) -> gen_event:call(Sink, ?MODULE, flush). print_state(Sink) -> gen_event:call(Sink, ?MODULE, print_state). print_bad_state(Sink) -> gen_event:call(Sink, ?MODULE, print_bad_state). not_running_test() -> ?assertEqual({error, lager_not_running}, lager:log(info, self(), "not running")). 
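%% Illustrative note (not part of the original archive): the assertions in
%% the tests below compare against bitmasks built from the level macros in
%% lager.hrl; for example, after lager:set_loglevel(?MODULE, debug) the
%% configured mask is the OR of every level bit:
%%
%%     ?DEBUG bor ?INFO bor ?NOTICE bor ?WARNING bor
%%         ?ERROR bor ?CRITICAL bor ?ALERT bor ?EMERGENCY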
lager_test_() -> {foreach, fun setup/0, fun cleanup/1, [ {"observe that there is nothing up my sleeve", fun() -> ?assertEqual(undefined, pop()), ?assertEqual(0, count()) end }, {"test sink not running", fun() -> ?assertEqual({error, {sink_not_configured, test}}, lager:log(test, info, self(), "~p", "not running")) end }, {"logging works", fun() -> lager:warning("test message"), ?assertEqual(1, count()), {Level, _Time, Message, _Metadata} = pop(), ?assertMatch(Level, lager_util:level_to_num(warning)), ?assertEqual("test message", Message), ok end }, {"logging with macro works", fun() -> ?lager_warning("test message", []), ?assertEqual(1, count()), {Level, _Time, Message, _Metadata} = pop(), ?assertMatch(Level, lager_util:level_to_num(warning)), ?assertEqual("test message", Message), ok end }, {"unsafe logging works", fun() -> lager:warning_unsafe("test message"), ?assertEqual(1, count()), {Level, _Time, Message, _Metadata} = pop(), ?assertMatch(Level, lager_util:level_to_num(warning)), ?assertEqual("test message", Message), ok end }, {"logging with arguments works", fun() -> lager:warning("test message ~p", [self()]), ?assertEqual(1, count()), {Level, _Time, Message,_Metadata} = pop(), ?assertMatch(Level, lager_util:level_to_num(warning)), ?assertEqual(lists:flatten(io_lib:format("test message ~p", [self()])), lists:flatten(Message)), ok end }, {"logging with macro and arguments works", fun() -> ?lager_warning("test message ~p", [self()]), ?assertEqual(1, count()), {Level, _Time, Message,_Metadata} = pop(), ?assertMatch(Level, lager_util:level_to_num(warning)), ?assertEqual(lists:flatten(io_lib:format("test message ~p", [self()])), lists:flatten(Message)), ok end }, {"unsafe logging with args works", fun() -> lager:warning_unsafe("test message ~p", [self()]), ?assertEqual(1, count()), {Level, _Time, Message,_Metadata} = pop(), ?assertMatch(Level, lager_util:level_to_num(warning)), ?assertEqual(lists:flatten(io_lib:format("test message ~p", [self()])), lists:flatten(Message)), ok end }, {"logging works from inside a begin/end block", fun() -> ?assertEqual(0, count()), begin lager:warning("test message 2") end, ?assertEqual(1, count()), ok end }, {"logging works from inside a list comprehension", fun() -> ?assertEqual(0, count()), [lager:warning("test message") || _N <- lists:seq(1, 10)], ?assertEqual(10, count()), ok end }, {"logging works from a begin/end block inside a list comprehension", fun() -> ?assertEqual(0, count()), [ begin lager:warning("test message") end || _N <- lists:seq(1, 10)], ?assertEqual(10, count()), ok end }, {"logging works from a nested list comprehension", fun() -> ?assertEqual(0, count()), [ [lager:warning("test message") || _N <- lists:seq(1, 10)] || _I <- lists:seq(1, 10)], ?assertEqual(100, count()), ok end }, {"logging with only metadata works", fun() -> ?assertEqual(0, count()), lager:warning([{just, metadata}]), lager:warning([{just, metadata}, {foo, bar}]), ?assertEqual(2, count()), ok end }, {"variables inplace of literals in logging statements work", fun() -> ?assertEqual(0, count()), Attr = [{a, alpha}, {b, beta}], Fmt = "format ~p", Args = [world], lager:info(Attr, "hello"), lager:info(Attr, "hello ~p", [world]), lager:info(Fmt, [world]), lager:info("hello ~p", Args), lager:info(Attr, "hello ~p", Args), lager:info([{d, delta}, {g, gamma}], Fmt, Args), ?assertEqual(6, count()), {_Level, _Time, Message, Metadata} = pop(), ?assertMatch([{a, alpha}, {b, beta}|_], Metadata), ?assertEqual("hello", lists:flatten(Message)), {_Level, _Time2, Message2, _Metadata2} = 
pop(), ?assertEqual("hello world", lists:flatten(Message2)), {_Level, _Time3, Message3, _Metadata3} = pop(), ?assertEqual("format world", lists:flatten(Message3)), {_Level, _Time4, Message4, _Metadata4} = pop(), ?assertEqual("hello world", lists:flatten(Message4)), {_Level, _Time5, Message5, _Metadata5} = pop(), ?assertEqual("hello world", lists:flatten(Message5)), {_Level, _Time6, Message6, Metadata6} = pop(), ?assertMatch([{d, delta}, {g, gamma}|_], Metadata6), ?assertEqual("format world", lists:flatten(Message6)), ok end }, {"list comprehension inplace of literals in logging statements work", fun() -> ?assertEqual(0, count()), Attr = [{a, alpha}, {b, beta}], Fmt = "format ~p", Args = [world], lager:info([{K, atom_to_list(V)} || {K, V} <- Attr], "hello"), lager:info([{K, atom_to_list(V)} || {K, V} <- Attr], "hello ~p", [{atom, X} || X <- Args]), lager:info([X || X <- Fmt], [world]), lager:info("hello ~p", [{atom, X} || X <- Args]), lager:info([{K, atom_to_list(V)} || {K, V} <- Attr], "hello ~p", [{atom, X} || X <- Args]), lager:info([{d, delta}, {g, gamma}], Fmt, [{atom, X} || X <- Args]), ?assertEqual(6, count()), {_Level, _Time, Message, Metadata} = pop(), ?assertMatch([{a, "alpha"}, {b, "beta"}|_], Metadata), ?assertEqual("hello", lists:flatten(Message)), {_Level, _Time2, Message2, _Metadata2} = pop(), ?assertEqual("hello {atom,world}", lists:flatten(Message2)), {_Level, _Time3, Message3, _Metadata3} = pop(), ?assertEqual("format world", lists:flatten(Message3)), {_Level, _Time4, Message4, _Metadata4} = pop(), ?assertEqual("hello {atom,world}", lists:flatten(Message4)), {_Level, _Time5, Message5, _Metadata5} = pop(), ?assertEqual("hello {atom,world}", lists:flatten(Message5)), {_Level, _Time6, Message6, Metadata6} = pop(), ?assertMatch([{d, delta}, {g, gamma}|_], Metadata6), ?assertEqual("format {atom,world}", lists:flatten(Message6)), ok end }, {"function calls inplace of literals in logging statements work", fun() -> ?assertEqual(0, count()), put(attrs, [{a, alpha}, {b, beta}]), put(format, "format ~p"), put(args, [world]), lager:info(get(attrs), "hello"), lager:info(get(attrs), "hello ~p", get(args)), lager:info(get(format), [world]), lager:info("hello ~p", erlang:get(args)), lager:info(fun() -> get(attrs) end(), "hello ~p", get(args)), lager:info([{d, delta}, {g, gamma}], get(format), get(args)), ?assertEqual(6, count()), {_Level, _Time, Message, Metadata} = pop(), ?assertMatch([{a, alpha}, {b, beta}|_], Metadata), ?assertEqual("hello", lists:flatten(Message)), {_Level, _Time2, Message2, _Metadata2} = pop(), ?assertEqual("hello world", lists:flatten(Message2)), {_Level, _Time3, Message3, _Metadata3} = pop(), ?assertEqual("format world", lists:flatten(Message3)), {_Level, _Time4, Message4, _Metadata4} = pop(), ?assertEqual("hello world", lists:flatten(Message4)), {_Level, _Time5, Message5, _Metadata5} = pop(), ?assertEqual("hello world", lists:flatten(Message5)), {_Level, _Time6, Message6, Metadata6} = pop(), ?assertMatch([{d, delta}, {g, gamma}|_], Metadata6), ?assertEqual("format world", lists:flatten(Message6)), ok end }, {"record fields inplace of literals in logging statements work", fun() -> ?assertEqual(0, count()), Test = #test{attrs=[{a, alpha}, {b, beta}], format="format ~p", args=[world]}, lager:info(Test#test.attrs, "hello"), lager:info(Test#test.attrs, "hello ~p", Test#test.args), lager:info(Test#test.format, [world]), lager:info("hello ~p", Test#test.args), lager:info(Test#test.attrs, "hello ~p", Test#test.args), lager:info([{d, delta}, {g, gamma}], Test#test.format, 
Test#test.args), ?assertEqual(6, count()), {_Level, _Time, Message, Metadata} = pop(), ?assertMatch([{a, alpha}, {b, beta}|_], Metadata), ?assertEqual("hello", lists:flatten(Message)), {_Level, _Time2, Message2, _Metadata2} = pop(), ?assertEqual("hello world", lists:flatten(Message2)), {_Level, _Time3, Message3, _Metadata3} = pop(), ?assertEqual("format world", lists:flatten(Message3)), {_Level, _Time4, Message4, _Metadata4} = pop(), ?assertEqual("hello world", lists:flatten(Message4)), {_Level, _Time5, Message5, _Metadata5} = pop(), ?assertEqual("hello world", lists:flatten(Message5)), {_Level, _Time6, Message6, Metadata6} = pop(), ?assertMatch([{d, delta}, {g, gamma}|_], Metadata6), ?assertEqual("format world", lists:flatten(Message6)), ok end }, {"log messages below the threshold are ignored", fun() -> ?assertEqual(0, count()), lager:debug("this message will be ignored"), ?assertEqual(0, count()), ?assertEqual(0, count_ignored()), lager_config:set(loglevel, {element(2, lager_util:config_to_mask(debug)), []}), lager:debug("this message should be ignored"), ?assertEqual(0, count()), ?assertEqual(1, count_ignored()), lager:set_loglevel(?MODULE, debug), ?assertEqual({?DEBUG bor ?INFO bor ?NOTICE bor ?WARNING bor ?ERROR bor ?CRITICAL bor ?ALERT bor ?EMERGENCY, []}, lager_config:get(loglevel)), lager:debug("this message should be logged"), ?assertEqual(1, count()), ?assertEqual(1, count_ignored()), ?assertEqual(debug, lager:get_loglevel(?MODULE)), ok end }, {"tracing works", fun() -> lager_config:set(loglevel, {element(2, lager_util:config_to_mask(error)), []}), ok = lager:info("hello world"), ?assertEqual(0, count()), lager:trace(?MODULE, [{module, ?MODULE}], debug), ?assertMatch({?ERROR bor ?CRITICAL bor ?ALERT bor ?EMERGENCY, _}, lager_config:get(loglevel)), %% elegible for tracing ok = lager:info("hello world"), %% NOT elegible for tracing ok = lager:log(info, [{pid, self()}], "hello world"), ?assertEqual(1, count()), ok end }, {"tracing works with custom attributes", fun() -> lager:set_loglevel(?MODULE, error), ?assertEqual({?ERROR bor ?CRITICAL bor ?ALERT bor ?EMERGENCY, []}, lager_config:get(loglevel)), lager_config:set(loglevel, {element(2, lager_util:config_to_mask(error)), []}), lager:info([{requestid, 6}], "hello world"), ?assertEqual(0, count()), lager:trace(?MODULE, [{requestid, 6}, {foo, bar}], debug), lager:info([{requestid, 6}, {foo, bar}], "hello world"), ?assertEqual(1, count()), lager:trace(?MODULE, [{requestid, '*'}], debug), lager:info([{requestid, 6}], "hello world"), ?assertEqual(2, count()), lager:clear_all_traces(), lager:info([{requestid, 6}], "hello world"), ?assertEqual(2, count()), ok end }, {"tracing works with custom attributes and event stream processing", fun() -> lager:set_loglevel(?MODULE, error), ?assertEqual({?ERROR bor ?CRITICAL bor ?ALERT bor ?EMERGENCY, []}, lager_config:get(loglevel)), lager_config:set(loglevel, {element(2, lager_util:config_to_mask(error)), []}), lager:info([{requestid, 6}], "hello world"), ?assertEqual(0, count()), lager:trace(?MODULE, [{requestid, '>', 5}, {requestid, '<', 7}, {foo, bar}], debug), lager:info([{requestid, 5}, {foo, bar}], "hello world"), lager:info([{requestid, 6}, {foo, bar}], "hello world"), ?assertEqual(1, count()), lager:clear_all_traces(), lager:trace(?MODULE, [{requestid, '>', 8}, {foo, bar}]), lager:info([{foo, bar}], "hello world"), lager:info([{requestid, 6}], "hello world"), lager:info([{requestid, 7}], "hello world"), lager:info([{requestid, 8}], "hello world"), lager:info([{requestid, 9}, {foo, bar}], 
"hello world"), lager:info([{requestid, 10}], "hello world"), ?assertEqual(2, count()), lager:trace(?MODULE, [{requestid, '>', 8}]), lager:info([{foo, bar}], "hello world"), lager:info([{requestid, 6}], "hello world"), lager:info([{requestid, 7}], "hello world"), lager:info([{requestid, 8}], "hello world"), lager:info([{requestid, 9}, {foo, bar}], "hello world"), lager:info([{requestid, 10}], "hello world"), ?assertEqual(4, count()), lager:trace(?MODULE, [{foo, '=', bar}]), lager:info([{foo, bar}], "hello world"), lager:info([{requestid, 6}], "hello world"), lager:info([{requestid, 7}], "hello world"), lager:info([{requestid, 8}], "hello world"), lager:info([{requestid, 9}, {foo, bar}], "hello world"), lager:info([{requestid, 10}], "hello world"), lager:trace(?MODULE, [{fu, '!'}]), lager:info([{foo, bar}], "hello world"), lager:info([{ooh, car}], "hello world"), lager:info([{fu, bar}], "hello world"), lager:trace(?MODULE, [{fu, '*'}]), lager:info([{fu, bar}], "hello world"), ?assertEqual(10, count()), lager:clear_all_traces(), lager:info([{requestid, 6}], "hello world"), ?assertEqual(10, count()), lager:clear_all_traces(), lager:trace(?MODULE, [{requestid, '>=', 5}, {requestid, '=<', 7}], debug), lager:info([{requestid, 4}], "nope!"), lager:info([{requestid, 5}], "hello world"), lager:info([{requestid, 7}], "hello world again"), ?assertEqual(12, count()), lager:clear_all_traces(), lager:trace(?MODULE, [{foo, '!=', bar}]), lager:info([{foo, bar}], "hello world"), ?assertEqual(12, count()), lager:info([{foo, baz}], "blarg"), ?assertEqual(13, count()), lager:clear_all_traces(), lager:trace(?MODULE, [{all, [{foo, '=', bar}, {null, false}]}]), lager:info([{foo, bar}], "should not be logged"), ?assertEqual(13, count()), lager:clear_all_traces(), lager:trace(?MODULE, [{any, [{foo, '=', bar}, {null, true}]}]), lager:info([{foo, qux}], "should be logged"), ?assertEqual(14, count()), ok end }, {"tracing custom attributes works with event stream processing statistics and reductions", fun() -> lager:set_loglevel(?MODULE, error), ?assertEqual({?ERROR bor ?CRITICAL bor ?ALERT bor ?EMERGENCY, []}, lager_config:get(loglevel)), lager_config:set(loglevel, {element(2, lager_util:config_to_mask(error)), []}), lager:info([{requestid, 6}], "hello world"), ?assertEqual(0, count()), lager:trace(?MODULE, [{beta, '*'}]), lager:trace(?MODULE, [{meta, "data"}]), lager:info([{meta, "data"}], "hello world"), lager:info([{beta, 2}], "hello world"), lager:info([{beta, 2.1}, {foo, bar}], "hello world"), lager:info([{meta, <<"data">>}], "hello world"), ?assertEqual(8, ?DEFAULT_TRACER:info(input)), ?assertEqual(6, ?DEFAULT_TRACER:info(output)), ?assertEqual(2, ?DEFAULT_TRACER:info(filter)), lager:clear_all_traces(), lager:trace(?MODULE, [{meta, "data"}]), lager:trace(?MODULE, [{beta, '>', 2}, {beta, '<', 2.12}]), lager:info([{meta, "data"}], "hello world"), lager:info([{beta, 2}], "hello world"), lager:info([{beta, 2.1}, {foo, bar}], "hello world"), lager:info([{meta, <<"data">>}], "hello world"), ?assertEqual(8, ?DEFAULT_TRACER:info(input)), ?assertEqual(4, ?DEFAULT_TRACER:info(output)), ?assertEqual(4, ?DEFAULT_TRACER:info(filter)), lager:clear_all_traces(), lager:trace_console([{beta, '>', 2}, {meta, "data"}]), lager:trace_console([{beta, '>', 2}, {beta, '<', 2.12}]), Reduced = {all,[{any,[{beta,'<',2.12},{meta,'=',"data"}]}, {beta,'>',2}]}, ?assertEqual(Reduced, ?DEFAULT_TRACER:info('query')), lager:clear_all_traces(), lager:info([{requestid, 6}], "hello world"), ?assertEqual(5, count()), ok end }, {"persistent traces 
work", fun() -> ?assertEqual(0, count()), lager:debug([{foo, bar}], "hello world"), ?assertEqual(0, count()), application:stop(lager), application:set_env(lager, traces, [{lager_test_backend, [{foo, bar}], debug}]), lager:start(), timer:sleep(5), flush(), lager:debug([{foo, bar}], "hello world"), ?assertEqual(1, count()), application:unset_env(lager, traces), ok end }, {"tracing honors loglevel", fun() -> lager:set_loglevel(?MODULE, error), ?assertEqual({?ERROR bor ?CRITICAL bor ?ALERT bor ?EMERGENCY, []}, lager_config:get(loglevel)), {ok, T} = lager:trace(?MODULE, [{module, ?MODULE}], notice), ok = lager:info("hello world"), ?assertEqual(0, count()), ok = lager:notice("hello world"), ?assertEqual(1, count()), lager:stop_trace(T), ok = lager:notice("hello world"), ?assertEqual(1, count()), ok end }, {"stopped trace stops and removes its event handler - default sink (gh#267)", {timeout, 10, fun() -> Sink = ?DEFAULT_SINK, StartHandlers = gen_event:which_handlers(Sink), {_, T0} = lager_config:get({Sink, loglevel}), StartGlobal = lager_config:global_get(handlers), ?assertEqual([], T0), {ok, TestTrace1} = lager:trace_file("/tmp/test", [{a,b}]), MidHandlers = gen_event:which_handlers(Sink), {ok, TestTrace2} = lager:trace_file("/tmp/test", [{c,d}]), MidHandlers = gen_event:which_handlers(Sink), ?assertEqual(length(StartHandlers)+1, length(MidHandlers)), MidGlobal = lager_config:global_get(handlers), ?assertEqual(length(StartGlobal)+1, length(MidGlobal)), {_, T1} = lager_config:get({Sink, loglevel}), ?assertEqual(2, length(T1)), ok = lager:stop_trace(TestTrace1), {_, T2} = lager_config:get({Sink, loglevel}), ?assertEqual(1, length(T2)), ?assertEqual(length(StartHandlers)+1, length( gen_event:which_handlers(Sink))), ?assertEqual(length(StartGlobal)+1, length(lager_config:global_get(handlers))), ok = lager:stop_trace(TestTrace2), EndHandlers = gen_event:which_handlers(Sink), EndGlobal = lager_config:global_get(handlers), {_, T3} = lager_config:get({Sink, loglevel}), ?assertEqual([], T3), ?assertEqual(StartHandlers, EndHandlers), ?assertEqual(StartGlobal, EndGlobal), ok end} }, {"record printing works", fun() -> print_state(), {Level, _Time, Message, _Metadata} = pop(), ?assertMatch(Level, lager_util:level_to_num(info)), {mask, Mask} = lager_util:config_to_mask(info), ?assertEqual("State #state{level={mask,"++integer_to_list(Mask)++"},buffer=[],ignored=[]}", lists:flatten(Message)), ok end }, {"record printing fails gracefully", fun() -> print_bad_state(), {Level, _Time, Message, _Metadata} = pop(), ?assertMatch(Level, lager_util:level_to_num(info)), ?assertEqual("State {state,1}", lists:flatten(Message)), ok end }, {"record printing fails gracefully when no lager_record attribute", fun() -> spawn(fun() -> lager:info("State ~p", [lager:pr({state, 1}, lager)]) end), timer:sleep(100), {Level, _Time, Message, _Metadata} = pop(), ?assertMatch(Level, lager_util:level_to_num(info)), ?assertEqual("State {state,1}", lists:flatten(Message)), ok end }, {"record printing fails gracefully when input is not a tuple", fun() -> spawn(fun() -> lager:info("State ~p", [lager:pr(ok, lager)]) end), timer:sleep(100), {Level, _Time, Message, _Metadata} = pop(), ?assertMatch(Level, lager_util:level_to_num(info)), ?assertEqual("State ok", lists:flatten(Message)), ok end }, {"record printing fails gracefully when module is invalid", fun() -> spawn(fun() -> lager:info("State ~p", [lager:pr({state, 1}, not_a_module)]) end), timer:sleep(1000), {Level, _Time, Message, _Metadata} = pop(), ?assertMatch(Level, 
lager_util:level_to_num(info)), ?assertEqual("State {state,1}", lists:flatten(Message)), ok end }, {"installing a new handler adjusts the global loglevel if necessary", fun() -> ?assertEqual({?INFO bor ?NOTICE bor ?WARNING bor ?ERROR bor ?CRITICAL bor ?ALERT bor ?EMERGENCY, []}, lager_config:get(loglevel)), supervisor:start_child(lager_handler_watcher_sup, [lager_event, {?MODULE, foo}, debug]), ?assertEqual({?DEBUG bor ?INFO bor ?NOTICE bor ?WARNING bor ?ERROR bor ?CRITICAL bor ?ALERT bor ?EMERGENCY, []}, lager_config:get(loglevel)), ok end }, {"metadata in the process dictionary works", fun() -> lager:md([{platypus, gravid}, {sloth, hirsute}, {duck, erroneous}]), lager:info("I sing the animal kingdom electric!"), {_Level, _Time, _Message, Metadata} = pop(), ?assertEqual(gravid, proplists:get_value(platypus, Metadata)), ?assertEqual(hirsute, proplists:get_value(sloth, Metadata)), ?assertEqual(erroneous, proplists:get_value(duck, Metadata)), ?assertEqual(undefined, proplists:get_value(eagle, Metadata)), lager:md([{platypus, gravid}, {sloth, hirsute}, {eagle, superincumbent}]), lager:info("I sing the animal kingdom dielectric!"), {_Level2, _Time2, _Message2, Metadata2} = pop(), ?assertEqual(gravid, proplists:get_value(platypus, Metadata2)), ?assertEqual(hirsute, proplists:get_value(sloth, Metadata2)), ?assertEqual(undefined, proplists:get_value(duck, Metadata2)), ?assertEqual(superincumbent, proplists:get_value(eagle, Metadata2)), ok end }, {"unsafe messages really are not truncated", fun() -> lager:info_unsafe("doom, doom has come upon you all ~p", [string:copies("doom", 1500)]), {_, _, Msg,_Metadata} = pop(), ?assert(length(lists:flatten(Msg)) == 6035) end }, {"can't store invalid metadata", fun() -> ?assertEqual(ok, lager:md([{platypus, gravid}, {sloth, hirsute}, {duck, erroneous}])), ?assertError(badarg, lager:md({flamboyant, flamingo})), ?assertError(badarg, lager:md("zookeeper zephyr")), ok end }, {"dates should be local by default", fun() -> lager:warning("so long, and thanks for all the fish"), ?assertEqual(1, count()), {_Level, {_Date, Time}, _Message, _Metadata} = pop(), ?assertEqual(nomatch, binary:match(iolist_to_binary(Time), <<"UTC">>)), ok end }, {"dates should be UTC if SASL is configured as UTC", fun() -> application:set_env(sasl, utc_log, true), lager:warning("so long, and thanks for all the fish"), application:set_env(sasl, utc_log, false), ?assertEqual(1, count()), {_Level, {_Date, Time}, _Message, _Metadata} = pop(), ?assertNotEqual(nomatch, binary:match(iolist_to_binary(Time), <<"UTC">>)), ok end } ] }. 
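%% Illustrative note (not part of the original archive): the extra-sink
%% tests below log through ?TEST_SINK_NAME (the API module generated by
%% the parse transform) and assert against ?TEST_SINK_EVENT (the
%% underlying gen_event). The sink itself is declared in setup_sink/0 via
%%
%%     application:set_env(lager, extra_sinks,
%%                         [{?TEST_SINK_EVENT, [{handlers, [{?MODULE, info}]}]}]).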
extra_sinks_test_() -> {foreach, fun setup_sink/0, fun cleanup/1, [ {"observe that there is nothing up my sleeve", fun() -> ?assertEqual(undefined, pop(?TEST_SINK_EVENT)), ?assertEqual(0, count(?TEST_SINK_EVENT)) end }, {"logging works", fun() -> ?TEST_SINK_NAME:warning("test message"), ?assertEqual(1, count(?TEST_SINK_EVENT)), {Level, _Time, Message, _Metadata} = pop(?TEST_SINK_EVENT), ?assertMatch(Level, lager_util:level_to_num(warning)), ?assertEqual("test message", Message), ok end }, {"logging with arguments works", fun() -> ?TEST_SINK_NAME:warning("test message ~p", [self()]), ?assertEqual(1, count(?TEST_SINK_EVENT)), {Level, _Time, Message,_Metadata} = pop(?TEST_SINK_EVENT), ?assertMatch(Level, lager_util:level_to_num(warning)), ?assertEqual(lists:flatten(io_lib:format("test message ~p", [self()])), lists:flatten(Message)), ok end }, {"variables inplace of literals in logging statements work", fun() -> ?assertEqual(0, count(?TEST_SINK_EVENT)), Attr = [{a, alpha}, {b, beta}], Fmt = "format ~p", Args = [world], ?TEST_SINK_NAME:info(Attr, "hello"), ?TEST_SINK_NAME:info(Attr, "hello ~p", [world]), ?TEST_SINK_NAME:info(Fmt, [world]), ?TEST_SINK_NAME:info("hello ~p", Args), ?TEST_SINK_NAME:info(Attr, "hello ~p", Args), ?TEST_SINK_NAME:info([{d, delta}, {g, gamma}], Fmt, Args), ?assertEqual(6, count(?TEST_SINK_EVENT)), {_Level, _Time, Message, Metadata} = pop(?TEST_SINK_EVENT), ?assertMatch([{a, alpha}, {b, beta}|_], Metadata), ?assertEqual("hello", lists:flatten(Message)), {_Level, _Time2, Message2, _Metadata2} = pop(?TEST_SINK_EVENT), ?assertEqual("hello world", lists:flatten(Message2)), {_Level, _Time3, Message3, _Metadata3} = pop(?TEST_SINK_EVENT), ?assertEqual("format world", lists:flatten(Message3)), {_Level, _Time4, Message4, _Metadata4} = pop(?TEST_SINK_EVENT), ?assertEqual("hello world", lists:flatten(Message4)), {_Level, _Time5, Message5, _Metadata5} = pop(?TEST_SINK_EVENT), ?assertEqual("hello world", lists:flatten(Message5)), {_Level, _Time6, Message6, Metadata6} = pop(?TEST_SINK_EVENT), ?assertMatch([{d, delta}, {g, gamma}|_], Metadata6), ?assertEqual("format world", lists:flatten(Message6)), ok end }, {"stopped trace stops and removes its event handler - test sink (gh#267)", fun() -> Sink = ?TEST_SINK_EVENT, StartHandlers = gen_event:which_handlers(Sink), {_, T0} = lager_config:get({Sink, loglevel}), StartGlobal = lager_config:global_get(handlers), ?assertEqual([], T0), {ok, TestTrace1} = lager:trace_file("/tmp/test", [{sink, Sink}, {a,b}]), MidHandlers = gen_event:which_handlers(Sink), {ok, TestTrace2} = lager:trace_file("/tmp/test", [{sink, Sink}, {c,d}]), MidHandlers = gen_event:which_handlers(Sink), ?assertEqual(length(StartHandlers)+1, length(MidHandlers)), MidGlobal = lager_config:global_get(handlers), ?assertEqual(length(StartGlobal)+1, length(MidGlobal)), {_, T1} = lager_config:get({Sink, loglevel}), ?assertEqual(2, length(T1)), ok = lager:stop_trace(TestTrace1), {_, T2} = lager_config:get({Sink, loglevel}), ?assertEqual(1, length(T2)), ?assertEqual(length(StartHandlers)+1, length( gen_event:which_handlers(Sink))), ?assertEqual(length(StartGlobal)+1, length(lager_config:global_get(handlers))), ok = lager:stop_trace(TestTrace2), EndHandlers = gen_event:which_handlers(Sink), EndGlobal = lager_config:global_get(handlers), {_, T3} = lager_config:get({Sink, loglevel}), ?assertEqual([], T3), ?assertEqual(StartHandlers, EndHandlers), ?assertEqual(StartGlobal, EndGlobal), ok end }, {"log messages below the threshold are ignored", fun() -> ?assertEqual(0, 
count(?TEST_SINK_EVENT)), ?TEST_SINK_NAME:debug("this message will be ignored"), ?assertEqual(0, count(?TEST_SINK_EVENT)), ?assertEqual(0, count_ignored(?TEST_SINK_EVENT)), lager_config:set({?TEST_SINK_EVENT, loglevel}, {element(2, lager_util:config_to_mask(debug)), []}), ?TEST_SINK_NAME:debug("this message should be ignored"), ?assertEqual(0, count(?TEST_SINK_EVENT)), ?assertEqual(1, count_ignored(?TEST_SINK_EVENT)), lager:set_loglevel(?TEST_SINK_EVENT, ?MODULE, undefined, debug), ?assertEqual({?DEBUG bor ?INFO bor ?NOTICE bor ?WARNING bor ?ERROR bor ?CRITICAL bor ?ALERT bor ?EMERGENCY, []}, lager_config:get({?TEST_SINK_EVENT, loglevel})), ?TEST_SINK_NAME:debug("this message should be logged"), ?assertEqual(1, count(?TEST_SINK_EVENT)), ?assertEqual(1, count_ignored(?TEST_SINK_EVENT)), ?assertEqual(debug, lager:get_loglevel(?TEST_SINK_EVENT, ?MODULE)), ok end } ] }. setup_sink() -> error_logger:tty(false), application:load(lager), application:set_env(lager, handlers, []), application:set_env(lager, error_logger_redirect, false), application:set_env(lager, extra_sinks, [{?TEST_SINK_EVENT, [{handlers, [{?MODULE, info}]}]}]), lager:start(), gen_event:call(lager_event, ?MODULE, flush), gen_event:call(?TEST_SINK_EVENT, ?MODULE, flush). setup() -> error_logger:tty(false), application:load(lager), application:set_env(lager, handlers, [{?MODULE, info}]), application:set_env(lager, error_logger_redirect, false), application:unset_env(lager, traces), lager:start(), %% There is a race condition between the application start up, lager logging its own %% start up condition and several tests that count messages or parse the output of %% tests. When the lager start up message wins the race, it causes these tests %% which parse output or count message arrivals to fail. %% %% We introduce a sleep here to allow `flush' to arrive *after* the start up %% message has been received and processed. %% %% This race condition was first exposed during the work on %% 4b5260c4524688b545cc12da6baa2dfa4f2afec9 which introduced the lager %% manager killer PR. application:set_env(lager, suppress_supervisor_start_stop, true), application:set_env(lager, suppress_application_start_stop, true), timer:sleep(1000), gen_event:call(lager_event, ?MODULE, flush). cleanup(_) -> catch ets:delete(lager_config), %% kill the ets config table with fire application:stop(lager), application:stop(goldrush), error_logger:tty(true). crash(Type) -> spawn(fun() -> gen_server:call(crash, Type) end), timer:sleep(100), _ = gen_event:which_handlers(error_logger), ok. test_body(Expected, Actual) -> ExLen = length(Expected), {Body, Rest} = case length(Actual) > ExLen of true -> {string:substr(Actual, 1, ExLen), string:substr(Actual, (ExLen + 1))}; _ -> {Actual, []} end, ?assertEqual(Expected, Body), % OTP-17 (and maybe later releases) may tack on additional info % about the failure, so if Actual starts with Expected (already % confirmed by having gotten past assertEqual above) and ends % with " line NNN" we can ignore what's in-between. By extension, % since there may not be line information appended at all, any % text we DO find is reportable, but not a test failure. 
case Rest of [] -> ok; _ -> % isolate the extra data and report it if it's not just % a line number indicator case re:run(Rest, "^.*( line \\d+)$", [{capture, [1]}]) of nomatch -> ?debugFmt( "Trailing data \"~s\" following \"~s\"", [Rest, Expected]); {match, [{0, _}]} -> % the whole sting is " line NNN" ok; {match, [{Off, _}]} -> ?debugFmt( "Trailing data \"~s\" following \"~s\"", [string:substr(Rest, 1, Off), Expected]) end end. error_logger_redirect_crash_setup() -> error_logger:tty(false), application:load(lager), application:set_env(lager, error_logger_redirect, true), application:set_env(lager, handlers, [{?MODULE, error}]), lager:start(), crash:start(), lager_event. error_logger_redirect_crash_setup_sink() -> error_logger:tty(false), application:load(lager), application:set_env(lager, error_logger_redirect, true), application:unset_env(lager, handlers), application:set_env(lager, extra_sinks, [ {error_logger_lager_event, [ {handlers, [{?MODULE, error}]}]}]), lager:start(), crash:start(), error_logger_lager_event. error_logger_redirect_crash_cleanup(_Sink) -> application:stop(lager), application:stop(goldrush), application:unset_env(lager, extra_sinks), case whereis(crash) of undefined -> ok; Pid -> exit(Pid, kill) end, error_logger:tty(true). crash_fsm_setup() -> error_logger:tty(false), application:load(lager), application:set_env(lager, error_logger_redirect, true), application:set_env(lager, handlers, [{?MODULE, error}]), lager:start(), crash_fsm:start(), crash_statem:start(), lager:log(error, self(), "flush flush"), timer:sleep(100), gen_event:call(lager_event, ?MODULE, flush), lager_event. crash_fsm_sink_setup() -> ErrorSink = error_logger_lager_event, error_logger:tty(false), application:load(lager), application:set_env(lager, error_logger_redirect, true), application:set_env(lager, handlers, []), application:set_env(lager, extra_sinks, [{ErrorSink, [{handlers, [{?MODULE, error}]}]}]), lager:start(), crash_fsm:start(), crash_statem:start(), lager:log(ErrorSink, error, self(), "flush flush", []), timer:sleep(100), flush(ErrorSink), ErrorSink. crash_fsm_cleanup(_Sink) -> application:stop(lager), application:stop(goldrush), application:unset_env(lager, extra_sinks), lists:foreach(fun(N) -> kill_crasher(N) end, [crash_fsm, crash_statem]), error_logger:tty(true). kill_crasher(RegName) -> case whereis(RegName) of undefined -> ok; Pid -> exit(Pid, kill) end. spawn_fsm_crash(Module, Function, Args) -> spawn(fun() -> erlang:apply(Module, Function, Args) end), timer:sleep(100), _ = gen_event:which_handlers(error_logger), ok. 
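%% Illustrative note (not part of the original archive): spawn_fsm_crash/3
%% runs the crashing call in a throwaway process so the test itself
%% survives, then uses gen_event:which_handlers(error_logger) as a
%% synchronous barrier to make sure error_logger has processed the crash
%% report before the test pops it from this backend.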
crash_fsm_test_() -> TestBody = fun(Name, FsmModule, FSMFunc, FSMArgs, Expected) -> fun(Sink) -> {Name, fun() -> case {FsmModule =:= crash_statem, lager_util:otp_version() < 19} of {true, true} -> ok; _ -> Pid = whereis(FsmModule), spawn_fsm_crash(FsmModule, FSMFunc, FSMArgs), {Level, _, Msg, Metadata} = pop(Sink), test_body(Expected, lists:flatten(Msg)), ?assertEqual(Pid, proplists:get_value(pid, Metadata)), ?assertEqual(lager_util:level_to_num(error), Level) end end } end end, Tests = [ fun(Sink) -> {"again, there is nothing up my sleeve", fun() -> ?assertEqual(undefined, pop(Sink)), ?assertEqual(0, count(Sink)) end } end, TestBody("gen_fsm crash", crash_fsm, crash, [], "gen_fsm crash_fsm in state state1 terminated with reason: call to undefined function crash_fsm:state1/3 from gen_fsm:handle_msg/"), TestBody("gen_statem crash", crash_statem, crash, [], "gen_statem crash_statem in state state1 terminated with reason: no function clause matching crash_statem:handle"), TestBody("gen_statem stop", crash_statem, stop, [explode], "gen_statem crash_statem in state state1 terminated with reason: explode"), TestBody("gen_statem timeout", crash_statem, timeout, [], "gen_statem crash_statem in state state1 terminated with reason: timeout") ], {"FSM crash output tests", [ {"Default sink", {foreach, fun crash_fsm_setup/0, fun crash_fsm_cleanup/1, Tests}}, {"Error logger sink", {foreach, fun crash_fsm_sink_setup/0, fun crash_fsm_cleanup/1, Tests}} ]}. error_logger_redirect_crash_test_() -> TestBody=fun(Name,CrashReason,Expected) -> fun(Sink) -> {Name, fun() -> Pid = whereis(crash), crash(CrashReason), {Level, _, Msg,Metadata} = pop(Sink), test_body(Expected, lists:flatten(Msg)), ?assertEqual(Pid,proplists:get_value(pid,Metadata)), ?assertEqual(lager_util:level_to_num(error),Level) end } end end, Tests = [ fun(Sink) -> {"again, there is nothing up my sleeve", fun() -> ?assertEqual(undefined, pop(Sink)), ?assertEqual(0, count(Sink)) end } end, TestBody("bad return value",bad_return,"gen_server crash terminated with reason: bad return value: bleh"), TestBody("bad return value with string",bad_return_string,"gen_server crash terminated with reason: bad return value: {tuple,{tuple,\"string\"}}"), TestBody("bad return uncaught throw",throw,"gen_server crash terminated with reason: bad return value: a_ball"), TestBody("case clause",case_clause,"gen_server crash terminated with reason: no case clause matching {} in crash:handle_call/3"), TestBody("case clause string",case_clause_string,"gen_server crash terminated with reason: no case clause matching \"crash\" in crash:handle_call/3"), TestBody("function clause",function_clause,"gen_server crash terminated with reason: no function clause matching crash:function({})"), TestBody("if clause",if_clause,"gen_server crash terminated with reason: no true branch found while evaluating if expression in crash:handle_call/3"), TestBody("try clause",try_clause,"gen_server crash terminated with reason: no try clause matching [] in crash:handle_call/3"), TestBody("undefined function",undef,"gen_server crash terminated with reason: call to undefined function crash:booger/0 from crash:handle_call/3"), TestBody("bad math",badarith,"gen_server crash terminated with reason: bad arithmetic expression in crash:handle_call/3"), TestBody("bad match",badmatch,"gen_server crash terminated with reason: no match of right hand value {} in crash:handle_call/3"), TestBody("bad arity",badarity,"gen_server crash terminated with reason: fun called with wrong arity of 1 instead of 3 in 
crash:handle_call/3"), TestBody("bad arg1",badarg1,"gen_server crash terminated with reason: bad argument in crash:handle_call/3"), TestBody("bad arg2",badarg2,"gen_server crash terminated with reason: bad argument in call to erlang:iolist_to_binary([\"foo\",bar]) in crash:handle_call/3"), TestBody("bad record",badrecord,"gen_server crash terminated with reason: bad record state in crash:handle_call/3"), TestBody("noproc",noproc,"gen_server crash terminated with reason: no such process or port in call to gen_event:call(foo, bar, baz)"), TestBody("noproc_proc_lib",noproc_proc_lib,"gen_server crash terminated with reason: no such process or port in call to proc_lib:stop/3"), TestBody("badfun",badfun,"gen_server crash terminated with reason: bad function booger in crash:handle_call/3") ], {"Error logger redirect crash", [ {"Redirect to default sink", {foreach, fun error_logger_redirect_crash_setup/0, fun error_logger_redirect_crash_cleanup/1, Tests}}, {"Redirect to error_logger_lager_event sink", {foreach, fun error_logger_redirect_crash_setup_sink/0, fun error_logger_redirect_crash_cleanup/1, Tests}} ]}. error_logger_redirect_setup() -> error_logger:tty(false), application:load(lager), application:set_env(lager, error_logger_redirect, true), application:set_env(lager, handlers, [{?MODULE, info}]), application:set_env(lager, suppress_supervisor_start_stop, false), application:set_env(lager, suppress_application_start_stop, false), lager:start(), lager:log(error, self(), "flush flush"), timer:sleep(1000), gen_event:call(lager_event, ?MODULE, flush), lager_event. error_logger_redirect_setup_sink() -> error_logger:tty(false), application:load(lager), application:set_env(lager, error_logger_redirect, true), application:unset_env(lager, handlers), application:set_env(lager, extra_sinks, [ {error_logger_lager_event, [ {handlers, [{?MODULE, info}]}]}]), application:set_env(lager, suppress_supervisor_start_stop, false), application:set_env(lager, suppress_application_start_stop, false), lager:start(), lager:log(error_logger_lager_event, error, self(), "flush flush", []), timer:sleep(1000), gen_event:call(error_logger_lager_event, ?MODULE, flush), error_logger_lager_event. error_logger_redirect_cleanup(_) -> application:stop(lager), application:stop(goldrush), application:unset_env(lager, extra_sinks), error_logger:tty(true). 
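%% Illustrative note (not part of the original archive): with
%% error_logger_redirect enabled in the setups above, legacy error_logger
%% traffic is translated by lager's error_logger handler and ends up in
%% this backend, so each test below follows the same pattern:
%%
%%     sync_error_logger:error_report([{this, is}, a, {silly, format}]),
%%     _ = gen_event:which_handlers(error_logger),
%%     {Level, _, Msg, Metadata} = pop(Sink).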
error_logger_redirect_test_() -> Tests = [ {"error reports are printed", fun(Sink) -> sync_error_logger:error_report([{this, is}, a, {silly, format}]), _ = gen_event:which_handlers(error_logger), {Level, _, Msg,Metadata} = pop(Sink), ?assertEqual(lager_util:level_to_num(error),Level), ?assertEqual(self(),proplists:get_value(pid,Metadata)), Expected = "this: is, a, silly: format", ?assertEqual(Expected, lists:flatten(Msg)) end }, {"string error reports are printed", fun(Sink) -> sync_error_logger:error_report("this is less silly"), _ = gen_event:which_handlers(error_logger), {Level, _, Msg,Metadata} = pop(Sink), ?assertEqual(lager_util:level_to_num(error),Level), ?assertEqual(self(),proplists:get_value(pid,Metadata)), Expected = "this is less silly", ?assertEqual(Expected, lists:flatten(Msg)) end }, {"error messages are printed", fun(Sink) -> sync_error_logger:error_msg("doom, doom has come upon you all"), _ = gen_event:which_handlers(error_logger), {Level, _, Msg,Metadata} = pop(Sink), ?assertEqual(lager_util:level_to_num(error),Level), ?assertEqual(self(),proplists:get_value(pid,Metadata)), Expected = "doom, doom has come upon you all", ?assertEqual(Expected, lists:flatten(Msg)) end }, {"error messages with unicode characters in Args are printed", fun(Sink) -> sync_error_logger:error_msg("~ts", ["Привет!"]), _ = gen_event:which_handlers(error_logger), {Level, _, Msg,Metadata} = pop(Sink), ?assertEqual(lager_util:level_to_num(error),Level), ?assertEqual(self(),proplists:get_value(pid,Metadata)), ?assertEqual("Привет!", lists:flatten(Msg)) end }, {"error messages are truncated at 4096 characters", fun(Sink) -> sync_error_logger:error_msg("doom, doom has come upon you all ~p", [string:copies("doom", 10000)]), _ = gen_event:which_handlers(error_logger), {_, _, Msg,_Metadata} = pop(Sink), ?assert(length(lists:flatten(Msg)) < 5100) end }, {"info reports are printed", fun(Sink) -> sync_error_logger:info_report([{this, is}, a, {silly, format}]), _ = gen_event:which_handlers(error_logger), {Level, _, Msg,Metadata} = pop(Sink), ?assertEqual(lager_util:level_to_num(info),Level), ?assertEqual(self(),proplists:get_value(pid,Metadata)), Expected = "this: is, a, silly: format", ?assertEqual(Expected, lists:flatten(Msg)) end }, {"info reports are truncated at 4096 characters", fun(Sink) -> sync_error_logger:info_report([[{this, is}, a, {silly, format}] || _ <- lists:seq(0, 600)]), _ = gen_event:which_handlers(error_logger), {Level, _, Msg,Metadata} = pop(Sink), ?assertEqual(lager_util:level_to_num(info),Level), ?assertEqual(self(),proplists:get_value(pid,Metadata)), ?assert(length(lists:flatten(Msg)) < 5000) end }, {"single term info reports are printed", fun(Sink) -> sync_error_logger:info_report({foolish, bees}), _ = gen_event:which_handlers(error_logger), {Level, _, Msg,Metadata} = pop(Sink), ?assertEqual(lager_util:level_to_num(info),Level), ?assertEqual(self(),proplists:get_value(pid,Metadata)), ?assertEqual("{foolish,bees}", lists:flatten(Msg)) end }, {"single term error reports are printed", fun(Sink) -> sync_error_logger:error_report({foolish, bees}), _ = gen_event:which_handlers(error_logger), {Level, _, Msg,Metadata} = pop(Sink), ?assertEqual(lager_util:level_to_num(error),Level), ?assertEqual(self(),proplists:get_value(pid,Metadata)), ?assertEqual("{foolish,bees}", lists:flatten(Msg)) end }, {"string info reports are printed", fun(Sink) -> sync_error_logger:info_report("this is less silly"), _ = gen_event:which_handlers(error_logger), {Level, _, Msg,Metadata} = pop(Sink), 
?assertEqual(lager_util:level_to_num(info),Level), ?assertEqual(self(),proplists:get_value(pid,Metadata)), ?assertEqual("this is less silly", lists:flatten(Msg)) end }, {"string info reports are truncated at 4096 characters", fun(Sink) -> sync_error_logger:info_report(string:copies("this is less silly", 1000)), _ = gen_event:which_handlers(error_logger), {Level, _, Msg,Metadata} = pop(Sink), ?assertEqual(lager_util:level_to_num(info),Level), ?assertEqual(self(),proplists:get_value(pid,Metadata)), ?assert(length(lists:flatten(Msg)) < 5100) end }, {"strings in a mixed report are printed as strings", fun(Sink) -> sync_error_logger:info_report(["this is less silly", {than, "this"}]), _ = gen_event:which_handlers(error_logger), {Level, _, Msg,Metadata} = pop(Sink), ?assertEqual(lager_util:level_to_num(info),Level), ?assertEqual(self(),proplists:get_value(pid,Metadata)), ?assertEqual("\"this is less silly\", than: \"this\"", lists:flatten(Msg)) end }, {"info messages are printed", fun(Sink) -> sync_error_logger:info_msg("doom, doom has come upon you all"), _ = gen_event:which_handlers(error_logger), {Level, _, Msg,Metadata} = pop(Sink), ?assertEqual(lager_util:level_to_num(info),Level), ?assertEqual(self(),proplists:get_value(pid,Metadata)), ?assertEqual("doom, doom has come upon you all", lists:flatten(Msg)) end }, {"info messages are truncated at 4096 characters", fun(Sink) -> sync_error_logger:info_msg("doom, doom has come upon you all ~p", [string:copies("doom", 10000)]), _ = gen_event:which_handlers(error_logger), {Level, _, Msg,Metadata} = pop(Sink), ?assertEqual(lager_util:level_to_num(info),Level), ?assertEqual(self(),proplists:get_value(pid,Metadata)), ?assert(length(lists:flatten(Msg)) < 5100) end }, {"info messages with unicode characters in Args are printed", fun(Sink) -> sync_error_logger:info_msg("~ts", ["Привет!"]), _ = gen_event:which_handlers(error_logger), {Level, _, Msg,Metadata} = pop(Sink), ?assertEqual(lager_util:level_to_num(info),Level), ?assertEqual(self(),proplists:get_value(pid,Metadata)), ?assertEqual("Привет!", lists:flatten(Msg)) end }, {"warning messages with unicode characters in Args are printed", %% The next 4 tests need to store the current value of %% `error_logger:warning_map/0' into a process dictionary %% key `warning_map' so that the error message level used %% to process the log messages will match what lager %% expects. %% %% The atom returned by `error_logger:warning_map/0' %% changed between OTP 17 and 18 (and later releases) %% %% `warning_map' is consumed in the `test/sync_error_logger.erl' %% module. The default message level used in sync_error_logger %% was fine for OTP releases through 17 and then broke %% when 18 was released. By storing the expected value %% in the process dictionary, sync_error_logger will %% use the correct message level to process the %% messages and these tests will no longer %% break. 
fun(Sink) -> Lvl = error_logger:warning_map(), put(warning_map, Lvl), sync_error_logger:warning_msg("~ts", ["Привет!"]), _ = gen_event:which_handlers(error_logger), {Level, _, Msg,Metadata} = pop(Sink), ?assertEqual(lager_util:level_to_num(Lvl),Level), ?assertEqual(self(),proplists:get_value(pid,Metadata)), ?assertEqual("Привет!", lists:flatten(Msg)) end }, {"warning messages are printed at the correct level", fun(Sink) -> Lvl = error_logger:warning_map(), put(warning_map, Lvl), sync_error_logger:warning_msg("doom, doom has come upon you all"), _ = gen_event:which_handlers(error_logger), {Level, _, Msg,Metadata} = pop(Sink), ?assertEqual(lager_util:level_to_num(Lvl),Level), ?assertEqual(self(),proplists:get_value(pid,Metadata)), ?assertEqual("doom, doom has come upon you all", lists:flatten(Msg)) end }, {"warning reports are printed at the correct level", fun(Sink) -> Lvl = error_logger:warning_map(), put(warning_map, Lvl), sync_error_logger:warning_report([{i, like}, pie]), _ = gen_event:which_handlers(error_logger), {Level, _, Msg,Metadata} = pop(Sink), ?assertEqual(lager_util:level_to_num(Lvl),Level), ?assertEqual(self(),proplists:get_value(pid,Metadata)), ?assertEqual("i: like, pie", lists:flatten(Msg)) end }, {"single term warning reports are printed at the correct level", fun(Sink) -> Lvl = error_logger:warning_map(), put(warning_map, Lvl), sync_error_logger:warning_report({foolish, bees}), _ = gen_event:which_handlers(error_logger), {Level, _, Msg,Metadata} = pop(Sink), ?assertEqual(lager_util:level_to_num(Lvl),Level), ?assertEqual(self(),proplists:get_value(pid,Metadata)), ?assertEqual("{foolish,bees}", lists:flatten(Msg)) end }, {"application stop reports", fun(Sink) -> sync_error_logger:info_report([{application, foo}, {exited, quittin_time}, {type, lazy}]), _ = gen_event:which_handlers(error_logger), {Level, _, Msg,Metadata} = pop(Sink), ?assertEqual(lager_util:level_to_num(info),Level), ?assertEqual(self(),proplists:get_value(pid,Metadata)), ?assertEqual("Application foo exited with reason: quittin_time", lists:flatten(Msg)) end }, {"supervisor reports", fun(Sink) -> sync_error_logger:error_report(supervisor_report, [{errorContext, france}, {offender, [{name, mini_steve}, {mfargs, {a, b, [c]}}, {pid, bleh}]}, {reason, fired}, {supervisor, {local, steve}}]), _ = gen_event:which_handlers(error_logger), {Level, _, Msg,Metadata} = pop(Sink), ?assertEqual(lager_util:level_to_num(error),Level), ?assertEqual(self(),proplists:get_value(pid,Metadata)), ?assertEqual("Supervisor steve had child mini_steve started with a:b(c) at bleh exit with reason fired in context france", lists:flatten(Msg)) end }, {"supervisor reports with real error", fun(Sink) -> sync_error_logger:error_report(supervisor_report, [{errorContext, france}, {offender, [{name, mini_steve}, {mfargs, {a, b, [c]}}, {pid, bleh}]}, {reason, {function_clause,[{crash,handle_info,[foo]}]}}, {supervisor, {local, steve}}]), _ = gen_event:which_handlers(error_logger), {Level, _, Msg,Metadata} = pop(Sink), ?assertEqual(lager_util:level_to_num(error),Level), ?assertEqual(self(),proplists:get_value(pid,Metadata)), ?assertEqual("Supervisor steve had child mini_steve started with a:b(c) at bleh exit with reason no function clause matching crash:handle_info(foo) in context france", lists:flatten(Msg)) end }, {"supervisor reports with real error and pid", fun(Sink) -> sync_error_logger:error_report(supervisor_report, [{errorContext, france}, {offender, [{name, mini_steve}, {mfargs, {a, b, [c]}}, {pid, bleh}]}, {reason, 
{function_clause,[{crash,handle_info,[foo]}]}}, {supervisor, somepid}]), _ = gen_event:which_handlers(error_logger), {Level, _, Msg,Metadata} = pop(Sink), ?assertEqual(lager_util:level_to_num(error),Level), ?assertEqual(self(),proplists:get_value(pid,Metadata)), ?assertEqual("Supervisor somepid had child mini_steve started with a:b(c) at bleh exit with reason no function clause matching crash:handle_info(foo) in context france", lists:flatten(Msg)) end }, {"supervisor_bridge reports", fun(Sink) -> sync_error_logger:error_report(supervisor_report, [{errorContext, france}, {offender, [{mod, mini_steve}, {pid, bleh}]}, {reason, fired}, {supervisor, {local, steve}}]), _ = gen_event:which_handlers(error_logger), {Level, _, Msg,Metadata} = pop(Sink), ?assertEqual(lager_util:level_to_num(error),Level), ?assertEqual(self(),proplists:get_value(pid,Metadata)), ?assertEqual("Supervisor steve had child at module mini_steve at bleh exit with reason fired in context france", lists:flatten(Msg)) end }, {"application progress report", fun(Sink) -> sync_error_logger:info_report(progress, [{application, foo}, {started_at, node()}]), _ = gen_event:which_handlers(error_logger), {Level, _, Msg,Metadata} = pop(Sink), ?assertEqual(lager_util:level_to_num(info),Level), ?assertEqual(self(),proplists:get_value(pid,Metadata)), Expected = lists:flatten(io_lib:format("Application foo started on node ~w", [node()])), ?assertEqual(Expected, lists:flatten(Msg)) end }, {"supervisor progress report", fun(Sink) -> lager:set_loglevel(Sink, ?MODULE, undefined, debug), ?assertEqual({?DEBUG bor ?INFO bor ?NOTICE bor ?WARNING bor ?ERROR bor ?CRITICAL bor ?ALERT bor ?EMERGENCY, []}, lager_config:get({Sink, loglevel})), sync_error_logger:info_report(progress, [{supervisor, {local, foo}}, {started, [{mfargs, {foo, bar, 1}}, {pid, baz}]}]), _ = gen_event:which_handlers(error_logger), {Level, _, Msg,Metadata} = pop(Sink), ?assertEqual(lager_util:level_to_num(debug),Level), ?assertEqual(self(),proplists:get_value(pid,Metadata)), ?assertEqual("Supervisor foo started foo:bar/1 at pid baz", lists:flatten(Msg)) end }, {"supervisor progress report with pid", fun(Sink) -> lager:set_loglevel(Sink, ?MODULE, undefined, debug), ?assertEqual({?DEBUG bor ?INFO bor ?NOTICE bor ?WARNING bor ?ERROR bor ?CRITICAL bor ?ALERT bor ?EMERGENCY, []}, lager_config:get({Sink, loglevel})), sync_error_logger:info_report(progress, [{supervisor, somepid}, {started, [{mfargs, {foo, bar, 1}}, {pid, baz}]}]), _ = gen_event:which_handlers(error_logger), {Level, _, Msg,Metadata} = pop(Sink), ?assertEqual(lager_util:level_to_num(debug),Level), ?assertEqual(self(),proplists:get_value(pid,Metadata)), ?assertEqual("Supervisor somepid started foo:bar/1 at pid baz", lists:flatten(Msg)) end }, {"crash report for emfile", fun(Sink) -> sync_error_logger:error_report(crash_report, [[{pid, self()}, {registered_name, []}, {error_info, {error, emfile, [{stack, trace, 1}]}}], []]), _ = gen_event:which_handlers(error_logger), {Level, _, Msg,Metadata} = pop(Sink), ?assertEqual(lager_util:level_to_num(error),Level), ?assertEqual(self(),proplists:get_value(pid,Metadata)), Expected = lists:flatten(io_lib:format("CRASH REPORT Process ~w with 0 neighbours crashed with reason: maximum number of file descriptors exhausted, check ulimit -n", [self()])), ?assertEqual(Expected, lists:flatten(Msg)) end }, {"crash report for system process limit", fun(Sink) -> sync_error_logger:error_report(crash_report, [[{pid, self()}, {registered_name, []}, {error_info, {error, system_limit, [{erlang, spawn, 
1}]}}], []]), _ = gen_event:which_handlers(error_logger), {Level, _, Msg,Metadata} = pop(Sink), ?assertEqual(lager_util:level_to_num(error),Level), ?assertEqual(self(),proplists:get_value(pid,Metadata)), Expected = lists:flatten(io_lib:format("CRASH REPORT Process ~w with 0 neighbours crashed with reason: system limit: maximum number of processes exceeded", [self()])), ?assertEqual(Expected, lists:flatten(Msg)) end }, {"crash report for system process limit2", fun(Sink) -> sync_error_logger:error_report(crash_report, [[{pid, self()}, {registered_name, []}, {error_info, {error, system_limit, [{erlang, spawn_opt, 1}]}}], []]), _ = gen_event:which_handlers(error_logger), {Level, _, Msg,Metadata} = pop(Sink), ?assertEqual(lager_util:level_to_num(error),Level), ?assertEqual(self(),proplists:get_value(pid,Metadata)), Expected = lists:flatten(io_lib:format("CRASH REPORT Process ~w with 0 neighbours crashed with reason: system limit: maximum number of processes exceeded", [self()])), ?assertEqual(Expected, lists:flatten(Msg)) end }, {"crash report for system port limit", fun(Sink) -> sync_error_logger:error_report(crash_report, [[{pid, self()}, {registered_name, []}, {error_info, {error, system_limit, [{erlang, open_port, 1}]}}], []]), _ = gen_event:which_handlers(error_logger), {Level, _, Msg,Metadata} = pop(Sink), ?assertEqual(lager_util:level_to_num(error),Level), ?assertEqual(self(),proplists:get_value(pid,Metadata)), Expected = lists:flatten(io_lib:format("CRASH REPORT Process ~w with 0 neighbours crashed with reason: system limit: maximum number of ports exceeded", [self()])), ?assertEqual(Expected, lists:flatten(Msg)) end }, {"crash report for system port limit", fun(Sink) -> sync_error_logger:error_report(crash_report, [[{pid, self()}, {registered_name, []}, {error_info, {error, system_limit, [{erlang, list_to_atom, 1}]}}], []]), _ = gen_event:which_handlers(error_logger), {Level, _, Msg,Metadata} = pop(Sink), ?assertEqual(lager_util:level_to_num(error),Level), ?assertEqual(self(),proplists:get_value(pid,Metadata)), Expected = lists:flatten(io_lib:format("CRASH REPORT Process ~w with 0 neighbours crashed with reason: system limit: tried to create an atom larger than 255, or maximum atom count exceeded", [self()])), ?assertEqual(Expected, lists:flatten(Msg)) end }, {"crash report for system ets table limit", fun(Sink) -> sync_error_logger:error_report(crash_report, [[{pid, self()}, {registered_name, test}, {error_info, {error, system_limit, [{ets,new,[segment_offsets,[ordered_set,public]]},{mi_segment,open_write,1},{mi_buffer_converter,handle_cast,2},{gen_server,handle_msg,5},{proc_lib,init_p_do_apply,3}]}}], []]), _ = gen_event:which_handlers(error_logger), {Level, _, Msg,Metadata} = pop(Sink), ?assertEqual(lager_util:level_to_num(error),Level), ?assertEqual(self(),proplists:get_value(pid,Metadata)), Expected = lists:flatten(io_lib:format("CRASH REPORT Process ~w with 0 neighbours crashed with reason: system limit: maximum number of ETS tables exceeded", [test])), ?assertEqual(Expected, lists:flatten(Msg)) end }, {"crash report for unknown system limit should be truncated at 500 characters", fun(Sink) -> sync_error_logger:error_report(crash_report, [[{pid, self()}, {error_info, {error, system_limit, [{wtf,boom,[string:copies("aaaa", 4096)]}]}}], []]), _ = gen_event:which_handlers(error_logger), {_, _, Msg,_Metadata} = pop(Sink), ?assert(length(lists:flatten(Msg)) > 550), ?assert(length(lists:flatten(Msg)) < 600) end }, {"crash reports for 'special processes' should be handled right - 
function_clause", fun(Sink) -> {ok, Pid} = special_process:start(), unlink(Pid), Pid ! function_clause, timer:sleep(500), _ = gen_event:which_handlers(error_logger), {_, _, Msg, _Metadata} = pop(Sink), Expected = lists:flatten(io_lib:format("CRASH REPORT Process ~p with 0 neighbours crashed with reason: no function clause matching special_process:foo(bar)", [Pid])), test_body(Expected, lists:flatten(Msg)) end }, {"crash reports for 'special processes' should be handled right - case_clause", fun(Sink) -> {ok, Pid} = special_process:start(), unlink(Pid), Pid ! {case_clause, wtf}, timer:sleep(500), _ = gen_event:which_handlers(error_logger), {_, _, Msg, _Metadata} = pop(Sink), Expected = lists:flatten(io_lib:format("CRASH REPORT Process ~p with 0 neighbours crashed with reason: no case clause matching wtf in special_process:loop/0", [Pid])), test_body(Expected, lists:flatten(Msg)) end }, {"crash reports for 'special processes' should be handled right - exit", fun(Sink) -> {ok, Pid} = special_process:start(), unlink(Pid), Pid ! exit, timer:sleep(500), _ = gen_event:which_handlers(error_logger), {_, _, Msg, _Metadata} = pop(Sink), Expected = lists:flatten(io_lib:format("CRASH REPORT Process ~p with 0 neighbours exited with reason: byebye in special_process:loop/0", [Pid])), test_body(Expected, lists:flatten(Msg)) end }, {"crash reports for 'special processes' should be handled right - error", fun(Sink) -> {ok, Pid} = special_process:start(), unlink(Pid), Pid ! error, timer:sleep(500), _ = gen_event:which_handlers(error_logger), {_, _, Msg, _Metadata} = pop(Sink), Expected = lists:flatten(io_lib:format("CRASH REPORT Process ~p with 0 neighbours crashed with reason: mybad in special_process:loop/0", [Pid])), test_body(Expected, lists:flatten(Msg)) end }, {"webmachine error reports", fun(Sink) -> Path = "/cgi-bin/phpmyadmin", Reason = {error,{error,{badmatch,{error,timeout}}, [{myapp,dostuff,2,[{file,"src/myapp.erl"},{line,123}]}, {webmachine_resource,resource_call,3,[{file,"src/webmachine_resource.erl"},{line,169}]}]}}, sync_error_logger:error_msg("webmachine error: path=~p~n~p~n", [Path, Reason]), _ = gen_event:which_handlers(error_logger), {Level, _, Msg,Metadata} = pop(Sink), ?assertEqual(lager_util:level_to_num(error),Level), ?assertEqual(self(),proplists:get_value(pid,Metadata)), ?assertEqual("Webmachine error at path \"/cgi-bin/phpmyadmin\" : no match of right hand value {error,timeout} in myapp:dostuff/2 line 123", lists:flatten(Msg)) end }, {"Cowboy error reports, 8 arg version", fun(Sink) -> Stack = [{my_handler,init, 3,[{file,"src/my_handler.erl"},{line,123}]}, {cowboy_handler,handler_init,4,[{file,"src/cowboy_handler.erl"},{line,169}]}], sync_error_logger:error_msg( "** Cowboy handler ~p terminating in ~p/~p~n" " for the reason ~p:~p~n" "** Options were ~p~n" "** Request was ~p~n" "** Stacktrace: ~p~n~n", [my_handler, init, 3, error, {badmatch, {error, timeout}}, [], "Request", Stack]), _ = gen_event:which_handlers(error_logger), {Level, _, Msg,Metadata} = pop(Sink), ?assertEqual(lager_util:level_to_num(error),Level), ?assertEqual(self(),proplists:get_value(pid,Metadata)), ?assertEqual("Cowboy handler my_handler terminated in my_handler:init/3 with reason: no match of right hand value {error,timeout} in my_handler:init/3 line 123", lists:flatten(Msg)) end }, {"Cowboy error reports, 10 arg version", fun(Sink) -> Stack = [{my_handler,somecallback, 3,[{file,"src/my_handler.erl"},{line,123}]}, {cowboy_handler,handler_init,4,[{file,"src/cowboy_handler.erl"},{line,169}]}], 
sync_error_logger:error_msg( "** Cowboy handler ~p terminating in ~p/~p~n" " for the reason ~p:~p~n** Message was ~p~n" "** Options were ~p~n** Handler state was ~p~n" "** Request was ~p~n** Stacktrace: ~p~n~n", [my_handler, somecallback, 3, error, {badmatch, {error, timeout}}, hello, [], {}, "Request", Stack]), _ = gen_event:which_handlers(error_logger), {Level, _, Msg,Metadata} = pop(Sink), ?assertEqual(lager_util:level_to_num(error),Level), ?assertEqual(self(),proplists:get_value(pid,Metadata)), ?assertEqual("Cowboy handler my_handler terminated in my_handler:somecallback/3 with reason: no match of right hand value {error,timeout} in my_handler:somecallback/3 line 123", lists:flatten(Msg)) end }, {"Cowboy error reports, 5 arg version", fun(Sink) -> sync_error_logger:error_msg( "** Cowboy handler ~p terminating; " "function ~p/~p was not exported~n" "** Request was ~p~n** State was ~p~n~n", [my_handler, to_json, 2, "Request", {}]), _ = gen_event:which_handlers(error_logger), {Level, _, Msg,Metadata} = pop(Sink), ?assertEqual(lager_util:level_to_num(error),Level), ?assertEqual(self(),proplists:get_value(pid,Metadata)), ?assertEqual("Cowboy handler my_handler terminated with reason: call to undefined function my_handler:to_json/2", lists:flatten(Msg)) end }, {"Cowboy error reports, 6 arg version", fun(Sink) -> Stack = [{app_http, init, 2, [{file, "app_http.erl"}, {line,9}]}, {cowboy_handler, execute, 2, [{file, "cowboy_handler.erl"}, {line, 41}]}], ConnectionPid = list_to_pid("<0.82.0>"), sync_error_logger:error_msg( "Ranch listener ~p, connection process ~p, stream ~p " "had its request process ~p exit with reason " "~999999p and stacktrace ~999999p~n", [my_listner, ConnectionPid, 1, self(), {badmatch, 2}, Stack]), _ = gen_event:which_handlers(error_logger), {Level, _, Msg, Metadata} = pop(Sink), ?assertEqual(lager_util:level_to_num(error), Level), ?assertEqual(self(), proplists:get_value(pid, Metadata)), ?assertEqual("Cowboy stream 1 with ranch listener my_listner and " "connection process <0.82.0> had its request process exit " "with reason: no match of right hand value 2 " "in app_http:init/2 line 9", lists:flatten(Msg)) end }, {"messages should not be generated if they don't satisfy the threshold", fun(Sink) -> lager:set_loglevel(Sink, ?MODULE, undefined, error), ?assertEqual({?ERROR bor ?CRITICAL bor ?ALERT bor ?EMERGENCY, []}, lager_config:get({Sink, loglevel})), sync_error_logger:info_report([hello, world]), _ = gen_event:which_handlers(error_logger), ?assertEqual(0, count(Sink)), ?assertEqual(0, count_ignored(Sink)), lager:set_loglevel(Sink, ?MODULE, undefined, info), ?assertEqual({?INFO bor ?NOTICE bor ?WARNING bor ?ERROR bor ?CRITICAL bor ?ALERT bor ?EMERGENCY, []}, lager_config:get({Sink, loglevel})), sync_error_logger:info_report([hello, world]), _ = gen_event:which_handlers(error_logger), ?assertEqual(1, count(Sink)), ?assertEqual(0, count_ignored(Sink)), lager:set_loglevel(Sink, ?MODULE, undefined, error), ?assertEqual({?ERROR bor ?CRITICAL bor ?ALERT bor ?EMERGENCY, []}, lager_config:get({Sink, loglevel})), lager_config:set({Sink, loglevel}, {element(2, lager_util:config_to_mask(debug)), []}), sync_error_logger:info_report([hello, world]), _ = gen_event:which_handlers(error_logger), ?assertEqual(1, count(Sink)), ?assertEqual(1, count_ignored(Sink)) end } ], SinkTests = lists:map( fun({Name, F}) -> fun(Sink) -> {Name, fun() -> F(Sink) end} end end, Tests), {"Error logger redirect", [ {"Redirect to default sink", {foreach, fun error_logger_redirect_setup/0, fun 
error_logger_redirect_cleanup/1, SinkTests}}, {"Redirect to error_logger_lager_event sink", {foreach, fun error_logger_redirect_setup_sink/0, fun error_logger_redirect_cleanup/1, SinkTests}} ]}. safe_format_test() -> ?assertEqual("foo bar", lists:flatten(lager:safe_format("~p ~p", [foo, bar], 1024))), ?assertEqual("FORMAT ERROR: \"~p ~p ~p\" [foo,bar]", lists:flatten(lager:safe_format("~p ~p ~p", [foo, bar], 1024))), ok. unsafe_format_test() -> ?assertEqual("foo bar", lists:flatten(lager:unsafe_format("~p ~p", [foo, bar]))), ?assertEqual("FORMAT ERROR: \"~p ~p ~p\" [foo,bar]", lists:flatten(lager:unsafe_format("~p ~p ~p", [foo, bar]))), ok. async_threshold_test_() -> Cleanup = fun(Reset) -> _ = error_logger:tty(false), _ = application:stop(lager), _ = application:stop(goldrush), _ = application:unset_env(lager, async_threshold), if Reset -> true = ets:delete(async_threshold_test), error_logger:tty(true); true -> _ = (catch ets:delete(async_threshold_test)), ok end end, Setup = fun() -> % Evidence suggests that previous tests somewhere are leaving some of this stuff % loaded, and cleaning it out forcefully to allows the test to succeed. _ = Cleanup(false), _ = ets:new(async_threshold_test, [set, named_table, public]), ?assertEqual(true, ets:insert_new(async_threshold_test, {sync_toggled, 0})), ?assertEqual(true, ets:insert_new(async_threshold_test, {async_toggled, 0})), _ = application:load(lager), ok = application:set_env(lager, error_logger_redirect, false), ok = application:set_env(lager, async_threshold, 2), ok = application:set_env(lager, async_threshold_window, 1), ok = application:set_env(lager, handlers, [{?MODULE, info}]), ok = lager:start(), true end, {foreach, Setup, Cleanup, [ {"async threshold works", {timeout, 30, fun() -> Sleep = get_long_sleep_value(), %% we start out async ?assertEqual(true, lager_config:get(async)), ?assertEqual([{sync_toggled, 0}], ets:lookup(async_threshold_test, sync_toggled)), %% put a ton of things in the queue WorkCnt = erlang:max(10, (erlang:system_info(schedulers) * 2)), OtpVsn = lager_util:otp_version(), % newer OTPs *may* handle the messages faster, so we'll send more MsgCnt = ((OtpVsn * OtpVsn) div 2), Workers = spawn_stuffers(WorkCnt, [MsgCnt, info, "hello world"], []), %% serialize on mailbox _ = gen_event:which_handlers(lager_event), timer:sleep(Sleep), %% By now the flood of messages should have forced the backend throttle %% to turn off async mode, but it's possible all outstanding requests %% have been processed, so checking the current status (sync or async) %% is an exercise in race control. %% Instead, we'll see whether the backend throttle has toggled into sync %% mode at any point in the past. ?assertMatch([{sync_toggled, N}] when N > 0, ets:lookup(async_threshold_test, sync_toggled)), %% Wait for all the workers to return, meaning that all the messages have %% been logged (since we're definitely in sync mode at the end of the run). collect_workers(Workers), %% serialize on the mailbox again _ = gen_event:which_handlers(lager_event), timer:sleep(Sleep), lager:info("hello world"), _ = gen_event:which_handlers(lager_event), timer:sleep(Sleep), %% async is true again now that the mailbox has drained ?assertEqual(true, lager_config:get(async)), ok end}} ]}. % Fire off the stuffers with minimal resource overhead - speed is of the essence. spawn_stuffers(0, _, Refs) -> % Attempt to return them in about the order that they'll finish. 
lists:reverse(Refs); spawn_stuffers(N, Args, Refs) -> {_Pid, Ref} = erlang:spawn_monitor(?MODULE, message_stuffer, Args), spawn_stuffers((N - 1), Args, [Ref | Refs]). % Spawned process to stuff N copies of Message into lager's message queue as fast as possible. % Skip using a list function for speed and low memory footprint - don't want to take the % resources to create a sequence (or pass one in). message_stuffer(N, Level, Message) -> message_stuffer_(N, Level, [{pid, erlang:self()}], Message). message_stuffer_(0, _, _, _) -> ok; message_stuffer_(N, Level, Meta, Message) -> lager:log(Level, Meta, Message), message_stuffer_((N - 1), Level, Meta, Message). collect_workers([]) -> ok; collect_workers([Ref | Refs]) -> receive {'DOWN', Ref, _, _, _} -> collect_workers(Refs) end. produce_n_error_logger_msgs(N) -> lists:foreach(fun (K) -> error_logger:error_msg("Foo ~p!", [K]) end, lists:seq(0, N-1) ). high_watermark_test_() -> {foreach, fun() -> error_logger:tty(false), application:load(lager), application:set_env(lager, error_logger_redirect, true), application:set_env(lager, handlers, [{lager_test_backend, info}]), application:set_env(lager, async_threshold, undefined), lager:start() end, fun(_) -> application:stop(lager), error_logger:tty(true) end, [ {"Nothing dropped when error_logger high watermark is undefined", fun () -> ok = error_logger_lager_h:set_high_water(undefined), timer:sleep(100), produce_n_error_logger_msgs(10), timer:sleep(500), ?assert(count() >= 10) end }, {"Mostly dropped according to error_logger high watermark", fun () -> ok = error_logger_lager_h:set_high_water(5), timer:sleep(100), produce_n_error_logger_msgs(50), timer:sleep(1000), ?assert(count() < 20) end }, {"Non-notifications are not dropped", fun () -> ok = error_logger_lager_h:set_high_water(2), timer:sleep(100), spawn(fun () -> produce_n_error_logger_msgs(300) end), timer:sleep(50), %% if everything were dropped, this call would be dropped %% too, so lets hope it's not ?assert(is_integer(count())), timer:sleep(1000), ?assert(count() < 10) end } ] }. get_long_sleep_value() -> case os:getenv("CI") of false -> 500; _ -> 5000 end. -endif. lager-3.8.0/test/lager_crash_backend.erl0000644000232200023220000000401513523436621020573 0ustar debalancedebalance%% Copyright (c) 2011-2012 Basho Technologies, Inc. All Rights Reserved. %% %% This file is provided to you under the Apache License, %% Version 2.0 (the "License"); you may not use this file %% except in compliance with the License. You may obtain %% a copy of the License at %% %% http://www.apache.org/licenses/LICENSE-2.0 %% %% Unless required by applicable law or agreed to in writing, %% software distributed under the License is distributed on an %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY %% KIND, either express or implied. See the License for the %% specific language governing permissions and limitations %% under the License. -module(lager_crash_backend). -include("lager.hrl"). -behaviour(gen_event). -export([init/1, handle_call/2, handle_event/2, handle_info/2, terminate/2, code_change/3]). -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). -endif. 
init([CrashBefore, CrashAfter]) -> case is_tuple(CrashBefore) andalso (timer:now_diff(CrashBefore, os:timestamp()) > 0) of true -> %?debugFmt("crashing!~n", []), {error, crashed}; _ -> %?debugFmt("Not crashing!~n", []), case is_tuple(CrashAfter) of true -> CrashTime = timer:now_diff(CrashAfter, os:timestamp()) div 1000, case CrashTime > 0 of true -> %?debugFmt("crashing in ~p~n", [CrashTime]), erlang:send_after(CrashTime, self(), crash), {ok, {}}; _ -> {error, crashed} end; _ -> {ok, {}} end end. handle_call(_Request, State) -> {ok, ok, State}. handle_event(_Event, State) -> {ok, State}. handle_info(crash, _State) -> %?debugFmt("Time to crash!~n", []), crash; handle_info(_Info, State) -> {ok, State}. terminate(_Reason, _State) -> ok. code_change(_OldVsn, State, _Extra) -> {ok, State}. lager-3.8.0/test/lager_app_tests.erl0000644000232200023220000000054313523436621020030 0ustar debalancedebalance-module(lager_app_tests). -compile([{parse_transform, lager_transform}]). -include_lib("eunit/include/eunit.hrl"). get_env_test() -> application:set_env(myapp, mykey1, <<"Value">>), ?assertEqual(<<"Some">>, lager_app:get_env(myapp, mykey0, <<"Some">>)), ?assertEqual(<<"Value">>, lager_app:get_env(myapp, mykey1, <<"Some">>)), ok. lager-3.8.0/test/crash_statem.erl0000644000232200023220000000240213523436621017325 0ustar debalancedebalance-module(crash_statem). %% we're only going to compile this on OTP 19+ -ifdef(test_statem). -behaviour(gen_statem). -export([ start/0, crash/0, stop/1, timeout/0, handle_event/4 ]). -export([terminate/3,code_change/4,init/1,callback_mode/0]). start() -> gen_statem:start({local,?MODULE}, ?MODULE, [], []). crash() -> gen_statem:call(?MODULE, boom). stop(Reason) -> gen_statem:call(?MODULE, {stop, Reason}). timeout() -> gen_statem:call(?MODULE, timeout). %% Mandatory callback functions terminate(_Reason, _State, _Data) -> ok. code_change(_Vsn, State, Data, _Extra) -> {ok,State,Data}. init([]) -> %% insert rant here about breaking changes in minor versions... case erlang:system_info(version) of "8.0" -> {callback_mode(),state1,undefined}; _ -> {ok, state1, undefined} end. callback_mode() -> handle_event_function. %%% state callback(s) handle_event(state_timeout, timeout, state1, _) -> {stop, timeout}; handle_event({call, _From}, timeout, _Arg, _Data) -> {keep_state_and_data, [{state_timeout, 0, timeout}]}; handle_event({call, _From}, {stop, Reason}, state1, _Data) -> {stop, Reason}. -else. -export([start/0, crash/0]). start() -> ok. crash() -> ok. -endif. lager-3.8.0/test/lager_test_function_transform.erl0000644000232200023220000002022113523436621023000 0ustar debalancedebalance%% ------------------------------------------------------------------- %% %% Copyright (c) 2011-2017 Basho Technologies, Inc. %% %% This file is provided to you under the Apache License, %% Version 2.0 (the "License"); you may not use this file %% except in compliance with the License. You may obtain %% a copy of the License at %% %% http://www.apache.org/licenses/LICENSE-2.0 %% %% Unless required by applicable law or agreed to in writing, %% software distributed under the License is distributed on an %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY %% KIND, either express or implied. See the License for the %% specific language governing permissions and limitations %% under the License. %% %% ------------------------------------------------------------------- -module(lager_test_function_transform). -include("lager.hrl"). -compile([{nowarn_deprecated_function, [{erlang, now, 0}]}]). 
-lager_function_transforms([ {returns_static_emit, on_emit, {lager_test_function_transform, transform_static}}, {returns_dynamic_emit, on_emit, {lager_test_function_transform, transform_dynamic}}, {returns_undefined_emit, on_emit, {not_real_module_fake, fake_not_real_function}}, {returns_static_log, on_log, {lager_test_function_transform, transform_static}}, {returns_dynamic_log, on_log, {lager_test_function_transform, transform_dynamic}} ]). -compile({parse_transform, lager_transform}). -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). -export([ transform_static/0, transform_dynamic/0 ]). -endif. -ifdef(TEST). transform_static() -> static_result. transform_dynamic() -> case lager_util:otp_version() >= 18 of true -> erlang:monotonic_time(); false -> erlang:now() end. not_running_test() -> ?assertEqual({error, lager_not_running}, lager:log(info, self(), "not running")). setup() -> ok = error_logger:tty(false), ok = lager_util:safe_application_load(lager), ok = application:set_env(lager, handlers, [{lager_test_backend, info}]), ok = application:set_env(lager, error_logger_redirect, false), ok = application:unset_env(lager, traces), ok = lager:start(), %% There is a race condition between the application start up, lager logging its own %% start up condition and several tests that count messages or parse the output of %% tests. When the lager start up message wins the race, it causes these tests %% which parse output or count message arrivals to fail. %% %% We introduce a sleep here to allow `flush' to arrive *after* the start up %% message has been received and processed. %% %% This race condition was first exposed during the work on %% 4b5260c4524688b545cc12da6baa2dfa4f2afec9 which introduced the lager %% manager killer PR. ok = timer:sleep(250), ok = gen_event:call(lager_event, lager_test_backend, flush). cleanup(_) -> catch ets:delete(lager_config), %% kill the ets config table with fire ok = application:stop(lager), ok = application:stop(goldrush), ok = error_logger:tty(true). 
transform_function_test_() -> {foreach, fun setup/0, fun cleanup/1, [ {"observe that there is nothing up my sleeve", fun() -> ?assertEqual(undefined, lager_test_backend:pop()), ?assertEqual(0, lager_test_backend:count()) end }, {"logging works", fun() -> lager:warning("test message"), ?assertEqual(1, lager_test_backend:count()), {Level, _Time, Message, _Metadata} = lager_test_backend:pop(), ?assertMatch(Level, lager_util:level_to_num(warning)), ?assertEqual("test message", Message), ok end }, {"Testing calling a function returns the same content on emit", fun() -> lager:warning("static message"), ?assertEqual(1, lager_test_backend:count()), {_Level, _Time, _Message, Metadata} = lager_test_backend:pop(), Function = proplists:get_value(returns_static_emit, Metadata), ?assertEqual(transform_static(), Function()), ok end }, {"Testing calling a function which returns content which can change on emit", fun() -> lager:warning("dynamic message"), ?assertEqual(1, lager_test_backend:count()), {_Level, _Time, _Message, Metadata} = lager_test_backend:pop(), Function = proplists:get_value(returns_dynamic_emit, Metadata), ?assert(Function() =< Function()), ?assert(Function() =< Function()), ?assert(Function() =< Function()), ?assert(Function() =< Function()), ok end }, {"Testing a undefined function returns undefined on emit", fun() -> lager:warning("Undefined error"), ?assertEqual(1, lager_test_backend:count()), {_Level, _Time, _Message, Metadata} = lager_test_backend:pop(), Function = proplists:get_value(returns_undefined_emit, Metadata), [{module, Module}, {name, Name}|_] = erlang:fun_info(Function), ?assertNot(erlang:function_exported(Module, Name, 0)), ok end }, {"Testing calling a function returns the same content on log", fun() -> lager:warning("static message"), ?assertEqual(1, lager_test_backend:count()), {_Level, _Time, _Message, Metadata} = lager_test_backend:pop(), ?assertEqual(transform_static(), proplists:get_value(returns_static_log, Metadata)), ok end }, {"Testing calling a dynamic function on log which returns the same value", fun() -> lager:warning("dynamic message"), ?assertEqual(1, lager_test_backend:count()), {_Level, _Time, _Message, Metadata} = lager_test_backend:pop(), Value = proplists:get_value(returns_dynamic_log, Metadata), ?assert(Value =< transform_dynamic()), ?assert(Value =< transform_dynamic()), ?assert(Value =< transform_dynamic()), ?assert(Value =< transform_dynamic()), ?assert(Value =< transform_dynamic()), ok end }, {"Testing differences in results for on_log vs on emit from dynamic function", fun() -> lager:warning("on_log vs on emit"), ?assertEqual(1, lager_test_backend:count()), {_Level, _Time, _Message, Metadata} = lager_test_backend:pop(), Value = proplists:get_value(returns_dynamic_log, Metadata), Function = proplists:get_value(returns_dynamic_emit, Metadata), FunctionResult = Function(), ?assert(Value =< FunctionResult), ?assert(Value =< Function()), ?assert(FunctionResult =< Function()), ok end }, {"Testing a function provided via metadata", fun()-> Provided = fun() -> provided_metadata end, lager:md([{provided, Provided}]), lager:warning("Provided metadata"), ?assertEqual(1, lager_test_backend:count()), {_Level, _Time, _Message, Metadata} = lager_test_backend:pop(), Function = proplists:get_value(provided, Metadata), ?assertEqual(Provided(), Function()), ok end } ] }. -endif. lager-3.8.0/test/crash.erl0000644000232200023220000000541113523436621015753 0ustar debalancedebalance %% a module that crashes in just about every way possible -module(crash). 
-behaviour(gen_server). -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]). -export([start/0]). -record(state, { host :: term(), port :: term() }). start() -> gen_server:start({local, ?MODULE}, ?MODULE, [], []). init(_) -> {ok, {}}. handle_call(undef, _, State) -> {reply, ?MODULE:booger(), State}; handle_call(badfun, _, State) -> M = booger, {reply, M(), State}; handle_call(bad_return, _, _) -> bleh; handle_call(bad_return_string, _, _) -> {tuple, {tuple, "string"}}; handle_call(case_clause, _, State) -> case State of goober -> {reply, ok, State} end; handle_call(case_clause_string, _, State) -> Foo = atom_to_list(?MODULE), case Foo of State -> {reply, ok, State} end; handle_call(if_clause, _, State) -> if State == 1 -> {reply, ok, State} end; handle_call(try_clause, _, State) -> Res = try tuple_to_list(State) of [_A, _B] -> ok catch _:_ -> ok end, {reply, Res, State}; handle_call(badmatch, _, State) -> {A, B, C} = State, {reply, [A, B, C], State}; handle_call(badrecord, _, State) -> Host = State#state.host, {reply, Host, State}; handle_call(function_clause, _, State) -> {reply, function(State), State}; handle_call(badarith, _, State) -> Res = 1 / length(tuple_to_list(State)), {reply, Res, State}; handle_call(badarg1, _, State) -> Res = list_to_binary(["foo", bar]), {reply, Res, State}; handle_call(badarg2, _, State) -> Res = erlang:iolist_to_binary(["foo", bar]), {reply, Res, State}; handle_call(system_limit, _, State) -> Res = list_to_atom(lists:flatten(lists:duplicate(256, "a"))), {reply, Res, State}; handle_call(process_limit, _, State) -> %% run with +P 300 to make this crash [erlang:spawn(fun() -> timer:sleep(5000) end) || _ <- lists:seq(0, 500)], {reply, ok, State}; handle_call(port_limit, _, State) -> [erlang:open_port({spawn, "ls"}, []) || _ <- lists:seq(0, 1024)], {reply, ok, State}; handle_call(noproc, _, State) -> Res = gen_event:call(foo, bar, baz), {reply, Res, State}; handle_call(noproc_proc_lib, _, State) -> Res = proc_lib:stop(foo), {reply, Res, State}; handle_call(badarity, _, State) -> F = fun(A, B, C) -> A + B + C end, Res = F(State), {reply, Res, State}; handle_call(throw, _, _State) -> throw(a_ball); handle_call(_Call, _From, State) -> {reply, ok, State}. handle_cast(_Cast, State) -> {noreply, State}. handle_info(_Info, State) -> {noreply, State}. terminate(_, _) -> ok. code_change(_, State, _) -> {ok, State}. function(X) when is_list(X) -> ok. lager-3.8.0/test/lager_manager_killer_test.erl0000644000232200023220000001274413523436621022047 0ustar debalancedebalance-module(lager_manager_killer_test). -author("Sungjin Park "). -compile([{parse_transform, lager_transform}]). -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). -define(TEST_SINK_NAME, '__lager_test_sink'). %% <-- used by parse transform -define(TEST_SINK_EVENT, '__lager_test_sink_lager_event'). 
%% <-- used by lager API calls and internals for gen_event overload_test_() -> {timeout, 60, fun() -> application:stop(lager), application:load(lager), Delay = 1000, % sleep 1 sec on every log KillerHWM = 10, % kill the manager if there are more than 10 pending logs KillerReinstallAfter = 1000, % reinstall killer after 1 sec application:set_env(lager, handlers, [{lager_slow_backend, [{delay, Delay}]}]), application:set_env(lager, async_threshold, undefined), application:set_env(lager, error_logger_redirect, true), application:set_env(lager, killer_hwm, KillerHWM), application:set_env(lager, killer_reinstall_after, KillerReinstallAfter), ensure_started(lager), lager_config:set(async, true), Manager = whereis(lager_event), erlang:trace(all, true, [procs]), [lager:info("~p'th message", [N]) || N <- lists:seq(1,KillerHWM+2)], Margin = 100, ok = confirm_manager_exit(Manager, Delay+Margin), ok = confirm_sink_reregister(lager_event, Margin), erlang:trace(all, false, [procs]), wait_until(fun() -> case proplists:get_value(lager_manager_killer, gen_event:which_handlers(lager_event)) of [] -> false; _ -> true end end, Margin, 15), wait_until(fun() -> case gen_event:call(lager_event, lager_manager_killer, get_settings) of [KillerHWM, KillerReinstallAfter] -> true; _Other -> false end end, Margin, 15), application:stop(lager) end}. overload_alternate_sink_test_() -> {timeout, 60, fun() -> application:stop(lager), application:load(lager), Delay = 1000, % sleep 1 sec on every log KillerHWM = 10, % kill the manager if there are more than 10 pending logs KillerReinstallAfter = 1000, % reinstall killer after 1 sec application:set_env(lager, handlers, []), application:set_env(lager, extra_sinks, [{?TEST_SINK_EVENT, [ {handlers, [{lager_slow_backend, [{delay, Delay}]}]}, {killer_hwm, KillerHWM}, {killer_reinstall_after, KillerReinstallAfter}, {async_threshold, undefined} ]}]), application:set_env(lager, error_logger_redirect, true), ensure_started(lager), lager_config:set({?TEST_SINK_EVENT, async}, true), Manager = whereis(?TEST_SINK_EVENT), erlang:trace(all, true, [procs]), [?TEST_SINK_NAME:info("~p'th message", [N]) || N <- lists:seq(1,KillerHWM+2)], Margin = 100, ok = confirm_manager_exit(Manager, Delay+Margin), ok = confirm_sink_reregister(?TEST_SINK_EVENT, Margin), erlang:trace(all, false, [procs]), wait_until(fun() -> case proplists:get_value(lager_manager_killer, gen_event:which_handlers(?TEST_SINK_EVENT)) of [] -> false; _ -> true end end, Margin, 15), wait_until(fun() -> case gen_event:call(?TEST_SINK_EVENT, lager_manager_killer, get_settings) of [KillerHWM, KillerReinstallAfter] -> true; _Other -> false end end, Margin, 15), application:stop(lager) end}. ensure_started(App) -> case application:start(App) of ok -> ok; {error, {not_started, Dep}} -> ensure_started(Dep), ensure_started(App) end. confirm_manager_exit(Manager, Delay) -> receive {trace, Manager, exit, killed} -> ?debugFmt("Manager ~p killed", [Manager]); Other -> ?debugFmt("OTHER MSG: ~p", [Other]), confirm_manager_exit(Manager, Delay) after Delay -> ?assert(false) end. confirm_sink_reregister(Sink, Delay) -> receive {trace, _Pid, register, Sink} -> ?assertNot(lists:member(lager_manager_killer, gen_event:which_handlers(Sink))) after Delay -> ?assert(false) end. wait_until(_Fun, _Delay, 0) -> {error, too_many_retries}; wait_until(Fun, Delay, Retries) -> case Fun() of true -> ok; false -> timer:sleep(Delay), wait_until(Fun, Delay, Retries-1) end. -endif. 
lager-3.8.0/test/lager_slow_backend.erl0000644000232200023220000000133613523436621020462 0ustar debalancedebalance-module(lager_slow_backend). -author("Sungjin Park "). -behavior(gen_event). -export([init/1, handle_call/2, handle_event/2, handle_info/2, terminate/2, code_change/3]). -include("lager.hrl"). -record(state, { delay :: non_neg_integer() }). init([{delay, Delay}]) -> {ok, #state{delay=Delay}}. handle_call(get_loglevel, State) -> {ok, lager_util:config_to_mask(debug), State}; handle_call(_Request, State) -> {ok, ok, State}. handle_event({log, _Message}, State) -> timer:sleep(State#state.delay), {ok, State}; handle_event(_Event, State) -> {ok, State}. handle_info(_Info, State) -> {ok, State}. terminate(_Reason, _State) -> ok. code_change(_OldVsn, State, _Extra) -> {ok, State}. lager-3.8.0/test/zzzz_gh280_crash.erl0000644000232200023220000000232713523436621017775 0ustar debalancedebalance%% @doc This test is named zzzz_gh280_crash because it has to be run first and tests are run in %% reverse alphabetical order. %% %% The problem we are attempting to detect here is when log_mf_h is installed as a handler for error_logger %% and lager starts up to replace the current handlers with its own. This causes a start up crash because %% OTP error logging modules do not have any notion of a lager-style log level. -module(zzzz_gh280_crash). -include_lib("eunit/include/eunit.hrl"). gh280_crash_test() -> {timeout, 30, fun() -> gh280_impl() end}. gh280_impl() -> application:stop(lager), application:stop(goldrush), error_logger:tty(false), %% see https://github.com/erlang/otp/blob/maint/lib/stdlib/src/log_mf_h.erl#L81 %% for an explanation of the init arguments to log_mf_h ok = gen_event:add_sup_handler(error_logger, log_mf_h, log_mf_h:init("/tmp", 10000, 5)), lager:start(), Result = receive {gen_event_EXIT,log_mf_h,normal} -> true; {gen_event_EXIT,Handler,Reason} -> {Handler,Reason}; X -> X after 10000 -> timeout end, ?assert(Result), application:stop(lager), application:stop(goldrush). lager-3.8.0/test/trunc_io_eqc.erl0000644000232200023220000002121113523436621017321 0ustar debalancedebalance%% ------------------------------------------------------------------- %% %% trunc_io_eqc: QuickCheck test for trunc_io:format with maxlen %% %% Copyright (c) 2011-2012 Basho Technologies, Inc. All Rights Reserved. %% %% This file is provided to you under the Apache License, %% Version 2.0 (the "License"); you may not use this file %% except in compliance with the License. You may obtain %% a copy of the License at %% %% http://www.apache.org/licenses/LICENSE-2.0 %% %% Unless required by applicable law or agreed to in writing, %% software distributed under the License is distributed on an %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY %% KIND, either express or implied. See the License for the %% specific language governing permissions and limitations %% under the License. %% %% ------------------------------------------------------------------- -module(trunc_io_eqc). -ifdef(TEST). -ifdef(EQC). -export([test/0, test/1, check/0, prop_format/0, prop_equivalence/0]). -include_lib("eqc/include/eqc.hrl"). -include_lib("eunit/include/eunit.hrl"). -define(QC_OUT(P), eqc:on_output(fun(Str, Args) -> io:format(user, Str, Args) end, P)). 
%%==================================================================== %% eunit test %%==================================================================== eqc_test_() -> {timeout, 60, {spawn, [ {timeout, 30, ?_assertEqual(true, eqc:quickcheck(eqc:testing_time(14, ?QC_OUT(prop_format()))))}, {timeout, 30, ?_assertEqual(true, eqc:quickcheck(eqc:testing_time(14, ?QC_OUT(prop_equivalence()))))} ] }}. %%==================================================================== %% Shell helpers %%==================================================================== test() -> test(100). test(N) -> quickcheck(numtests(N, prop_format())). check() -> check(prop_format(), current_counterexample()). %%==================================================================== %% Generators %%==================================================================== gen_fmt_args() -> list(oneof([gen_print_str(), "~~", {"~10000000.p", gen_any(5)}, {"~w", gen_any(5)}, {"~s", oneof([gen_print_str(), gen_atom(), gen_quoted_atom(), gen_print_bin(), gen_iolist(5)])}, {"~1000000.P", gen_any(5), 4}, {"~W", gen_any(5), 4}, {"~i", gen_any(5)}, {"~B", nat()}, {"~b", nat()}, {"~X", nat(), "0x"}, {"~x", nat(), "0x"}, {"~.10#", nat()}, {"~.10+", nat()}, {"~.36B", nat()}, {"~1000000.62P", gen_any(5), 4}, {"~c", gen_char()}, {"~tc", gen_char()}, {"~f", real()}, {"~10.f", real()}, {"~g", real()}, {"~10.g", real()}, {"~e", real()}, {"~10.e", real()} ])). %% Generates a printable string gen_print_str() -> ?LET(Xs, list(char()), [X || X <- Xs, io_lib:printable_list([X]), X /= $~, X < 256]). gen_print_bin() -> ?LET(Xs, gen_print_str(), list_to_binary(Xs)). gen_any(MaxDepth) -> oneof([largeint(), gen_atom(), gen_quoted_atom(), nat(), %real(), binary(), gen_bitstring(), gen_pid(), gen_port(), gen_ref(), gen_fun()] ++ [?LAZY(list(gen_any(MaxDepth - 1))) || MaxDepth /= 0] ++ [?LAZY(gen_tuple(gen_any(MaxDepth - 1))) || MaxDepth /= 0]). gen_iolist(0) -> []; gen_iolist(Depth) -> list(oneof([gen_char(), gen_print_str(), gen_print_bin(), gen_iolist(Depth-1)])). gen_atom() -> elements([abc, def, ghi]). gen_quoted_atom() -> elements(['abc@bar', '@bar', '10gen']). gen_bitstring() -> ?LET(XS, binary(), <>). gen_tuple(Gen) -> ?LET(Xs, list(Gen), list_to_tuple(Xs)). gen_max_len() -> %% Generate length from 3 to whatever. Needs space for ... in output ?LET(Xs, int(), 3 + abs(Xs)). gen_pid() -> ?LAZY(spawn(fun() -> ok end)). gen_port() -> ?LAZY(begin Port = erlang:open_port({spawn, "true"}, []), catch(erlang:port_close(Port)), Port end). gen_ref() -> ?LAZY(make_ref()). gen_fun() -> ?LAZY(fun() -> ok end). gen_char() -> oneof(lists:seq($A, $z)). %%==================================================================== %% Property %%==================================================================== %% Checks that trunc_io:format produces output less than or equal to MaxLen prop_format() -> ?FORALL({FmtArgs, MaxLen}, {gen_fmt_args(), gen_max_len()}, begin %% Because trunc_io will print '...' when its running out of %% space, even if the remaining space is less than 3, it %% doesn't *exactly* stick to the specified limit. %% Also, since we don't truncate terms not printed with %% ~p/~P/~w/~W/~s, we also need to calculate the wiggle room %% for those. Hence the fudge factor calculated below. 
FudgeLen = calculate_fudge(FmtArgs, 50), {FmtStr, Args} = build_fmt_args(FmtArgs), try Str = lists:flatten(lager_trunc_io:format(FmtStr, Args, MaxLen)), ?WHENFAIL(begin io:format(user, "FmtStr: ~p\n", [FmtStr]), io:format(user, "Args: ~p\n", [Args]), io:format(user, "FudgeLen: ~p\n", [FudgeLen]), io:format(user, "MaxLen: ~p\n", [MaxLen]), io:format(user, "ActLen: ~p\n", [length(Str)]), io:format(user, "Str: ~p\n", [Str]) end, %% Make sure the result is a printable list %% and if the format string is less than the length, %% the result string is less than the length. conjunction([{printable, Str == "" orelse io_lib:printable_list(Str)}, {length, length(FmtStr) > MaxLen orelse length(Str) =< MaxLen + FudgeLen}])) catch _:Err -> io:format(user, "\nException: ~p\n", [Err]), io:format(user, "FmtStr: ~p\n", [FmtStr]), io:format(user, "Args: ~p\n", [Args]), false end end). %% Checks for equivalent formatting to io_lib prop_equivalence() -> ?FORALL(FmtArgs, gen_fmt_args(), begin {FmtStr, Args} = build_fmt_args(FmtArgs), Expected = lists:flatten(io_lib:format(FmtStr, Args)), Actual = lists:flatten(lager_trunc_io:format(FmtStr, Args, 10485760)), ?WHENFAIL(begin io:format(user, "FmtStr: ~p\n", [FmtStr]), io:format(user, "Args: ~p\n", [Args]), io:format(user, "Expected: ~p\n", [Expected]), io:format(user, "Actual: ~p\n", [Actual]) end, Expected == Actual) end). %%==================================================================== %% Internal helpers %%==================================================================== %% Build a tuple of {Fmt, Args} from a gen_fmt_args() return build_fmt_args(FmtArgs) -> F = fun({Fmt, Arg}, {FmtStr0, Args0}) -> {FmtStr0 ++ Fmt, Args0 ++ [Arg]}; ({Fmt, Arg1, Arg2}, {FmtStr0, Args0}) -> {FmtStr0 ++ Fmt, Args0 ++ [Arg1, Arg2]}; (Str, {FmtStr0, Args0}) -> {FmtStr0 ++ Str, Args0} end, lists:foldl(F, {"", []}, FmtArgs). calculate_fudge([], Acc) -> Acc; calculate_fudge([{"~62P", _Arg, _Depth}|T], Acc) -> calculate_fudge(T, Acc+62); calculate_fudge([{Fmt, Arg}|T], Acc) when Fmt == "~f"; Fmt == "~10.f"; Fmt == "~g"; Fmt == "~10.g"; Fmt == "~e"; Fmt == "~10.e"; Fmt == "~x"; Fmt == "~X"; Fmt == "~B"; Fmt == "~b"; Fmt == "~36B"; Fmt == "~.10#"; Fmt == "~10+" -> calculate_fudge(T, Acc + length(lists:flatten(io_lib:format(Fmt, [Arg])))); calculate_fudge([_|T], Acc) -> calculate_fudge(T, Acc). -endif. % (EQC). -endif. % (TEST). lager-3.8.0/README.md0000644000232200023220000013721013523436621014452 0ustar debalancedebalanceOverview -------- Lager (as in the beer) is a logging framework for Erlang. Its purpose is to provide a more traditional way to perform logging in an erlang application that plays nicely with traditional UNIX logging tools like logrotate and syslog. [Travis-CI](http://travis-ci.org/erlang-lager/lager) :: [![Travis-CI](https://travis-ci.org/erlang-lager/lager.svg?branch=master)](http://travis-ci.org/erlang-lager/lager) Features -------- * Finer grained log levels (debug, info, notice, warning, error, critical, alert, emergency) * Logger calls are transformed using a parse transform to allow capturing Module/Function/Line/Pid information * When no handler is consuming a log level (eg. debug) no event is sent to the log handler * Supports multiple backends, including console and file. 
* Supports multiple sinks * Rewrites common OTP error messages into more readable messages * Support for pretty printing records encountered at compile time * Tolerant in the face of large or many log messages; won't run the node out of memory * Optional feature to bypass log size truncation ("unsafe") * Supports internal time and date based rotation, as well as external rotation tools * Syslog style log level comparison flags * Colored terminal output (requires R16+) * Map support (requires 17+) * Optional load shedding by setting a high water mark to kill (and reinstall) a sink after a configurable cool down timer Contributing ------------ We welcome contributions from the community. We are always excited to get ideas for improving lager. If you are looking for an idea to help out, please take a look at our open issues - a number of them are tagged with [Help Wanted](https://github.com/erlang-lager/lager/issues?q=is%3Aopen+is%3Aissue+label%3A%22Help+Wanted%22) and [Easy](https://github.com/erlang-lager/lager/issues?q=is%3Aopen+is%3Aissue+label%3AEasy) - some of them are tagged as both! We are happy to mentor people getting started with any of these issues, and they don't need prior discussion. That being said, before you send large changes, please open an issue first to discuss the change you'd like to make, along with an idea of how you propose to implement it. ### PR guidelines ### * Large changes without prior discussion are likely to be rejected. * Changes without test cases are likely to be rejected. * Please use the style of the existing codebase when submitting PRs. We review PRs and issues at least once a month as described below. OTP Support Policy ------------------ The lager maintainers intend to support the three most recent OTP releases on the main 3.x branch of the project. As of December 2018 that includes OTP 21, 20, and 19. Lager may or may not run on older OTP releases, but it is only guaranteed to be tested on those three releases. If you need a version of lager which runs on older OTP releases, we recommend you use either the 3.4.0 release or the 2.x branch. Monthly triage cadence ---------------------- We have (at least) monthly issue and PR triage for lager in the #lager room on the [freenode](https://freenode.net) IRC network every third Thursday at 2 pm US/Pacific, 10 pm UTC. You are welcome to join us there to ask questions about lager or participate in the triage. Usage ----- To use lager in your application, you need to define it as a rebar dependency or have some other way of including it in Erlang's path. You can then add the following option to the Erlang compiler flags: ```erlang {parse_transform, lager_transform} ``` Alternatively, you can add it to the module you wish to compile with logging enabled: ```erlang -compile([{parse_transform, lager_transform}]). ``` Before logging any messages, you'll need to start the lager application. The lager module's `start` function takes care of loading and starting any dependencies lager requires. ```erlang lager:start(). ``` You can also start lager on startup with a switch to `erl`: ```erlang erl -pa path/to/lager/ebin -s lager ``` Once you have built your code with lager and started the lager application, you can then generate log messages by doing the following: ```erlang lager:error("Some message") ``` Or: ```erlang lager:warning("Some message with a term: ~p", [Term]) ``` The general form is `lager:Severity()` where `Severity` is one of the log levels mentioned above.
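For example (an illustrative sketch; the variables are placeholders, and any of the severity levels listed under Features can be used the same way):

```erlang
%% Illustrative only -- Key, Backend and Reason stand in for your own terms.
lager:debug("Cache lookup for key ~p", [Key]),
lager:notice("Configuration reloaded"),
lager:critical("Backend ~p is unreachable: ~p", [Backend, Reason])
```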
Configuration ------------- To configure lager's backends, you use an application variable (probably in your app.config): ```erlang {lager, [ {log_root, "/var/log/hello"}, {handlers, [ {lager_console_backend, [{level, info}]}, {lager_file_backend, [{file, "error.log"}, {level, error}]}, {lager_file_backend, [{file, "console.log"}, {level, info}]} ]} ]}. ``` ```log_root``` variable is optional, by default file paths are relative to CWD. The available configuration options for each backend are listed in their module's documentation. Sinks ----- Lager has traditionally supported a single sink (implemented as a `gen_event` manager) named `lager_event` to which all backends were connected. Lager now supports extra sinks; each sink can have different sync/async message thresholds and different backends. ### Sink configuration To use multiple sinks (beyond the built-in sink of lager and lager_event), you need to: 1. Setup rebar.config 2. Configure the backends in app.config #### Names Each sink has two names: one atom to be used like a module name for sending messages, and that atom with `_lager_event` appended for backend configuration. This reflects the legacy behavior: `lager:info` (or `critical`, or `debug`, etc) is a way of sending a message to a sink named `lager_event`. Now developers can invoke `audit:info` or `myCompanyName:debug` so long as the corresponding `audit_lager_event` or `myCompanyName_lager_event` sinks are configured. #### rebar.config In `rebar.config` for the project that requires lager, include a list of sink names (without the `_lager_event` suffix) in `erl_opts`: `{lager_extra_sinks, [audit]}` #### Runtime requirements To be useful, sinks must be configured at runtime with backends. In `app.config` for the project that requires lager, for example, extend the lager configuration to include an `extra_sinks` tuple with backends (aka "handlers") and optionally `async_threshold` and `async_threshold_window` values (see **Overload Protection** below). If async values are not configured, no overload protection will be applied on that sink. ```erlang [{lager, [ {log_root, "/tmp"}, %% Default handlers for lager/lager_event {handlers, [ {lager_console_backend, [{level, info}]}, {lager_file_backend, [{file, "error.log"}, {level, error}]}, {lager_file_backend, [{file, "console.log"}, {level, info}]} ]}, %% Any other sinks {extra_sinks, [ {audit_lager_event, [{handlers, [{lager_file_backend, [{file, "sink1.log"}, {level, info} ] }] }, {async_threshold, 500}, {async_threshold_window, 50}] }] } ] } ]. ``` Custom Formatting ----------------- All loggers have a default formatting that can be overriden. A formatter is any module that exports `format(#lager_log_message{},Config#any())`. It is specified as part of the configuration for the backend: ```erlang {lager, [ {handlers, [ {lager_console_backend, [{level, info}, {formatter, lager_default_formatter}, {formatter_config, [time," [",severity,"] ", message, "\n"]}]}, {lager_file_backend, [{file, "error.log"}, {level, error}, {formatter, lager_default_formatter}, {formatter_config, [date, " ", time," [",severity,"] ",pid, " ", message, "\n"]}]}, {lager_file_backend, [{file, "console.log"}, {level, info}]} ]} ]}. ``` Included is `lager_default_formatter`. This provides a generic, default formatting for log messages using a structure similar to Erlang's [iolist](http://learnyousomeerlang.com/buckets-of-sockets#io-lists) which we call "semi-iolist": * Any traditional iolist elements in the configuration are printed verbatim. 
* Atoms in the configuration are treated as placeholders for lager metadata and extracted from the log message. * The placeholders `date`, `time`, `message`, `sev` and `severity` will always exist. * `sev` is an abbreviated severity which is interpreted as a capitalized single letter encoding of the severity level (e.g. `'debug'` -> `$D`) * The placeholders `pid`, `file`, `line`, `module`, `function`, and `node` will always exist if the parse transform is used. * The placeholder `application` may exist if the parse transform is used. It is dependent on finding the applications `app.src` file. * If the error logger integration is used, the placeholder `pid` will always exist and the placeholder `name` may exist. * Applications can define their own metadata placeholder. * A tuple of `{atom(), semi-iolist()}` allows for a fallback for the atom placeholder. If the value represented by the atom cannot be found, the semi-iolist will be interpreted instead. * A tuple of `{atom(), semi-iolist(), semi-iolist()}` represents a conditional operator: if a value for the atom placeholder can be found, the first semi-iolist will be output; otherwise, the second will be used. Examples: ``` ["Foo"] -> "Foo", regardless of message content. [message] -> The content of the logged message, alone. [{pid,"Unknown Pid"}] -> "" if pid is in the metadata, "Unknown Pid" if not. [{pid, ["My pid is ", pid], ["Unknown Pid"]}] -> if pid is in the metadata print "My pid is ", otherwise print "Unknown Pid" [{server,{pid, ["(", pid, ")"], ["(Unknown Server)"]}}] -> user provided server metadata, otherwise "()", otherwise "(Unknown Server)" ``` Universal time -------------- By default, lager formats timestamps as local time for whatever computer generated the log message. To make lager use UTC timestamps, you can set the `sasl` application's `utc_log` configuration parameter to `true` in your application configuration file. Example: ``` %% format log timestamps as UTC [{sasl, [{utc_log, true}]}]. ``` Error logger integration ------------------------ Lager is also supplied with a `error_logger` handler module that translates traditional erlang error messages into a friendlier format and sends them into lager itself to be treated like a regular lager log call. To disable this, set the lager application variable `error_logger_redirect` to `false`. You can also disable reformatting for OTP and Cowboy messages by setting variable `error_logger_format_raw` to `true`. If you installed your own handler(s) into `error_logger`, you can tell lager to leave it alone by using the `error_logger_whitelist` environment variable with a list of handlers to allow. ``` {error_logger_whitelist, [my_handler]} ``` The `error_logger` handler will also log more complete error messages (protected with use of `trunc_io`) to a "crash log" which can be referred to for further information. The location of the crash log can be specified by the `crash_log` application variable. If set to `false` it is not written at all. Messages in the crash log are subject to a maximum message size which can be specified via the `crash_log_msg_size` application variable. Messages from `error_logger` will be redirected to `error_logger_lager_event` sink if it is defined so it can be redirected to another log file. For example: ``` [{lager, [ {extra_sinks, [ {error_logger_lager_event, [{handlers, [ {lager_file_backend, [{file, "error_logger.log"}, {level, info}]}] }] }] }] }]. ``` will send all `error_logger` messages to `error_logger.log` file. 
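As a rough sketch, the `error_logger`-related settings described in this section can be gathered into a single lager environment block; the values shown are illustrative assumptions rather than recommended defaults, and `my_handler` is a hypothetical handler name:

```erlang
[{lager, [
    %% translate error_logger messages into lager log messages
    {error_logger_redirect, true},
    %% set to true to stop reformatting of OTP and Cowboy messages
    {error_logger_format_raw, false},
    %% keep this (hypothetical) custom error_logger handler installed
    {error_logger_whitelist, [my_handler]},
    %% write full-length (trunc_io protected) reports to a crash log,
    %% capping each message at 64 KiB
    {crash_log, "crash.log"},
    {crash_log_msg_size, 65536}
]}].
```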
Overload Protection ------------------- ### Asynchronous mode Prior to lager 2.0, the `gen_event` at the core of lager operated purely in synchronous mode. Asynchronous mode is faster, but has no protection against message queue overload. As of lager 2.0, the `gen_event` takes a hybrid approach. it polls its own mailbox size and toggles the messaging between synchronous and asynchronous depending on mailbox size. ```erlang {async_threshold, 20}, {async_threshold_window, 5} ``` This will use async messaging until the mailbox exceeds 20 messages, at which point synchronous messaging will be used, and switch back to asynchronous, when size reduces to `20 - 5 = 15`. If you wish to disable this behaviour, simply set `async_threshold` to `undefined`. It defaults to a low number to prevent the mailbox growing rapidly beyond the limit and causing problems. In general, lager should process messages as fast as they come in, so getting 20 behind should be relatively exceptional anyway. If you want to limit the number of messages per second allowed from `error_logger`, which is a good idea if you want to weather a flood of messages when lots of related processes crash, you can set a limit: ```erlang {error_logger_hwm, 50} ``` It is probably best to keep this number small. ### Event queue flushing When the high-water mark is exceeded, lager can be configured to flush all event notifications in the message queue. This can have unintended consequences for other handlers in the same event manager (in e.g. the `error_logger`), as events they rely on may be wrongly discarded. By default, this behavior is enabled, but can be controlled, for the `error_logger` via: ```erlang {error_logger_flush_queue, true | false} ``` or for a specific sink, using the option: ```erlang {flush_queue, true | false} ``` If `flush_queue` is true, a message queue length threshold can be set, at which messages will start being discarded. The default threshold is `0`, meaning that if `flush_queue` is true, messages will be discarded if the high-water mark is exceeded, regardless of the length of the message queue. The option to control the threshold is, for `error_logger`: ```erlang {error_logger_flush_threshold, 1000} ``` and for sinks: ```erlang {flush_threshold, 1000} ``` ### Sink Killer In some high volume situations, it may be preferable to drop all pending log messages instead of letting them drain over time. If you prefer, you may choose to use the sink killer to shed load. In this operational mode, if the `gen_event` mailbox exceeds a configurable high water mark, the sink will be killed and reinstalled after a configurable cool down time. You can configure this behavior by using these configuration directives: ```erlang {killer_hwm, 1000}, {killer_reinstall_after, 5000} ``` This means if the sink's mailbox size exceeds 1000 messages, kill the entire sink and reload it after 5000 milliseconds. This behavior can also be installed into alternative sinks if desired. By default, the manager killer *is not installed* into any sink. If the `killer_reinstall_after` cool down time is not specified it defaults to 5000. "Unsafe" -------- The unsafe code pathway bypasses the normal lager formatting code and uses the same code as error_logger in OTP. This provides a marginal speedup to your logging code (we measured between 0.5-1.3% improvement during our benchmarking; others have reported better improvements.) This is a **dangerous** feature. 
It *will not* protect you against large log messages - large messages can kill your application and even your Erlang VM dead due to memory exhaustion as large terms are copied over and over in a failure cascade. We strongly recommend that this code pathway only be used by log messages with a well bounded upper size of around 500 bytes. If there's any possibility the log messages could exceed that limit, you should use the normal lager message formatting code which will provide the appropriate size limitations and protection against memory exhaustion. If you want to format an unsafe log message, you may use the severity level (as usual) followed by `_unsafe`. Here's an example: ```erlang lager:info_unsafe("The quick brown ~s jumped over the lazy ~s", ["fox", "dog"]). ``` Runtime loglevel changes ------------------------ You can change the log level of any lager backend at runtime by doing the following: ```erlang lager:set_loglevel(lager_console_backend, debug). ``` Or, for the backend with multiple handles (files, mainly): ```erlang lager:set_loglevel(lager_file_backend, "console.log", debug). ``` Lager keeps track of the minimum log level being used by any backend and suppresses generation of messages lower than that level. This means that debug log messages, when no backend is consuming debug messages, are effectively free. A simple benchmark of doing 1 million debug log messages while the minimum threshold was above that takes less than half a second. Syslog style loglevel comparison flags -------------------------------------- In addition to the regular log level names, you can also do finer grained masking of what you want to log: ``` info - info and higher (>= is implicit) =debug - only the debug level !=info - everything but the info level <=notice - notice and below {ok, {FD::file:io_device(), Inode::integer(), Size::integer()}} | {error, any()}). %% @doc Open a log file -callback(open_logfile(Name::list(), Buffer::{integer(), integer()} | any()) -> {ok, {FD::file:io_device(), Inode::integer(), Size::integer()}} | {error, any()}). %% @doc Ensure reference to current target, could be rotated -callback(ensure_logfile(Name::list(), FD::file:io_device(), Inode::integer(), Buffer::{integer(), integer()} | any()) -> {ok, {FD::file:io_device(), Inode::integer(), Size::integer()}} | {error, any()}). %% @doc Rotate the log file -callback(rotate_logfile(Name::list(), Count::integer()) -> ok). ``` Syslog Support -------------- Lager syslog output is provided as a separate application: [lager_syslog](https://github.com/erlang-lager/lager_syslog). It is packaged as a separate application so lager itself doesn't have an indirect dependency on a port driver. Please see the `lager_syslog` README for configuration information. Other Backends -------------- There are lots of them! Some connect log messages to AMQP, various logging analytic services ([bunyan](https://github.com/Vagabond/lager_bunyan_formatter), [loggly](https://github.com/kivra/lager_loggly), etc), and more. [Looking on hex](https://hex.pm/packages?_utf8=✓&search=lager&sort=recent_downloads) or using "lager BACKEND" where "BACKEND" is your preferred log solution on your favorite search engine is a good starting point. Exception Pretty Printing ---------------------- Up to OTP 20: ```erlang try foo() catch Class:Reason -> lager:error( "~nStacktrace:~s", [lager:pr_stacktrace(erlang:get_stacktrace(), {Class, Reason})]) end. 
``` On OTP 21+: ```erlang try foo() catch Class:Reason:Stacktrace -> lager:error( "~nStacktrace:~s", [lager:pr_stacktrace(Stacktrace, {Class, Reason})]) end. ``` Record Pretty Printing ---------------------- Lager's parse transform will keep track of any record definitions it encounters and store them in the module's attributes. You can then, at runtime, print any record a module compiled with the lager parse transform knows about by using the `lager:pr/2` function, which takes the record and the module that knows about the record: ```erlang lager:info("My state is ~p", [lager:pr(State, ?MODULE)]) ``` Often, `?MODULE` is sufficent, but you can obviously substitute that for a literal module name. `lager:pr` also works from the shell. Colored terminal output ----------------------- If you have Erlang R16 or higher, you can tell lager's console backend to be colored. Simply add to lager's application environment config: ```erlang {colored, true} ``` If you don't like the default colors, they are also configurable; see the `.app.src` file for more details. The output will be colored from the first occurrence of the atom color in the formatting configuration. For example: ```erlang {lager_console_backend, [{level, info}, {formatter, lager_default_formatter}, {formatter_config, [time, color, " [",severity,"] ", message, "\e[0m\r\n"]}]]} ``` This will make the entire log message, except time, colored. The escape sequence before the line break is needed in order to reset the color after each log message. Tracing ------- Lager supports basic support for redirecting log messages based on log message attributes. Lager automatically captures the pid, module, function and line at the log message callsite. However, you can add any additional attributes you wish: ```erlang lager:warning([{request, RequestID},{vhost, Vhost}], "Permission denied to ~s", [User]) ``` Then, in addition to the default trace attributes, you'll be able to trace based on request or vhost: ```erlang lager:trace_file("logs/example.com.error", [{vhost, "example.com"}], error) ``` To persist metadata for the life of a process, you can use `lager:md/1` to store metadata in the process dictionary: ```erlang lager:md([{zone, forbidden}]) ``` Note that `lager:md` will *only* accept a list of key/value pairs keyed by atoms. You can also omit the final argument, and the loglevel will default to `debug`. Tracing to the console is similar: ```erlang lager:trace_console([{request, 117}]) ``` In the above example, the loglevel is omitted, but it can be specified as the second argument if desired. You can also specify multiple expressions in a filter, or use the `*` atom as a wildcard to match any message that has that attribute, regardless of its value. You may also use the special value `!` to mean, only select if this key is **not** present. Tracing to an existing logfile is also supported (but see **Multiple sink support** below): ```erlang lager:trace_file("log/error.log", [{module, mymodule}, {function, myfunction}], warning) ``` To view the active log backends and traces, you can use the `lager:status()` function. To clear all active traces, you can use `lager:clear_all_traces()`. To delete a specific trace, store a handle for the trace when you create it, that you later pass to `lager:stop_trace/1`: ```erlang {ok, Trace} = lager:trace_file("log/error.log", [{module, mymodule}]), ... lager:stop_trace(Trace) ``` Tracing to a pid is somewhat of a special case, since a pid is not a data-type that serializes well. 
To trace by pid, use the pid as a string: ```erlang lager:trace_console([{pid, "<0.410.0>"}]) ``` ### Filter expressions As of lager 3.3.1, you can also use a 3 tuple while tracing where the second element is a comparison operator. The currently supported comparison operators are: * `<` - less than * `=<` - less than or equal * `=` - equal to * `!=` - not equal to * `>` - greater than * `>=` - greater than or equal ```erlang lager:trace_console([{request, '>', 117}, {request, '<', 120}]) ``` Using `=` is equivalent to the 2-tuple form. ### Filter composition As of lager 3.3.1 you may also use the special filter composition keys of `all` or `any`. For example the filter example above could be expressed as: ```erlang lager:trace_console([{all, [{request, '>', 117}, {request, '<', 120}]}]) ``` `any` has the effect of "OR style" logical evaluation between filters; `all` means "AND style" logical evaluation between filters. These compositional filters expect a list of additional filter expressions as their values. ### Null filters The `null` filter has a special meaning. A filter of `{null, false}` acts as a black hole; nothing is passed through. A filter of `{null, true}` means *everything* passes through. No other values for the null filter are valid and will be rejected. ### Multiple sink support If using multiple sinks, there are limitations on tracing that you should be aware of. Traces are specific to a sink, which can be specified via trace filters: ```erlang lager:trace_file("log/security.log", [{sink, audit_event}, {function, myfunction}], warning) ``` If no sink is thus specified, the default lager sink will be used. This has two ramifications: * Traces cannot intercept messages sent to a different sink. * Tracing to a file already opened via `lager:trace_file` will only be successful if the same sink is specified. The former can be ameliorated by opening multiple traces; the latter can be fixed by rearchitecting lager's file backend, but this has not been tackled. ### Traces from configuration Lager supports starting traces from its configuration file. The keyword to define them is `traces`, followed by a proplist of tuples that define a backend handler and zero or more filters in a required list, followed by an optional message severity level. An example looks like this: ```erlang {lager, [ {handlers, [...]}, {traces, [ %% handler, filter, message level (defaults to debug if not given) {lager_console_backend, [{module, foo}], info }, {{lager_file_backend, "trace.log"}, [{request, '>', 120}], error}, {{lager_file_backend, "event.log"}, [{module, bar}] } %% implied debug level here ]} ]}. ``` In this example, we have three traces. One using the console backend, and two using the file backend. If the message severity level is left out, it defaults to `debug` as in the last file backend example. The `traces` keyword works on alternative sinks too but the same limitations and caveats noted above apply. **IMPORTANT**: You **must** define a severity level in all lager releases up to and including 3.1.0 or previous. The 2-tuple form wasn't added until 3.2.0. Setting dynamic metadata at compile-time ---------------------------------------- Lager supports supplying metadata from external sources by registering a callback function. This metadata is also persistent across processes even if the process dies. In general use you won't need to use this feature. 
However, it is useful in situations such as:

* Tracing information provided by [seq_trace](http://erlang.org/doc/man/seq_trace.html)
* Contextual information about your application
* Persistent information which isn't provided by the default placeholders
* Situations where you would have to set the metadata before every logging call

You can add the callbacks by using the `{lager_function_transforms, X}` option. It is only available when using `parse_transform`. In rebar, you can add it to `erl_opts` as below:

```erlang
{erl_opts, [{parse_transform, lager_transform},
  {lager_function_transforms,
    [
      %% Placeholder                 Resolve type  Callback tuple
      {metadata_placeholder,         on_emit,      {module_name, function_name}},
      {other_metadata_placeholder,   on_log,       {module_name, function_name}}
    ]}]}.
```

The first atom is the placeholder atom used for the substitution in your custom formatter. See [Custom Formatting](#custom-formatting) for more information.

The second atom is the resolve type. It specifies whether the callback is resolved at the time the message is emitted or at the time of the logging call. You have to specify either the atom `on_emit` or `on_log`. There is no single 'right' resolve type to use, so please read the uses/caveats of each and pick the option which fits your requirements best.

`on_emit`:
* The callback functions are not resolved until the message is emitted by the backend.
* If the callback function cannot be resolved, is not loaded, or produces unhandled errors, then `undefined` will be returned.
* Since the callback function is dependent on a process, there is a chance that the message will be emitted after the dependent process has died, resulting in `undefined` being returned. This process can also be your own process.

`on_log`:
* The callback functions are resolved regardless of whether the message is emitted or not.
* If the callback function cannot be resolved or is not loaded, the errors are not handled by lager itself.
* Any potential errors in the callback should be handled in the callback function itself.
* Because the function is resolved at log time, there should be less chance of the dependent process dying before you can resolve it, especially if you are logging from the app which contains the callback.

The third element is the callback to your function consisting of a tuple in the form `{Module, Function}`. The callback should look like the following regardless of whether you are using `on_emit` or `on_log`:
* It should be exported
* It should take no arguments, i.e. have an arity of 0
* It should return any traditional iolist elements or the atom `undefined`
* For errors generated within your callback see the resolve type documentation above.

If the callback returns `undefined` then it will follow the same fallback and conditional operator rules as documented in the [Custom Formatting](#custom-formatting) section.

This example would work with `on_emit` but could be unsafe to use with `on_log`. If the call failed in `on_emit` it would default to `undefined`, however with `on_log` it would error.

```erlang
-export([my_callback/0]).

my_callback() ->
    my_app_serv:call('some options').
```

This example would be safe to work with both `on_emit` and `on_log`:

```erlang
-export([my_callback/0]).

my_callback() ->
    try my_app_serv:call('some options') of
        Result ->
            Result
    catch
        _ ->
            %% You could define any traditional iolist elements you wanted here
            undefined
    end.
```

Note that the callback can be any Module:Function/0. It does not have to be part of your application.
For example, you could use `cpu_sup:avg1/0` as your callback function, like so: `{cpu_avg1, on_emit, {cpu_sup, avg1}}`.

Examples:

```erlang
-export([reductions/0]).

reductions() ->
    proplists:get_value(reductions, erlang:process_info(self())).
```

```erlang
-export([seq_trace/0]).

seq_trace() ->
    case seq_trace:get_token(label) of
        {label, TraceLabel} ->
            TraceLabel;
        _ ->
            undefined
    end.
```

**IMPORTANT**: Since `on_emit` relies on function calls injected at the point where a log message is emitted, your logging performance (ops/sec) will be impacted by what the functions you call do and how much latency they may introduce. This impact will be even greater with `on_log` since the calls are injected at the point a message is logged.

Setting the truncation limit at compile-time
--------------------------------------------

Lager defaults to truncating messages at 4096 bytes; you can alter this by using the `{lager_truncation_size, X}` option. In rebar, you can add it to `erl_opts`:

```erlang
{erl_opts, [{parse_transform, lager_transform}, {lager_truncation_size, 1024}]}.
```

You can also pass it to `erlc`, if you prefer:

```
erlc -pa lager/ebin +'{parse_transform, lager_transform}' +'{lager_truncation_size, 1024}' file.erl
```

Suppress applications and supervisors start/stop logs
-----------------------------------------------------

If you don't want to see supervisor and application start/stop logs at the debug level of your application, you can use these configs to turn them off:

```erlang
{lager, [{suppress_application_start_stop, true},
         {suppress_supervisor_start_stop, true}]}
```

Sys debug functions
--------------------

Lager provides an integrated way to use sys 'debug functions'. You can install a debug function in a target process by doing:

```erlang
lager:install_trace(Pid, notice).
```

You can also customize the tracing somewhat:

```erlang
lager:install_trace(Pid, notice, [{count, 100}, {timeout, 5000}, {format_string, "my trace event ~p ~p"}]).
```

The trace options are currently:

* timeout - how long the trace stays installed: `infinity` (the default) or a millisecond timeout
* count - how many trace events to log: `infinity` (default) or a positive number
* format_string - the format string to log the event with. *Must* have 2 format specifiers for the 2 parameters supplied.

This will, on every 'system event' for an OTP process (usually inbound messages, replies and state changes), generate a lager message at the specified log level.

You can remove the trace when you're done by doing:

```erlang
lager:remove_trace(Pid).
```

If you want to start an OTP process with tracing enabled from the very beginning, you can do something like this:

```erlang
gen_server:start_link(mymodule, [], [{debug, [{install, {fun lager:trace_func/3, lager:trace_state(undefined, notice, [])}}]}]).
```

The third argument to the trace_state function is the Option list documented above.

Console output to another group leader process
----------------------------------------------

If you want to send your console output to another group_leader (typically on another node) you can provide a `{group_leader, Pid}` argument to the console backend.
This can be combined with another console config option, `id` and gen_event's `{Module, ID}` to allow remote tracing of a node to standard out via nodetool: ```erlang GL = erlang:group_leader(), Node = node(GL), lager_app:start_handler(lager_event, {lager_console_backend, Node}, [{group_leader, GL}, {level, none}, {id, {lager_console_backend, Node}}]), case lager:trace({lager_console_backend, Node}, Filter, Level) of ... ``` In the above example, the code is assumed to be running via a `nodetool rpc` invocation so that the code is executing on the Erlang node, but the group_leader is that of the reltool node (eg. appname_maint_12345@127.0.0.1). If you intend to use tracing with this feature, make sure the second parameter to start_handler and the `id` parameter match. Thus when the custom group_leader process exits, lager will remove any associated traces for that handler. Elixir Support -------------- There are 2 ways in which Lager can be leveraged in an Elixir project: 1. Lager Backend for Elixir Logger 2. Directly ### Lager Backend for Elixir Logger [Elixir's Logger](https://hexdocs.pm/logger/Logger.html) is the idiomatic way to add logging into elixir code. Logger has a plug-in model, allowing for different logging [Backends](https://hexdocs.pm/logger/Logger.html#module-backends) to be used without the need to change the logging code within your project. This approach will benefit from the fact that most elixir libs and frameworks are likely to use the elixir Logger and as such logging will all flow via the same logging mechanism. In [elixir 1.5 support for parse transforms was deprecated](https://github.com/elixir-lang/elixir/issues/5762). Taking the "Lager as a Logger Backend" approach is likely bypass any related regression issues that would be introduced into a project which is using lager directly when updating to elixir 1.5. There are open source elixir Logger backends for Lager available: - [LagerLogger](https://github.com/PSPDFKit-labs/lager_logger) - [LoggerLagerBackend](https://github.com/jonathanperret/logger_lager_backend) ### Directly It is fully possible prior to elixir 1.5 to use lager and all its features directly. After elixir 1.5 there is no support for parse transforms, and it is recommended to use an elixir wrapper for the lager api that provides compile time log level exclusion via elixir macros when opting for direct use of lager. Including Lager as a dependency: ``` elixir # mix.exs def application do [ applications: [:lager], erl_opts: [parse_transform: "lager_transform"] ] end defp deps do [{:lager, "~> 3.2"}] end ``` Example Configuration: ``` elixir # config.exs use Mix.Config # Stop lager writing a crash log config :lager, :crash_log, false config :lager, log_root: '/var/log/hello', handlers: [ lager_console_backend: :info, lager_file_backend: [file: "error.log", level: :error], lager_file_backend: [file: "console.log", level: :info] ] ``` There is a known issue where Elixir's Logger and Lager both contest for the Erlang `error_logger` handle if used side by side. 
If using both add the following to your `config.exs`: ```elixir # config.exs use Mix.Config # Stop lager redirecting :error_logger messages config :lager, :error_logger_redirect, false # Stop lager removing Logger's :error_logger handler config :lager, :error_logger_whitelist, [Logger.ErrorHandler] ``` Example Usage: ``` elixir :lager.error('Some message') :lager.warning('Some message with a term: ~p', [term]) ``` 3.x Changelog ------------- 3.8.0 - 9 August 2019 * Breaking API change: Modify the `lager_rotator_behaviour` to pass in a file's creation time to `ensure_logfile/5` to be used to determine if file has changed on systems where inodes are not available (i.e. `win32`). The return value from `create_logfile/2`, `open_logfile/2` and `ensure_logfile/5` now requires ctime to be returned (#509) * Bugfix: ensure log file rotation works on `win32` (#509) * Bugfix: ensure test suite passes on `win32` (#509) * Bugfix: ensure file paths with Unicode are formatted properly (#510) 3.7.0 - 24 May 2019 * Policy: Officially ending support for OTP 19 (Support OTP 20, 21, 22) * Cleanup: Fix all dialyzer errors * Bugfix: Minor changes to FSM/statem exits in OTP 22. 3.6.10 - 30 April 2019 * Documentation: Fix pr_stacktrace invocation example (#494) * Bugfix: Do not count suppressed messages for message drop counts (#499) 3.6.9 - 13 March 2019 * Bugfix: Fix file rotation on windows (#493) 3.6.8 - 21 December 2018 * Documentation: Document the error_logger_whitelist environment variable. (#489) * Bugfix: Remove the built in handler inside of OTP 21 `logger` system. (#488) * Bugfix: Cleanup unneeded check for is_map (#486) * Bugfix: Cleanup ranch errors treated as cowboy errors (#485) * Testing: Remove OTP 18 from TravisCI testing matrix 3.6.7 - 14 October 2018 * Bugfix: fix tracing to work with OTP21 #480 3.6.6 - 24 September 2018 * Bugfix: When printing records, handle an improper list correctly. #478 * Bugfix: Fix various tests and make some rotation code more explicit. #476 * Bugfix: Make sure not to miscount messages during high-water mark check. 
#475 3.6.5 - 3 September 2018 * Feature: Allow the console backend to redirect output to a remote node #469 * Feature: is_loggble - support for severity as atom #472 * Bugfix: Prevent silent dropping of messages when hwm is exceeded #467 * Bugfix: rotation - default log file not deleted #474 * Bugfix: Handle strange crash report from gen_statem #473 * Documentation: Various markup fixes: #468 #470 3.6.4 - 11 July 2018 * Bugfix: Reinstall handlers after a sink is killed #459 * Bugfix: Fix platform_define matching not to break on OSX Mojave #461 * Feature: Add support for installing a sys trace function #462 3.6.3 - 6 June 2018 * OTP 21 support 3.6.2 - 26 April 2018 * Bugfix: flush_threshold not working (#449) * Feature: Add `node` as a formatting option (#447) * Documentation: Update Elixir section with information about parse_transform (#446) * Bugfix: Correct default console configuation to use "[{level,info}]" instead (#445) * Feature: Pretty print lists of records at top level and field values with lager:pr (#442) * Bugfix: Ignore return value of lager:dispatch_log in lager.hrl (#441) 3.6.1 - 1 February 2018 * Bugfix: Make a few corrections to the recent mailbox flushing changes (#436) * Bugfix: add flush options to proplist validation (#439) * Bugfix: Don't log when we dropped 0 messages (#440) 3.6.0 - 16 January 2018 * Feature: Support logging with macros per level (#419) * Feature: Support custom file rotation handler; support hourly file rotation (#420) * Feature: Optionally reverse pretty stacktraces (so errors are at the top and the failed function call is at the bottom.) (#424) * Bugfix: Handle OTP 20 gen_server failure where client pid is dead. (#426) * Feature: Optionally don't flush notify messages at high water mark. (#427) * Bugfix: Handle another stacktrace format (#429) * Bugfix: Fix test failure using macros on OTP 18 (#430) * Policy: Remove all code which supports R15 (#432) 3.5.2 - 19 October 2017 * Bugfix: Properly check for unicode characters in potentially deep character list. (#417) 3.5.1 - 15 June 2017 * Doc fix: Missed a curly brace in an example. (#412) * Feature: Dynamic metadata functions (#392) - It is now possible to dynamically add metadata to lager messages. See the "dynamic metadata" section above for more information. * Doc fix: Add information about the "application" placeholder. (#414) 3.5.0 - 28 May 2017 * Bugfix: Support OTP 20 gen_event messages (#410) * Feature: Enable console output to standard_error. Convert to proplist configuration style (like file handler) Deprecate previous configuration directives (#409) * Bugfix: Enable the event shaper to filter messages before they're counted; do not count application/supervisor start/stops toward high water mark. (#411) * Docs: Add PR guidelines; add info about the #lager chat room on freenode. 3.4.2 - 26 April 2017 * Docs: Document how to make lager use UTC timestamps (#405) * Docs: Add a note about our triage cadence. * Docs: Update lager_syslog URL * Docs: Document placeholders for error_logger integration (#404) * Feature: Add hex.pm metadata and full rebar3 support. 3.4.1 - 28 March 2017 * Docs: Added documentation around using lager in the context of elixir applications (#398) * Bugfix: Properly expand paths when log_root is set. (#386) * Policy: Removed R15 from Travis configuration 3.4.0 - 16 March 2017 * Policy: Adopt official OTP support policy. (This is the **last** lager 3.x release that will support R15.) * Test: Fix timeouts, R15 missing functions on possibly long-running tests in Travis. 
(#394, #395) * Feature: capture and log metadata from error_logger messages (#397) * Feature: Expose new trace filters and enable filter composition (#389) * Feature: Log crashes from gen_fsm and gen_statem correctly (#391) * Docs: Typo in badge URL (#390) 3.3.0 - 16 February 2017 * Docs: Fix documentation to make 'it' unambiguous when discussing asychronous operation. (#387) * Test: Fix test flappiness due to insufficient sanitation between test runs (#384, #385) * Feature: Allow metadata only logging. (#380) * Feature: Add an upper case severity formatter (#372) * Feature: Add support for suppressing start/stop messages from supervisors (#368) * Bugfix: Fix ranch crash messages (#366) * Test: Update Travis config for 18.3 and 19.0 (#365) 3.2.4 - 11 October 2016 * Test: Fix dialyzer warnings. 3.2.3 - 29 September 2016 * Dependency: Update to goldrush 0.19 3.2.2 - 22 September 2016 * Bugfix: Backwards-compatibility fix for `{crash_log, undefined}` (#371) * Fix documentation/README to reflect the preference for using `false` as the `crash_log` setting value rather than `undefined` to indicate that the crash log should not be written (#364) * Bugfix: Backwards-compatibility fix for `lager_file_backend` "legacy" configuration format (#374) 3.2.1 - 10 June 2016 * Bugfix: Recent `get_env` changes resulted in launch failure (#355) * OTP: Support typed records for Erlang 19.0 (#361) 3.2.0 - 08 April 2016 * Feature: Optional sink killer to shed load when mailbox size exceeds a configurable high water mark (#346) * Feature: Export `configure_sink/2` so users may dynamically configure previously setup and parse transformed sinks from their own code. (#342) * Feature: Re-enable Travis CI and update .travis.yml (#340) * Bugfix: Fix test race conditions for Travis CI (#344) * Bugfix: Add the atom 'none' to the log_level() type so downstream users won't get dialyzer failures if they use the 'none' log level. (#343) * Bugfix: Fix typo in documentation. (#341) * Bugfix: Fix OTP 18 test failures due to `warning_map/0` response change. (#337) * Bugfix: Make sure traces that use the file backend work correctly when specified in lager configuration. (#336) * Bugfix: Use `lager_app:get_env/3` for R15 compatibility. (#335) * Bugfix: Make sure lager uses `id` instead of `name` when reporting supervisor children failures. (The atom changed in OTP in 2014.) (#334) * Bugfix: Make lager handle improper iolists (#327) 3.1.0 - 27 January 2016 * Feature: API calls to a rotate handler, sink or all. This change introduces a new `rotate` message for 3rd party lager backends; that's why this is released as a new minor version number. (#311) 3.0.3 - 27 January 2016 * Feature: Pretty printer for human readable stack traces (#298) * Feature: Make error reformatting optional (#305) * Feature: Optional and explicit sink for error_logger messages (#303) * Bugfix: Always explicitly close a file after its been rotated (#316) * Bugfix: If a relative path already contains the log root, do not add it again (#317) * Bugfix: Configure and start extra sinks before traces are evaluated (#307) * Bugfix: Stop and remove traces correctly (#306) * Bugfix: A byte value of 255 is valid for Unicode (#300) * Dependency: Bump to goldrush 0.1.8 (#313) lager-3.8.0/LICENSE0000644000232200023220000002367713523436621014213 0ustar debalancedebalance Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. 
"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS lager-3.8.0/include/0000755000232200023220000000000013523436621014612 5ustar debalancedebalancelager-3.8.0/include/lager.hrl0000644000232200023220000001606713523436621016425 0ustar debalancedebalance%% Copyright (c) 2011-2012 Basho Technologies, Inc. All Rights Reserved. %% %% This file is provided to you under the Apache License, %% Version 2.0 (the "License"); you may not use this file %% except in compliance with the License. You may obtain %% a copy of the License at %% %% http://www.apache.org/licenses/LICENSE-2.0 %% %% Unless required by applicable law or agreed to in writing, %% software distributed under the License is distributed on an %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY %% KIND, either express or implied. See the License for the %% specific language governing permissions and limitations %% under the License. -define(DEFAULT_TRUNCATION, 4096). -define(DEFAULT_TRACER, lager_default_tracer). -define(DEFAULT_SINK, lager_event). -define(ERROR_LOGGER_SINK, error_logger_lager_event). -define(METADATA(Extras), [{severity, info}, {pid, self()}, {node, node()}, {module, ?MODULE}, {function, ?FUNCTION_NAME}, {function_arity, ?FUNCTION_ARITY}, {file, ?FILE}, {line, ?LINE} | Extras]). 
-define(lager_log(Severity, Format, Args, Safety), ?lager_log(?DEFAULT_SINK, Severity, ?METADATA(lager:md()), Format, Args, ?DEFAULT_TRUNCATION, Safety)). -define(lager_log(Severity, Metadata, Format, Args, Safety), ?lager_log(?DEFAULT_SINK, Severity, ?METADATA(Metadata++lager:md()), Format, Args, ?DEFAULT_TRUNCATION, Safety)). -define(lager_log(Sink, Severity, Metadata, Format, Args, Size, Safety), _ = lager:dispatch_log(Sink, Severity, Metadata, Format, Args, Size, Safety)). -define(lager_debug(Format, Args), ?lager_log(debug, Format, Args, safe)). -define(lager_debug(Metadata, Format, Args), ?lager_log(debug, Metadata, Format, Args, safe)). -define(lager_info(Format, Args), ?lager_log(info, Format, Args, safe)). -define(lager_info(Metadata, Format, Args), ?lager_log(info, Metadata, Format, Args, safe)). -define(lager_notice(Format, Args), ?lager_log(notice, Format, Args, safe)). -define(lager_notice(Metadata, Format, Args), ?lager_log(notice, Metadata, Format, Args, safe)). -define(lager_warning(Format, Args), ?lager_log(warning, Format, Args, safe)). -define(lager_warning(Metadata, Format, Args), ?lager_log(warning, Metadata, Format, Args, safe)). -define(lager_error(Format, Args), ?lager_log(error, Format, Args, safe)). -define(lager_error(Metadata, Format, Args), ?lager_log(error, Metadata, Format, Args, safe)). -define(lager_critical(Format, Args), ?lager_log(critical, Format, Args, safe)). -define(lager_critical(Metadata, Format, Args), ?lager_log(critical, Metadata, Format, Args, safe)). -define(lager_alert(Format, Args), ?lager_log(alert, Format, Args, safe)). -define(lager_alert(Metadata, Format, Args), ?lager_log(alert, Metadata, Format, Args, safe)). -define(lager_emergency(Format, Args), ?lager_log(emergency, Format, Args, safe)). -define(lager_emergency(Metadata, Format, Args), ?lager_log(emergency, Metadata, Format, Args, safe)). -define(lager_none(Format, Args), ?lager_log(none, Format, Args, safe)). -define(lager_none(Metadata, Format, Args), ?lager_log(none, Metadata, Format, Args, safe)). -define(LEVELS, [debug, info, notice, warning, error, critical, alert, emergency, none]). %% Use of these "functions" means that the argument list will not be %% truncated for safety -define(LEVELS_UNSAFE, [{debug_unsafe, debug}, {info_unsafe, info}, {notice_unsafe, notice}, {warning_unsafe, warning}, {error_unsafe, error}, {critical_unsafe, critical}, {alert_unsafe, alert}, {emergency_unsafe, emergency}]). -define(DEBUG, 128). -define(INFO, 64). -define(NOTICE, 32). -define(WARNING, 16). -define(ERROR, 8). -define(CRITICAL, 4). -define(ALERT, 2). -define(EMERGENCY, 1). -define(LOG_NONE, 0). -define(LEVEL2NUM(Level), case Level of debug -> ?DEBUG; info -> ?INFO; notice -> ?NOTICE; warning -> ?WARNING; error -> ?ERROR; critical -> ?CRITICAL; alert -> ?ALERT; emergency -> ?EMERGENCY end). -define(NUM2LEVEL(Num), case Num of ?DEBUG -> debug; ?INFO -> info; ?NOTICE -> notice; ?WARNING -> warning; ?ERROR -> error; ?CRITICAL -> critical; ?ALERT -> alert; ?EMERGENCY -> emergency end). -define(SHOULD_LOG(Sink, Level), (lager_util:level_to_num(Level) band element(1, lager_config:get({Sink, loglevel}, {?LOG_NONE, []}))) /= 0). -define(SHOULD_LOG(Level), (lager_util:level_to_num(Level) band element(1, lager_config:get(loglevel, {?LOG_NONE, []}))) /= 0). -define(NOTIFY(Level, Pid, Format, Args), gen_event:notify(lager_event, {log, lager_msg:new(io_lib:format(Format, Args), Level, [{pid,Pid},{line,?LINE},{file,?FILE},{module,?MODULE}], [])} )). 
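%% Taken together, the ?lager_<level> macros above expand to
%% lager:dispatch_log/7 on the default sink (?DEFAULT_SINK), with the
%% standard ?METADATA set, ?DEFAULT_TRUNCATION as the size limit and
%% 'safe' formatting; e.g. ?lager_info("hello ~p", [Name]) dispatches
%% an info-level message with those defaults.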
%% FOR INTERNAL USE ONLY %% internal non-blocking logging call %% there's some special handing for when we try to log (usually errors) while %% lager is still starting. -ifdef(TEST). -define(INT_LOG(Level, Format, Args), case ?SHOULD_LOG(Level) of true -> ?NOTIFY(Level, self(), Format, Args); _ -> ok end). -else. -define(INT_LOG(Level, Format, Args), Self = self(), %% do this in a spawn so we don't cause a deadlock calling gen_event:which_handlers %% from a gen_event handler spawn(fun() -> case catch(gen_event:which_handlers(lager_event)) of X when X == []; X == {'EXIT', noproc}; X == [lager_backend_throttle] -> %% there's no handlers yet or lager isn't running, try again %% in half a second. timer:sleep(500), ?NOTIFY(Level, Self, Format, Args); _ -> case ?SHOULD_LOG(Level) of true -> ?NOTIFY(Level, Self, Format, Args); _ -> ok end end end)). -endif. -record(lager_shaper, { id :: any(), %% how many messages per second we try to deliver hwm = undefined :: 'undefined' | pos_integer(), %% how many messages we've received this second mps = 0 :: non_neg_integer(), %% the current second lasttime = os:timestamp() :: erlang:timestamp(), %% count of dropped messages this second dropped = 0 :: non_neg_integer(), %% If true, flush notify messages from msg queue at overload flush_queue = true :: boolean(), flush_threshold = 0 :: integer(), %% timer timer = make_ref() :: reference(), %% optional filter fun to avoid counting suppressed messages against HWM totals filter = fun(_) -> false end :: fun() }). -type lager_shaper() :: #lager_shaper{}. lager-3.8.0/tools.mk0000644000232200023220000001315313523436621014663 0ustar debalancedebalance# ------------------------------------------------------------------- # # Copyright (c) 2014 Basho Technologies, Inc. # # This file is provided to you under the Apache License, # Version 2.0 (the "License"); you may not use this file # except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # ------------------------------------------------------------------- # ------------------------------------------------------------------- # NOTE: This file is is from https://github.com/basho/tools.mk. # It should not be edited in a project. It should simply be updated # wholesale when a new version of tools.mk is released. 
# ------------------------------------------------------------------- REBAR ?= ./rebar REVISION ?= $(shell git rev-parse --short HEAD) PROJECT ?= $(shell basename `find src -name "*.app.src"` .app.src) .PHONY: compile-no-deps test docs xref dialyzer-run dialyzer-quick dialyzer \ cleanplt upload-docs compile-no-deps: ${REBAR} compile skip_deps=true test: compile ${REBAR} eunit skip_deps=true upload-docs: docs @if [ -z "${BUCKET}" -o -z "${PROJECT}" -o -z "${REVISION}" ]; then \ echo "Set BUCKET, PROJECT, and REVISION env vars to upload docs"; \ exit 1; fi @cd doc; s3cmd put -P * "s3://${BUCKET}/${PROJECT}/${REVISION}/" > /dev/null @echo "Docs built at: http://${BUCKET}.s3-website-us-east-1.amazonaws.com/${PROJECT}/${REVISION}" docs: ${REBAR} doc skip_deps=true xref: compile ${REBAR} xref skip_deps=true PLT ?= $(HOME)/.combo_dialyzer_plt LOCAL_PLT = .local_dialyzer_plt DIALYZER_FLAGS ?= -Wunmatched_returns ${PLT}: compile @if [ -f $(PLT) ]; then \ dialyzer --check_plt --plt $(PLT) --apps $(DIALYZER_APPS) && \ dialyzer --add_to_plt --plt $(PLT) --output_plt $(PLT) --apps $(DIALYZER_APPS) ; test $$? -ne 1; \ else \ dialyzer --build_plt --output_plt $(PLT) --apps $(DIALYZER_APPS); test $$? -ne 1; \ fi ${LOCAL_PLT}: compile @if [ -d deps ]; then \ if [ -f $(LOCAL_PLT) ]; then \ dialyzer --check_plt --plt $(LOCAL_PLT) deps/*/ebin && \ dialyzer --add_to_plt --plt $(LOCAL_PLT) --output_plt $(LOCAL_PLT) deps/*/ebin ; test $$? -ne 1; \ else \ dialyzer --build_plt --output_plt $(LOCAL_PLT) deps/*/ebin ; test $$? -ne 1; \ fi \ fi dialyzer-run: @echo "==> $(shell basename $(shell pwd)) (dialyzer)" # The bulk of the code below deals with the dialyzer.ignore-warnings file # which contains strings to ignore if output by dialyzer. # Typically the strings include line numbers. Using them exactly is hard # to maintain as the code changes. This approach instead ignores the line # numbers, but takes into account the number of times a string is listed # for a given file. So if one string is listed once, for example, and it # appears twice in the warnings, the user is alerted. It is possible but # unlikely that this approach could mask a warning if one ignored warning # is removed and two warnings of the same kind appear in the file, for # example. But it is a trade-off that seems worth it. 
# Details of the cryptic commands: # - Remove line numbers from dialyzer.ignore-warnings # - Pre-pend duplicate count to each warning with sort | uniq -c # - Remove annoying white space around duplicate count # - Save in dialyer.ignore-warnings.tmp # - Do the same to dialyzer_warnings # - Remove matches from dialyzer.ignore-warnings.tmp from output # - Remove duplicate count # - Escape regex special chars to use lines as regex patterns # - Add pattern to match any line number (file.erl:\d+:) # - Anchor to match the entire line (^entire line$) # - Save in dialyzer_unhandled_warnings # - Output matches for those patterns found in the original warnings @if [ -f $(LOCAL_PLT) ]; then \ PLTS="$(PLT) $(LOCAL_PLT)"; \ else \ PLTS=$(PLT); \ fi; \ if [ -f dialyzer.ignore-warnings ]; then \ if [ $$(grep -cvE '[^[:space:]]' dialyzer.ignore-warnings) -ne 0 ]; then \ echo "ERROR: dialyzer.ignore-warnings contains a blank/empty line, this will match all messages!"; \ exit 1; \ fi; \ dialyzer $(DIALYZER_FLAGS) --plts $${PLTS} -c ebin > dialyzer_warnings ; \ cat dialyzer.ignore-warnings \ | sed -E 's/^([^:]+:)[^:]+:/\1/' \ | sort \ | uniq -c \ | sed -E '/.*\.erl: /!s/^[[:space:]]*[0-9]+[[:space:]]*//' \ > dialyzer.ignore-warnings.tmp ; \ egrep -v "^[[:space:]]*(done|Checking|Proceeding|Compiling)" dialyzer_warnings \ | sed -E 's/^([^:]+:)[^:]+:/\1/' \ | sort \ | uniq -c \ | sed -E '/.*\.erl: /!s/^[[:space:]]*[0-9]+[[:space:]]*//' \ | grep -F -f dialyzer.ignore-warnings.tmp -v \ | sed -E 's/^[[:space:]]*[0-9]+[[:space:]]*//' \ | sed -E 's/([]\^:+?|()*.$${}\[])/\\\1/g' \ | sed -E 's/(\\\.erl\\\:)/\1\\d+:/g' \ | sed -E 's/^(.*)$$/^\1$$/g' \ > dialyzer_unhandled_warnings ; \ rm dialyzer.ignore-warnings.tmp; \ if [ $$(cat dialyzer_unhandled_warnings | wc -l) -gt 0 ]; then \ egrep -f dialyzer_unhandled_warnings dialyzer_warnings ; \ found_warnings=1; \ fi; \ [ "$$found_warnings" != 1 ] ; \ else \ dialyzer $(DIALYZER_FLAGS) --plts $${PLTS} -c ebin; \ fi dialyzer-quick: compile-no-deps dialyzer-run dialyzer: ${PLT} ${LOCAL_PLT} dialyzer-run cleanplt: @echo @echo "Are you sure? It takes several minutes to re-build." @echo Deleting $(PLT) and $(LOCAL_PLT) in 5 seconds. @echo sleep 5 rm $(PLT) rm $(LOCAL_PLT) lager-3.8.0/Makefile0000644000232200023220000000054513523436621014633 0ustar debalancedebalance.PHONY: all compile deps clean distclean test check_plt build_plt dialyzer \ cleanplt all: deps compile compile: deps ./rebar compile deps: test -d deps || ./rebar get-deps clean: ./rebar clean distclean: clean ./rebar delete-deps DIALYZER_APPS = kernel stdlib erts sasl eunit syntax_tools compiler crypto \ common_test include tools.mk lager-3.8.0/appveyor.yml0000644000232200023220000000017313523436621015560 0ustar debalancedebalancebuild: off test_script: - escript ./rebar get-deps - escript ./rebar compile - escript ./rebar eunit deploy: false lager-3.8.0/rebar.config0000644000232200023220000000354513523436621015460 0ustar debalancedebalance%% -*- erlang -*- %% ------------------------------------------------------------------- %% %% Copyright (c) 2011-2015 Basho Technologies, Inc. %% %% This file is provided to you under the Apache License, %% Version 2.0 (the "License"); you may not use this file %% except in compliance with the License. 
You may obtain %% a copy of the License at %% %% http://www.apache.org/licenses/LICENSE-2.0 %% %% Unless required by applicable law or agreed to in writing, %% software distributed under the License is distributed on an %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY %% KIND, either express or implied. See the License for the %% specific language governing permissions and limitations %% under the License. %% %% ------------------------------------------------------------------- {erl_opts, [ {lager_extra_sinks, ['__lager_test_sink']}, {platform_define, "^(19|20|21|22)", test_statem}, {platform_define, "^18", 'FUNCTION_NAME', unavailable}, {platform_define, "^18", 'FUNCTION_ARITY', 0}, debug_info, report, verbose, warn_deprecated_function, warn_deprecated_type, warn_export_all, warn_export_vars, warn_obsolete_guard, warn_untyped_record, warn_unused_import % do NOT include warnings_as_errors, as rebar includes these options % when compiling for eunit, and at least one test module has code that % is deliberatly broken and will generate an un-maskable warning ]}. {erl_first_files, ["src/lager_util.erl"]}. {eunit_opts, [verbose]}. {eunit_compile_opts, [ export_all, nowarn_untyped_record, nowarn_export_all ]}. {deps, [ {goldrush, "0.1.9"} ]}. {xref_checks, []}. {xref_queries, [{"(XC - UC) || (XU - X - B - lager_default_tracer : Mod - erlang:\"(is_map|map_size)\"/1 - maps:to_list/1)", []}]}. {cover_enabled, true}. {edoc_opts, [{stylesheet_file, "./priv/edoc.css"}]}. lager-3.8.0/TODO0000644000232200023220000000012213523436621013652 0ustar debalancedebalanceTime based log rotation Syslog backends (local & remote) debug_module & debug_pid lager-3.8.0/dialyzer.ignore-warnings0000644000232200023220000000034213523436621020044 0ustar debalancedebalancelager_trunc_io.erl:283: Call to missing or unexported function erlang:is_map/1 lager_trunc_io.erl:335: Call to missing or unexported function erlang:map_size/1 Unknown functions: lager_default_tracer:info/1 maps:to_list/1 lager-3.8.0/rebar.config.script0000644000232200023220000000056413523436621016761 0ustar debalancedebalancecase erlang:function_exported(rebar3, main, 1) of true -> % rebar3 CONFIG; false -> % rebar 2.x or older %% Rebuild deps, possibly including those that have been moved to %% profiles [{deps, [ {goldrush, ".*", {git, "https://github.com/DeadZen/goldrush.git", {tag, "0.1.9"}}} ]} | lists:keydelete(deps, 1, CONFIG)] end. lager-3.8.0/src/0000755000232200023220000000000013523436621013756 5ustar debalancedebalancelager-3.8.0/src/lager_format.erl0000644000232200023220000004513013523436621017127 0ustar debalancedebalance%% %% %CopyrightBegin% %% %% Copyright Ericsson AB 1996-2011-2012. All Rights Reserved. %% %% The contents of this file are subject to the Erlang Public License, %% Version 1.1, (the "License"); you may not use this file except in %% compliance with the License. You should have received a copy of the %% Erlang Public License along with this software. If not, it can be %% retrieved online at http://www.erlang.org/. %% %% Software distributed under the License is distributed on an "AS IS" %% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See %% the License for the specific language governing rights and limitations %% under the License. %% %% %CopyrightEnd% %% -module(lager_format). %% fork of io_lib_format that uses trunc_io to protect against large terms -export([format/3, format/4]). -record(options, { chomp = false :: boolean() }). format(FmtStr, Args, MaxLen) -> format(FmtStr, Args, MaxLen, []). 
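%% Illustrative call (a sketch, not taken from this module's tests): the API
%% mirrors io_lib:format/2, except that the result is capped at roughly MaxLen
%% characters, with oversized terms truncated via lager_trunc_io. The `State'
%% variable below stands in for an arbitrary, possibly huge, term:
%%
%%   iolist_to_binary(lager_format:format("pid ~p state ~p", [self(), State], 128))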
format([], [], _, _) ->
    "";
format(FmtStr, Args, MaxLen, Opts) when is_atom(FmtStr) ->
    format(atom_to_list(FmtStr), Args, MaxLen, Opts);
format(FmtStr, Args, MaxLen, Opts) when is_binary(FmtStr) ->
    format(binary_to_list(FmtStr), Args, MaxLen, Opts);
format(FmtStr, Args, MaxLen, Opts) when is_list(FmtStr) ->
    case io_lib:deep_char_list(FmtStr) of
        true ->
            Options = make_options(Opts, #options{}),
            Cs = collect(FmtStr, Args),
            {Cs2, MaxLen2} = build(Cs, [], MaxLen, Options),
            %% count how many terms remain
            {Count, StrLen} = lists:foldl(
                fun({_C, _As, _F, _Adj, _P, _Pad, _Enc}, {Terms, Chars}) ->
                        {Terms + 1, Chars};
                    (_, {Terms, Chars}) ->
                        {Terms, Chars + 1}
                end, {0, 0}, Cs2),
            build2(Cs2, Count, MaxLen2 - StrLen);
        false ->
            erlang:error(badarg)
    end;
format(_FmtStr, _Args, _MaxLen, _Opts) ->
    erlang:error(badarg).

collect([$~|Fmt0], Args0) ->
    {C,Fmt1,Args1} = collect_cseq(Fmt0, Args0),
    [C|collect(Fmt1, Args1)];
collect([C|Fmt], Args) ->
    [C|collect(Fmt, Args)];
collect([], []) -> [].

collect_cseq(Fmt0, Args0) ->
    {F,Ad,Fmt1,Args1} = field_width(Fmt0, Args0),
    {P,Fmt2,Args2} = precision(Fmt1, Args1),
    {Pad,Fmt3,Args3} = pad_char(Fmt2, Args2),
    {Encoding,Fmt4,Args4} = encoding(Fmt3, Args3),
    {C,As,Fmt5,Args5} = collect_cc(Fmt4, Args4),
    {{C,As,F,Ad,P,Pad,Encoding},Fmt5,Args5}.

encoding([$t|Fmt],Args) ->
    {unicode,Fmt,Args};
encoding(Fmt,Args) ->
    {latin1,Fmt,Args}.

field_width([$-|Fmt0], Args0) ->
    {F,Fmt,Args} = field_value(Fmt0, Args0),
    field_width(-F, Fmt, Args);
field_width(Fmt0, Args0) ->
    {F,Fmt,Args} = field_value(Fmt0, Args0),
    field_width(F, Fmt, Args).

field_width(F, Fmt, Args) when F < 0 ->
    {-F,left,Fmt,Args};
field_width(F, Fmt, Args) when F >= 0 ->
    {F,right,Fmt,Args}.

precision([$.|Fmt], Args) ->
    field_value(Fmt, Args);
precision(Fmt, Args) ->
    {none,Fmt,Args}.

field_value([$*|Fmt], [A|Args]) when is_integer(A) ->
    {A,Fmt,Args};
field_value([C|Fmt], Args) when is_integer(C), C >= $0, C =< $9 ->
    field_value([C|Fmt], Args, 0);
field_value(Fmt, Args) ->
    {none,Fmt,Args}.

field_value([C|Fmt], Args, F) when is_integer(C), C >= $0, C =< $9 ->
    field_value(Fmt, Args, 10*F + (C - $0));
field_value(Fmt, Args, F) ->        %Default case
    {F,Fmt,Args}.

pad_char([$.,$*|Fmt], [Pad|Args]) -> {Pad,Fmt,Args};
pad_char([$.,Pad|Fmt], Args) -> {Pad,Fmt,Args};
pad_char(Fmt, Args) -> {$\s,Fmt,Args}.

%% collect_cc([FormatChar], [Argument]) ->
%%         {Control,[ControlArg],[FormatChar],[Arg]}.
%%  Here we collect the arguments for each control character.
%%  Be explicit to cause failure early.

collect_cc([$w|Fmt], [A|Args]) -> {$w,[A],Fmt,Args};
collect_cc([$p|Fmt], [A|Args]) -> {$p,[A],Fmt,Args};
collect_cc([$W|Fmt], [A,Depth|Args]) -> {$W,[A,Depth],Fmt,Args};
collect_cc([$P|Fmt], [A,Depth|Args]) -> {$P,[A,Depth],Fmt,Args};
collect_cc([$s|Fmt], [A|Args]) -> {$s,[A],Fmt,Args};
collect_cc([$e|Fmt], [A|Args]) -> {$e,[A],Fmt,Args};
collect_cc([$f|Fmt], [A|Args]) -> {$f,[A],Fmt,Args};
collect_cc([$g|Fmt], [A|Args]) -> {$g,[A],Fmt,Args};
collect_cc([$b|Fmt], [A|Args]) -> {$b,[A],Fmt,Args};
collect_cc([$B|Fmt], [A|Args]) -> {$B,[A],Fmt,Args};
collect_cc([$x|Fmt], [A,Prefix|Args]) -> {$x,[A,Prefix],Fmt,Args};
collect_cc([$X|Fmt], [A,Prefix|Args]) -> {$X,[A,Prefix],Fmt,Args};
collect_cc([$+|Fmt], [A|Args]) -> {$+,[A],Fmt,Args};
collect_cc([$#|Fmt], [A|Args]) -> {$#,[A],Fmt,Args};
collect_cc([$c|Fmt], [A|Args]) -> {$c,[A],Fmt,Args};
collect_cc([$~|Fmt], Args) when is_list(Args) -> {$~,[],Fmt,Args};
collect_cc([$n|Fmt], Args) when is_list(Args) -> {$n,[],Fmt,Args};
collect_cc([$i|Fmt], [A|Args]) -> {$i,[A],Fmt,Args}.
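%% Worked example of the intermediate form produced by collect/2 (derived by
%% hand from the clauses above; shown only as an illustration):
%%
%%   collect("x=~p", [foo]) ->
%%       [$x, $=, {$p, [foo], none, right, none, $\s, latin1}]
%%
%% Literal characters pass through untouched, while each control sequence
%% becomes a {Control, Args, FieldWidth, Adjust, Precision, Pad, Encoding}
%% tuple consumed by build/4 and control/8 below.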
%% build([Control], Pc, Indentation) -> [Char]. %% Interpret the control structures. Count the number of print %% remaining and only calculate indentation when necessary. Must also %% be smart when calculating indentation for characters in format. build([{$n, _, _, _, _, _, _}], Acc, MaxLen, #options{chomp=true}) -> %% trailing ~n, ignore {lists:reverse(Acc), MaxLen}; build([{C,As,F,Ad,P,Pad,Enc}|Cs], Acc, MaxLen, O) -> {S, MaxLen2} = control(C, As, F, Ad, P, Pad, Enc, MaxLen), build(Cs, [S|Acc], MaxLen2, O); build([$\n], Acc, MaxLen, #options{chomp=true}) -> %% trailing \n, ignore {lists:reverse(Acc), MaxLen}; build([$\n|Cs], Acc, MaxLen, O) -> build(Cs, [$\n|Acc], MaxLen - 1, O); build([$\t|Cs], Acc, MaxLen, O) -> build(Cs, [$\t|Acc], MaxLen - 1, O); build([C|Cs], Acc, MaxLen, O) -> build(Cs, [C|Acc], MaxLen - 1, O); build([], Acc, MaxLen, _O) -> {lists:reverse(Acc), MaxLen}. build2([{C,As,F,Ad,P,Pad,Enc}|Cs], Count, MaxLen) -> {S, Len} = control2(C, As, F, Ad, P, Pad, Enc, MaxLen div Count), [S|build2(Cs, Count - 1, MaxLen - Len)]; build2([C|Cs], Count, MaxLen) -> [C|build2(Cs, Count, MaxLen)]; build2([], _, _) -> []. %% control(FormatChar, [Argument], FieldWidth, Adjust, Precision, PadChar, %% Indentation) -> [Char] %% This is the main dispatch function for the various formatting commands. %% Field widths and precisions have already been calculated. control($e, [A], F, Adj, P, Pad, _Enc, L) when is_float(A) -> Res = fwrite_e(A, F, Adj, P, Pad), {Res, L - lists:flatlength(Res)}; control($f, [A], F, Adj, P, Pad, _Enc, L) when is_float(A) -> Res = fwrite_f(A, F, Adj, P, Pad), {Res, L - lists:flatlength(Res)}; control($g, [A], F, Adj, P, Pad, _Enc, L) when is_float(A) -> Res = fwrite_g(A, F, Adj, P, Pad), {Res, L - lists:flatlength(Res)}; control($b, [A], F, Adj, P, Pad, _Enc, L) when is_integer(A) -> Res = unprefixed_integer(A, F, Adj, base(P), Pad, true), {Res, L - lists:flatlength(Res)}; control($B, [A], F, Adj, P, Pad, _Enc, L) when is_integer(A) -> Res = unprefixed_integer(A, F, Adj, base(P), Pad, false), {Res, L - lists:flatlength(Res)}; control($x, [A,Prefix], F, Adj, P, Pad, _Enc, L) when is_integer(A), is_atom(Prefix) -> Res = prefixed_integer(A, F, Adj, base(P), Pad, atom_to_list(Prefix), true), {Res, L - lists:flatlength(Res)}; control($x, [A,Prefix], F, Adj, P, Pad, _Enc, L) when is_integer(A) -> true = io_lib:deep_char_list(Prefix), %Check if Prefix a character list Res = prefixed_integer(A, F, Adj, base(P), Pad, Prefix, true), {Res, L - lists:flatlength(Res)}; control($X, [A,Prefix], F, Adj, P, Pad, _Enc, L) when is_integer(A), is_atom(Prefix) -> Res = prefixed_integer(A, F, Adj, base(P), Pad, atom_to_list(Prefix), false), {Res, L - lists:flatlength(Res)}; control($X, [A,Prefix], F, Adj, P, Pad, _Enc, L) when is_integer(A) -> true = io_lib:deep_char_list(Prefix), %Check if Prefix a character list Res = prefixed_integer(A, F, Adj, base(P), Pad, Prefix, false), {Res, L - lists:flatlength(Res)}; control($+, [A], F, Adj, P, Pad, _Enc, L) when is_integer(A) -> Base = base(P), Prefix = [integer_to_list(Base), $#], Res = prefixed_integer(A, F, Adj, Base, Pad, Prefix, true), {Res, L - lists:flatlength(Res)}; control($#, [A], F, Adj, P, Pad, _Enc, L) when is_integer(A) -> Base = base(P), Prefix = [integer_to_list(Base), $#], Res = prefixed_integer(A, F, Adj, Base, Pad, Prefix, false), {Res, L - lists:flatlength(Res)}; control($c, [A], F, Adj, P, Pad, unicode, L) when is_integer(A) -> Res = char(A, F, Adj, P, Pad), {Res, L - lists:flatlength(Res)}; control($c, [A], F, Adj, P, Pad, 
_Enc, L) when is_integer(A) -> Res = char(A band 255, F, Adj, P, Pad), {Res, L - lists:flatlength(Res)}; control($~, [], F, Adj, P, Pad, _Enc, L) -> Res = char($~, F, Adj, P, Pad), {Res, L - lists:flatlength(Res)}; control($n, [], F, Adj, P, Pad, _Enc, L) -> Res = newline(F, Adj, P, Pad), {Res, L - lists:flatlength(Res)}; control($i, [_A], _F, _Adj, _P, _Pad, _Enc, L) -> {[], L}; control($s, [A], F, Adj, P, Pad, _Enc, L) when is_atom(A) -> Res = string(atom_to_list(A), F, Adj, P, Pad), {Res, L - lists:flatlength(Res)}; control(C, A, F, Adj, P, Pad, Enc, L) -> %% save this for later - these are all the 'large' terms {{C, A, F, Adj, P, Pad, Enc}, L}. control2($w, [A], F, Adj, P, Pad, _Enc, L) -> Term = lager_trunc_io:fprint(A, L, [{lists_as_strings, false}]), Res = term(Term, F, Adj, P, Pad), {Res, lists:flatlength(Res)}; control2($p, [A], _F, _Adj, _P, _Pad, _Enc, L) -> Term = lager_trunc_io:fprint(A, L, [{lists_as_strings, true}]), {Term, lists:flatlength(Term)}; control2($W, [A,Depth], F, Adj, P, Pad, _Enc, L) when is_integer(Depth) -> Term = lager_trunc_io:fprint(A, L, [{depth, Depth}, {lists_as_strings, false}]), Res = term(Term, F, Adj, P, Pad), {Res, lists:flatlength(Res)}; control2($P, [A,Depth], _F, _Adj, _P, _Pad, _Enc, L) when is_integer(Depth) -> Term = lager_trunc_io:fprint(A, L, [{depth, Depth}, {lists_as_strings, true}]), {Term, lists:flatlength(Term)}; control2($s, [L0], F, Adj, P, Pad, latin1, L) -> List = lager_trunc_io:fprint(iolist_to_chars(L0), L, [{force_strings, true}]), Res = string(List, F, Adj, P, Pad), {Res, lists:flatlength(Res)}; control2($s, [L0], F, Adj, P, Pad, unicode, L) -> List = lager_trunc_io:fprint(cdata_to_chars(L0), L, [{force_strings, true}]), Res = uniconv(string(List, F, Adj, P, Pad)), {Res, lists:flatlength(Res)}. iolist_to_chars([C|Cs]) when is_integer(C), C >= $\000, C =< $\377 -> [C | iolist_to_chars(Cs)]; iolist_to_chars([I|Cs]) -> [iolist_to_chars(I) | iolist_to_chars(Cs)]; iolist_to_chars([]) -> []; iolist_to_chars(B) when is_binary(B) -> binary_to_list(B). cdata_to_chars([C|Cs]) when is_integer(C), C >= $\000 -> [C | cdata_to_chars(Cs)]; cdata_to_chars([I|Cs]) -> [cdata_to_chars(I) | cdata_to_chars(Cs)]; cdata_to_chars([]) -> []; cdata_to_chars(B) when is_binary(B) -> case catch unicode:characters_to_list(B) of L when is_list(L) -> L; _ -> binary_to_list(B) end. make_options([], Options) -> Options; make_options([{chomp, Bool}|T], Options) when is_boolean(Bool) -> make_options(T, Options#options{chomp=Bool}). -ifdef(UNICODE_AS_BINARIES). uniconv(C) -> unicode:characters_to_binary(C,unicode). -else. uniconv(C) -> C. -endif. %% Default integer base base(none) -> 10; base(B) when is_integer(B) -> B. %% term(TermList, Field, Adjust, Precision, PadChar) %% Output the characters in a term. %% Adjust the characters within the field if length less than Max padding %% with PadChar. term(T, none, _Adj, none, _Pad) -> T; term(T, none, Adj, P, Pad) -> term(T, P, Adj, P, Pad); term(T, F, Adj, P0, Pad) -> L = lists:flatlength(T), P = case P0 of none -> erlang:min(L, F); _ -> P0 end, if L > P -> adjust(chars($*, P), chars(Pad, F-P), Adj); F >= P -> adjust(T, chars(Pad, F-L), Adj) end. 
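%% Note on term/5 above: when the flattened term is longer than the effective
%% precision, it is replaced by a run of '*' padding characters (the L > P
%% branch) rather than being partially printed.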
%% fwrite_e(Float, Field, Adjust, Precision, PadChar) fwrite_e(Fl, none, Adj, none, Pad) -> %Default values fwrite_e(Fl, none, Adj, 6, Pad); fwrite_e(Fl, none, _Adj, P, _Pad) when P >= 2 -> float_e(Fl, float_data(Fl), P); fwrite_e(Fl, F, Adj, none, Pad) -> fwrite_e(Fl, F, Adj, 6, Pad); fwrite_e(Fl, F, Adj, P, Pad) when P >= 2 -> term(float_e(Fl, float_data(Fl), P), F, Adj, F, Pad). float_e(Fl, Fd, P) when Fl < 0.0 -> %Negative numbers [$-|float_e(-Fl, Fd, P)]; float_e(_Fl, {Ds,E}, P) -> case float_man(Ds, 1, P-1) of {[$0|Fs],true} -> [[$1|Fs]|float_exp(E)]; {Fs,false} -> [Fs|float_exp(E-1)] end. %% float_man([Digit], Icount, Dcount) -> {[Chars],CarryFlag}. %% Generate the characters in the mantissa from the digits with Icount %% characters before the '.' and Dcount decimals. Handle carry and let %% caller decide what to do at top. float_man(Ds, 0, Dc) -> {Cs,C} = float_man(Ds, Dc), {[$.|Cs],C}; float_man([D|Ds], I, Dc) -> case float_man(Ds, I-1, Dc) of {Cs,true} when D =:= $9 -> {[$0|Cs],true}; {Cs,true} -> {[D+1|Cs],false}; {Cs,false} -> {[D|Cs],false} end; float_man([], I, Dc) -> %Pad with 0's {string:chars($0, I, [$.|string:chars($0, Dc)]),false}. float_man([D|_], 0) when D >= $5 -> {[],true}; float_man([_|_], 0) -> {[],false}; float_man([D|Ds], Dc) -> case float_man(Ds, Dc-1) of {Cs,true} when D =:= $9 -> {[$0|Cs],true}; {Cs,true} -> {[D+1|Cs],false}; {Cs,false} -> {[D|Cs],false} end; float_man([], Dc) -> {string:chars($0, Dc),false}. %Pad with 0's %% float_exp(Exponent) -> [Char]. %% Generate the exponent of a floating point number. Always include sign. float_exp(E) when E >= 0 -> [$e,$+|integer_to_list(E)]; float_exp(E) -> [$e|integer_to_list(E)]. %% fwrite_f(FloatData, Field, Adjust, Precision, PadChar) fwrite_f(Fl, none, Adj, none, Pad) -> %Default values fwrite_f(Fl, none, Adj, 6, Pad); fwrite_f(Fl, none, _Adj, P, _Pad) when P >= 1 -> float_f(Fl, float_data(Fl), P); fwrite_f(Fl, F, Adj, none, Pad) -> fwrite_f(Fl, F, Adj, 6, Pad); fwrite_f(Fl, F, Adj, P, Pad) when P >= 1 -> term(float_f(Fl, float_data(Fl), P), F, Adj, F, Pad). float_f(Fl, Fd, P) when Fl < 0.0 -> [$-|float_f(-Fl, Fd, P)]; float_f(Fl, {Ds,E}, P) when E =< 0 -> float_f(Fl, {string:chars($0, -E+1, Ds),1}, P); %Prepend enough 0's float_f(_Fl, {Ds,E}, P) -> case float_man(Ds, E, P) of {Fs,true} -> "1" ++ Fs; %Handle carry {Fs,false} -> Fs end. %% float_data([FloatChar]) -> {[Digit],Exponent} float_data(Fl) -> float_data(float_to_list(Fl), []). float_data([$e|E], Ds) -> {lists:reverse(Ds),list_to_integer(E)+1}; float_data([D|Cs], Ds) when D >= $0, D =< $9 -> float_data(Cs, [D|Ds]); float_data([_|Cs], Ds) -> float_data(Cs, Ds). %% fwrite_g(Float, Field, Adjust, Precision, PadChar) %% Use the f form if Float is >= 0.1 and < 1.0e4, %% and the prints correctly in the f form, else the e form. %% Precision always means the # of significant digits. fwrite_g(Fl, F, Adj, none, Pad) -> fwrite_g(Fl, F, Adj, 6, Pad); fwrite_g(Fl, F, Adj, P, Pad) when P >= 1 -> A = abs(Fl), E = if A < 1.0e-1 -> -2; A < 1.0e0 -> -1; A < 1.0e1 -> 0; A < 1.0e2 -> 1; A < 1.0e3 -> 2; A < 1.0e4 -> 3; true -> fwrite_f end, if P =< 1, E =:= -1; P-1 > E, E >= -1 -> fwrite_f(Fl, F, Adj, P-1-E, Pad); P =< 1 -> fwrite_e(Fl, F, Adj, 2, Pad); true -> fwrite_e(Fl, F, Adj, P, Pad) end. 
%% string(String, Field, Adjust, Precision, PadChar) string(S, none, _Adj, none, _Pad) -> S; string(S, F, Adj, none, Pad) -> string_field(S, F, Adj, lists:flatlength(S), Pad); string(S, none, _Adj, P, Pad) -> string_field(S, P, left, lists:flatlength(S), Pad); string(S, F, Adj, P, Pad) when F >= P -> N = lists:flatlength(S), if F > P -> if N > P -> adjust(flat_trunc(S, P), chars(Pad, F-P), Adj); N < P -> adjust([S|chars(Pad, P-N)], chars(Pad, F-P), Adj); true -> % N == P adjust(S, chars(Pad, F-P), Adj) end; true -> % F == P string_field(S, F, Adj, N, Pad) end. string_field(S, F, _Adj, N, _Pad) when N > F -> flat_trunc(S, F); string_field(S, F, Adj, N, Pad) when N < F -> adjust(S, chars(Pad, F-N), Adj); string_field(S, _, _, _, _) -> % N == F S. %% unprefixed_integer(Int, Field, Adjust, Base, PadChar, Lowercase) %% -> [Char]. unprefixed_integer(Int, F, Adj, Base, Pad, Lowercase) when Base >= 2, Base =< 1+$Z-$A+10 -> if Int < 0 -> S = cond_lowercase(erlang:integer_to_list(-Int, Base), Lowercase), term([$-|S], F, Adj, none, Pad); true -> S = cond_lowercase(erlang:integer_to_list(Int, Base), Lowercase), term(S, F, Adj, none, Pad) end. %% prefixed_integer(Int, Field, Adjust, Base, PadChar, Prefix, Lowercase) %% -> [Char]. prefixed_integer(Int, F, Adj, Base, Pad, Prefix, Lowercase) when Base >= 2, Base =< 1+$Z-$A+10 -> if Int < 0 -> S = cond_lowercase(erlang:integer_to_list(-Int, Base), Lowercase), term([$-,Prefix|S], F, Adj, none, Pad); true -> S = cond_lowercase(erlang:integer_to_list(Int, Base), Lowercase), term([Prefix|S], F, Adj, none, Pad) end. %% char(Char, Field, Adjust, Precision, PadChar) -> [Char]. char(C, none, _Adj, none, _Pad) -> [C]; char(C, F, _Adj, none, _Pad) -> chars(C, F); char(C, none, _Adj, P, _Pad) -> chars(C, P); char(C, F, Adj, P, Pad) when F >= P -> adjust(chars(C, P), chars(Pad, F - P), Adj). %% newline(Field, Adjust, Precision, PadChar) -> [Char]. newline(none, _Adj, _P, _Pad) -> "\n"; newline(F, right, _P, _Pad) -> chars($\n, F). %% %% Utilities %% adjust(Data, [], _) -> Data; adjust(Data, Pad, left) -> [Data|Pad]; adjust(Data, Pad, right) -> [Pad|Data]. %% Flatten and truncate a deep list to at most N elements. flat_trunc(List, N) when is_integer(N), N >= 0 -> flat_trunc(List, N, []). flat_trunc(L, 0, R) when is_list(L) -> lists:reverse(R); flat_trunc([H|T], N, R) -> flat_trunc(T, N-1, [H|R]); flat_trunc([], _, R) -> lists:reverse(R). %% A deep version of string:chars/2,3 chars(_C, 0) -> []; chars(C, 1) -> [C]; chars(C, 2) -> [C,C]; chars(C, 3) -> [C,C,C]; chars(C, N) when is_integer(N), (N band 1) =:= 0 -> S = chars(C, N bsr 1), [S|S]; chars(C, N) when is_integer(N) -> S = chars(C, N bsr 1), [C,S|S]. %chars(C, N, Tail) -> % [chars(C, N)|Tail]. %% Lowercase conversion cond_lowercase(String, true) -> lowercase(String); cond_lowercase(String,false) -> String. lowercase([H|T]) when is_integer(H), H >= $A, H =< $Z -> [(H-$A+$a)|lowercase(T)]; lowercase([H|T]) -> [H|lowercase(T)]; lowercase([]) -> []. lager-3.8.0/src/lager_util.erl0000644000232200023220000011604613523436621016621 0ustar debalancedebalance%% ------------------------------------------------------------------- %% %% Copyright (c) 2011-2017 Basho Technologies, Inc. %% %% This file is provided to you under the Apache License, %% Version 2.0 (the "License"); you may not use this file %% except in compliance with the License. 
You may obtain %% a copy of the License at %% %% http://www.apache.org/licenses/LICENSE-2.0 %% %% Unless required by applicable law or agreed to in writing, %% software distributed under the License is distributed on an %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY %% KIND, either express or implied. See the License for the %% specific language governing permissions and limitations %% under the License. %% %% ------------------------------------------------------------------- -module(lager_util). -export([ levels/0, level_to_num/1, level_to_chr/1, num_to_level/1, config_to_mask/1, config_to_levels/1, mask_to_levels/1, format_time/0, format_time/1, localtime_ms/0, localtime_ms/1, maybe_utc/1, parse_rotation_date_spec/1, calculate_next_rotation/1, validate_trace/1, check_traces/4, is_loggable/3, trace_filter/1, trace_filter/2, expand_path/1, find_file/2, check_hwm/1, check_hwm/2, make_internal_sink_name/1, otp_version/0, maybe_flush/2, has_file_changed/3 ]). -ifdef(TEST). -export([create_test_dir/0, get_test_dir/0, delete_test_dir/0, set_dir_permissions/2, safe_application_load/1, safe_write_file/2]). -include_lib("eunit/include/eunit.hrl"). -endif. -include("lager.hrl"). -include_lib("kernel/include/file.hrl"). levels() -> [debug, info, notice, warning, error, critical, alert, emergency, none]. level_to_num(debug) -> ?DEBUG; level_to_num(info) -> ?INFO; level_to_num(notice) -> ?NOTICE; level_to_num(warning) -> ?WARNING; level_to_num(error) -> ?ERROR; level_to_num(critical) -> ?CRITICAL; level_to_num(alert) -> ?ALERT; level_to_num(emergency) -> ?EMERGENCY; level_to_num(none) -> ?LOG_NONE. level_to_chr(debug) -> $D; level_to_chr(info) -> $I; level_to_chr(notice) -> $N; level_to_chr(warning) -> $W; level_to_chr(error) -> $E; level_to_chr(critical) -> $C; level_to_chr(alert) -> $A; level_to_chr(emergency) -> $M; level_to_chr(none) -> $ . num_to_level(?DEBUG) -> debug; num_to_level(?INFO) -> info; num_to_level(?NOTICE) -> notice; num_to_level(?WARNING) -> warning; num_to_level(?ERROR) -> error; num_to_level(?CRITICAL) -> critical; num_to_level(?ALERT) -> alert; num_to_level(?EMERGENCY) -> emergency; num_to_level(?LOG_NONE) -> none. -spec config_to_mask(atom()|string()) -> {'mask', integer()}. config_to_mask(Conf) -> Levels = config_to_levels(Conf), {mask, lists:foldl(fun(Level, Acc) -> level_to_num(Level) bor Acc end, 0, Levels)}. -spec mask_to_levels(non_neg_integer()) -> [lager:log_level()]. mask_to_levels(Mask) -> mask_to_levels(Mask, levels(), []). mask_to_levels(_Mask, [], Acc) -> lists:reverse(Acc); mask_to_levels(Mask, [Level|Levels], Acc) -> NewAcc = case (level_to_num(Level) band Mask) /= 0 of true -> [Level|Acc]; false -> Acc end, mask_to_levels(Mask, Levels, NewAcc). -spec config_to_levels(atom()|string()) -> [lager:log_level()]. config_to_levels(Conf) when is_atom(Conf) -> config_to_levels(atom_to_list(Conf)); config_to_levels([$! 
| Rest]) -> levels() -- config_to_levels(Rest); config_to_levels([$=, $< | Rest]) -> [_|Levels] = config_to_levels_int(Rest), lists:filter(fun(E) -> not lists:member(E, Levels) end, levels()); config_to_levels([$<, $= | Rest]) -> [_|Levels] = config_to_levels_int(Rest), lists:filter(fun(E) -> not lists:member(E, Levels) end, levels()); config_to_levels([$>, $= | Rest]) -> config_to_levels_int(Rest); config_to_levels([$=, $> | Rest]) -> config_to_levels_int(Rest); config_to_levels([$= | Rest]) -> [level_to_atom(Rest)]; config_to_levels([$< | Rest]) -> Levels = config_to_levels_int(Rest), lists:filter(fun(E) -> not lists:member(E, Levels) end, levels()); config_to_levels([$> | Rest]) -> [_|Levels] = config_to_levels_int(Rest), lists:filter(fun(E) -> lists:member(E, Levels) end, levels()); config_to_levels(Conf) -> config_to_levels_int(Conf). %% internal function to break the recursion loop config_to_levels_int(Conf) -> Level = level_to_atom(Conf), lists:dropwhile(fun(E) -> E /= Level end, levels()). level_to_atom(String) -> Levels = levels(), try list_to_existing_atom(String) of Atom -> case lists:member(Atom, Levels) of true -> Atom; false -> erlang:error(badarg) end catch _:_ -> erlang:error(badarg) end. %% returns localtime with milliseconds included localtime_ms() -> Now = os:timestamp(), localtime_ms(Now). localtime_ms(Now) -> {_, _, Micro} = Now, {Date, {Hours, Minutes, Seconds}} = calendar:now_to_local_time(Now), {Date, {Hours, Minutes, Seconds, Micro div 1000 rem 1000}}. maybe_utc({Date, {H, M, S, Ms}}) -> case lager_stdlib:maybe_utc({Date, {H, M, S}}) of {utc, {Date1, {H1, M1, S1}}} -> {utc, {Date1, {H1, M1, S1, Ms}}}; {Date1, {H1, M1, S1}} -> {Date1, {H1, M1, S1, Ms}} end. format_time() -> format_time(maybe_utc(localtime_ms())). format_time({utc, {{Y, M, D}, {H, Mi, S, Ms}}}) -> {[integer_to_list(Y), $-, i2l(M), $-, i2l(D)], [i2l(H), $:, i2l(Mi), $:, i2l(S), $., i3l(Ms), $ , $U, $T, $C]}; format_time({{Y, M, D}, {H, Mi, S, Ms}}) -> {[integer_to_list(Y), $-, i2l(M), $-, i2l(D)], [i2l(H), $:, i2l(Mi), $:, i2l(S), $., i3l(Ms)]}; format_time({utc, {{Y, M, D}, {H, Mi, S}}}) -> {[integer_to_list(Y), $-, i2l(M), $-, i2l(D)], [i2l(H), $:, i2l(Mi), $:, i2l(S), $ , $U, $T, $C]}; format_time({{Y, M, D}, {H, Mi, S}}) -> {[integer_to_list(Y), $-, i2l(M), $-, i2l(D)], [i2l(H), $:, i2l(Mi), $:, i2l(S)]}. parse_rotation_hour_spec([], Res) -> {ok, Res}; parse_rotation_hour_spec([$H, M1, M2], Res) -> case list_to_integer([M1, M2]) of X when X >= 0, X =< 59 -> {ok, Res ++ [{minute, X}]}; _ -> {error, invalid_date_spec} end; parse_rotation_hour_spec([$H, M], Res) when M >= $0, M =< $9 -> {ok, Res ++ [{minute, M - 48}]}; parse_rotation_hour_spec(_,_) -> {error, invalid_date_spec}. %% Default to 00:00:00 rotation parse_rotation_day_spec([], Res) -> {ok, Res ++ [{hour ,0}]}; parse_rotation_day_spec([$D, D1, D2|T], Res) -> case list_to_integer([D1, D2]) of X when X >= 0, X =< 23 -> parse_rotation_hour_spec(T, Res ++ [{hour, X}]); _ -> {error, invalid_date_spec} end; parse_rotation_day_spec([$D, D|T], Res) when D >= $0, D =< $9 -> parse_rotation_hour_spec(T, Res ++ [{hour, D - 48 }]); parse_rotation_day_spec(X, Res) -> parse_rotation_hour_spec(X, Res). parse_rotation_date_spec([$$, $W, W|T]) when W >= $0, W =< $6 -> Week = W - 48, parse_rotation_day_spec(T, [{day, Week}]); parse_rotation_date_spec([$$, $M, L|T]) when L == $L; L == $l -> %% last day in month. 
parse_rotation_day_spec(T, [{date, last}]); parse_rotation_date_spec([$$, $M, M1, M2|[$D|_]=T]) -> case list_to_integer([M1, M2]) of X when X >= 1, X =< 31 -> parse_rotation_day_spec(T, [{date, X}]); _ -> {error, invalid_date_spec} end; parse_rotation_date_spec([$$, $M, M|[$D|_]=T]) -> parse_rotation_day_spec(T, [{date, M - 48}]); parse_rotation_date_spec([$$, $M, M1, M2]) -> case list_to_integer([M1, M2]) of X when X >= 1, X =< 31 -> {ok, [{date, X}, {hour, 0}]}; _ -> {error, invalid_date_spec} end; parse_rotation_date_spec([$$, $M, M]) -> {ok, [{date, M - 48}, {hour, 0}]}; parse_rotation_date_spec([$$|X]) when X /= [] -> parse_rotation_day_spec(X, []); parse_rotation_date_spec(_) -> {error, invalid_date_spec}. calculate_next_rotation(Spec) -> Now = calendar:local_time(), Later = calculate_next_rotation(Spec, Now), calendar:datetime_to_gregorian_seconds(Later) - calendar:datetime_to_gregorian_seconds(Now). calculate_next_rotation([], Now) -> Now; calculate_next_rotation([{minute, X}|T], {{_, _, _}, {Hour, Minute, _}} = Now) when Minute < X -> %% rotation is this hour NewNow = setelement(2, Now, {Hour, X, 0}), calculate_next_rotation(T, NewNow); calculate_next_rotation([{minute, X}|T], Now) -> %% rotation is next hour Seconds = calendar:datetime_to_gregorian_seconds(Now) + 3600, DateTime = calendar:gregorian_seconds_to_datetime(Seconds), {_, {NewHour, _, _}} = DateTime, NewNow = setelement(2, DateTime, {NewHour, X, 0}), calculate_next_rotation(T, NewNow); calculate_next_rotation([{hour, X}|T], {{_, _, _}, {Hour, _, _}} = Now) when Hour < X -> %% rotation is today, sometime NewNow = setelement(2, Now, {X, 0, 0}), calculate_next_rotation(T, NewNow); calculate_next_rotation([{hour, X}|T], {{_, _, _}, _} = Now) -> %% rotation is not today Seconds = calendar:datetime_to_gregorian_seconds(Now) + 86400, DateTime = calendar:gregorian_seconds_to_datetime(Seconds), NewNow = setelement(2, DateTime, {X, 0, 0}), calculate_next_rotation(T, NewNow); calculate_next_rotation([{day, Day}|T], {Date, _Time} = Now) -> DoW = calendar:day_of_the_week(Date), AdjustedDay = case Day of 0 -> 7; X -> X end, case AdjustedDay of DoW -> %% rotation is today case calculate_next_rotation(T, Now) of {Date, _} = NewNow -> NewNow; {NewDate, _} -> %% rotation *isn't* today! rerun the calculation NewNow = {NewDate, {0, 0, 0}}, calculate_next_rotation([{day, Day}|T], NewNow) end; Y when Y > DoW -> %% rotation is later this week PlusDays = Y - DoW, Seconds = calendar:datetime_to_gregorian_seconds(Now) + (86400 * PlusDays), {NewDate, _} = calendar:gregorian_seconds_to_datetime(Seconds), NewNow = {NewDate, {0, 0, 0}}, calculate_next_rotation(T, NewNow); Y when Y < DoW -> %% rotation is next week PlusDays = ((7 - DoW) + Y), Seconds = calendar:datetime_to_gregorian_seconds(Now) + (86400 * PlusDays), {NewDate, _} = calendar:gregorian_seconds_to_datetime(Seconds), NewNow = {NewDate, {0, 0, 0}}, calculate_next_rotation(T, NewNow) end; calculate_next_rotation([{date, last}|T], {{Year, Month, Day}, _} = Now) -> Last = calendar:last_day_of_the_month(Year, Month), case Last == Day of true -> %% doing rotation today case calculate_next_rotation(T, Now) of {{Year, Month, Day}, _} = NewNow -> NewNow; {NewDate, _} -> %% rotation *isn't* today! 
rerun the calculation NewNow = {NewDate, {0, 0, 0}}, calculate_next_rotation([{date, last}|T], NewNow) end; false -> NewNow = setelement(1, Now, {Year, Month, Last}), calculate_next_rotation(T, NewNow) end; calculate_next_rotation([{date, Date}|T], {{Year, Month, Date}, _} = Now) -> %% rotation is today case calculate_next_rotation(T, Now) of {{Year, Month, Date}, _} = NewNow -> NewNow; {NewDate, _} -> %% rotation *isn't* today! rerun the calculation NewNow = setelement(1, Now, NewDate), calculate_next_rotation([{date, Date}|T], NewNow) end; calculate_next_rotation([{date, Date}|T], {{Year, Month, Day}, _} = Now) -> PlusDays = case Date of X when X < Day -> %% rotation is next month Last = calendar:last_day_of_the_month(Year, Month), (Last - Day); X when X > Day -> %% rotation is later this month X - Day end, Seconds = calendar:datetime_to_gregorian_seconds(Now) + (86400 * PlusDays), NewNow = calendar:gregorian_seconds_to_datetime(Seconds), calculate_next_rotation(T, NewNow). -spec trace_filter(Query :: 'none' | [tuple()]) -> {ok, any()}. trace_filter(Query) -> trace_filter(?DEFAULT_TRACER, Query). %% TODO: Support multiple trace modules %-spec trace_filter(Module :: atom(), Query :: 'none' | [tuple()]) -> {ok, any()}. trace_filter(Module, Query) when Query == none; Query == [] -> {ok, _} = glc:compile(Module, glc:null(false)); trace_filter(Module, Query) when is_list(Query) -> {ok, _} = glc:compile(Module, glc_lib:reduce(trace_any(Query))). validate_trace({Filter, Level, {Destination, ID}}) when is_tuple(Filter); is_list(Filter), is_atom(Level), is_atom(Destination) -> case validate_trace({Filter, Level, Destination}) of {ok, {F, L, D}} -> {ok, {F, L, {D, ID}}}; Error -> Error end; validate_trace({Filter, Level, Destination}) when is_tuple(Filter); is_list(Filter), is_atom(Level), is_atom(Destination) -> ValidFilter = validate_trace_filter(Filter), try config_to_mask(Level) of _ when not ValidFilter -> {error, invalid_trace}; L when is_list(Filter) -> {ok, {trace_all(Filter), L, Destination}}; L -> {ok, {Filter, L, Destination}} catch _:_ -> {error, invalid_level} end; validate_trace(_) -> {error, invalid_trace}. validate_trace_filter(Filter) when is_tuple(Filter), is_atom(element(1, Filter)) =:= false -> false; validate_trace_filter(Filter) when is_list(Filter) -> lists:all(fun validate_trace_filter/1, Filter); validate_trace_filter({Key, '*'}) when is_atom(Key) -> true; validate_trace_filter({any, L}) when is_list(L) -> lists:all(fun validate_trace_filter/1, L); validate_trace_filter({all, L}) when is_list(L) -> lists:all(fun validate_trace_filter/1, L); validate_trace_filter({null, Bool}) when is_boolean(Bool) -> true; validate_trace_filter({Key, _Value}) when is_atom(Key) -> true; validate_trace_filter({Key, '=', _Value}) when is_atom(Key) -> true; validate_trace_filter({Key, '!=', _Value}) when is_atom(Key) -> true; validate_trace_filter({Key, '<', _Value}) when is_atom(Key) -> true; validate_trace_filter({Key, '=<', _Value}) when is_atom(Key) -> true; validate_trace_filter({Key, '>', _Value}) when is_atom(Key) -> true; validate_trace_filter({Key, '>=', _Value}) when is_atom(Key) -> true; validate_trace_filter(_) -> false. trace_all(Query) -> glc:all(trace_acc(Query)). trace_any(Query) -> glc:any(Query). trace_acc(Query) -> trace_acc(Query, []). 
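%% Shape of the filter proplists folded by trace_acc/2 below, as accepted by
%% validate_trace/1 (a sketch; the module and file names are made up):
%%
%%   lager_util:validate_trace({[{module, my_mod}, {function, '*'}],
%%                              debug, {lager_file_backend, "trace.log"}})
%%
%% returns {ok, {Filter, {mask, Mask}, {lager_file_backend, "trace.log"}}},
%% where the proplist has been compiled by trace_all/1 into a glc 'all' query.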
trace_acc([], Acc) -> lists:reverse(Acc); trace_acc([{any, L}|T], Acc) -> trace_acc(T, [glc:any(L)|Acc]); trace_acc([{all, L}|T], Acc) -> trace_acc(T, [glc:all(L)|Acc]); trace_acc([{null, Bool}|T], Acc) -> trace_acc(T, [glc:null(Bool)|Acc]); trace_acc([{Key, '*'}|T], Acc) -> trace_acc(T, [glc:wc(Key)|Acc]); trace_acc([{Key, '!'}|T], Acc) -> trace_acc(T, [glc:nf(Key)|Acc]); trace_acc([{Key, Val}|T], Acc) -> trace_acc(T, [glc:eq(Key, Val)|Acc]); trace_acc([{Key, '=', Val}|T], Acc) -> trace_acc(T, [glc:eq(Key, Val)|Acc]); trace_acc([{Key, '!=', Val}|T], Acc) -> trace_acc(T, [glc:neq(Key, Val)|Acc]); trace_acc([{Key, '>', Val}|T], Acc) -> trace_acc(T, [glc:gt(Key, Val)|Acc]); trace_acc([{Key, '>=', Val}|T], Acc) -> trace_acc(T, [glc:gte(Key, Val)|Acc]); trace_acc([{Key, '=<', Val}|T], Acc) -> trace_acc(T, [glc:lte(Key, Val)|Acc]); trace_acc([{Key, '<', Val}|T], Acc) -> trace_acc(T, [glc:lt(Key, Val)|Acc]). check_traces(_, _, [], Acc) -> lists:flatten(Acc); check_traces(Attrs, Level, [{_, {mask, FilterLevel}, _}|Flows], Acc) when (Level band FilterLevel) == 0 -> check_traces(Attrs, Level, Flows, Acc); check_traces(Attrs, Level, [{Filter, _, _}|Flows], Acc) when length(Attrs) < length(Filter) -> check_traces(Attrs, Level, Flows, Acc); check_traces(Attrs, Level, [Flow|Flows], Acc) -> check_traces(Attrs, Level, Flows, [check_trace(Attrs, Flow)|Acc]). check_trace(Attrs, {Filter, _Level, Dest}) when is_list(Filter) -> check_trace(Attrs, {trace_all(Filter), _Level, Dest}); check_trace(Attrs, {Filter, _Level, Dest}) when is_tuple(Filter) -> Made = gre:make(Attrs, [list]), glc:handle(?DEFAULT_TRACER, Made), Match = glc_lib:matches(Filter, Made), case Match of true -> Dest; false -> [] end. -spec is_loggable(lager_msg:lager_msg(), non_neg_integer()|{'mask', non_neg_integer()}, term()) -> boolean(). is_loggable(Msg, {mask, Mask}, MyName) -> %% using syslog style comparison flags %S = lager_msg:severity_as_int(Msg), %?debugFmt("comparing masks ~.2B and ~.2B -> ~p~n", [S, Mask, S band Mask]), (lager_msg:severity_as_int(Msg) band Mask) /= 0 orelse lists:member(MyName, lager_msg:destinations(Msg)); is_loggable(Msg, SeverityThreshold, MyName) when is_atom(SeverityThreshold) -> is_loggable(Msg, level_to_num(SeverityThreshold), MyName); is_loggable(Msg, SeverityThreshold, MyName) when is_integer(SeverityThreshold) -> lager_msg:severity_as_int(Msg) =< SeverityThreshold orelse lists:member(MyName, lager_msg:destinations(Msg)). i2l(I) when I < 10 -> [$0, $0+I]; i2l(I) -> integer_to_list(I). i3l(I) when I < 100 -> [$0 | i2l(I)]; i3l(I) -> integer_to_list(I). %% When log_root option is provided, get the real path to a file expand_path(RelPath) -> case application:get_env(lager, log_root) of {ok, LogRoot} when is_list(LogRoot) -> % Join relative path %% check if the given RelPath contains LogRoot, if so, do not add %% it again; see gh #304 case string:str(filename:dirname(RelPath), LogRoot) of X when X > 0 -> RelPath; _Zero -> filename:join(LogRoot, RelPath) end; undefined -> % No log_root given, keep relative path RelPath end. %% Find a file among the already installed handlers. %% %% The file is already expanded (i.e. lager_util:expand_path already added the %% "log_root"), but the file paths inside Handlers are not. 
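%% A sketch of the handler entries this walks (matching the first clause
%% below; the concrete file names are made up):
%%
%%   find_file("log/console.log",
%%             [{{lager_file_backend, "console.log"}, HandlerId, Sink}])
%%
%% returns the whole {{lager_file_backend, "console.log"}, HandlerId, Sink}
%% tuple when both paths expand to the same absolute name, and false when no
%% handler matches.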
find_file(_File1, _Handlers = []) -> false; find_file(File1, [{{lager_file_backend, File2}, _Handler, _Sink} = HandlerInfo | Handlers]) -> File1Abs = filename:absname(File1), File2Abs = filename:absname(lager_util:expand_path(File2)), case File1Abs =:= File2Abs of true -> % The file inside HandlerInfo is the same as the file we are looking % for, so we are done. HandlerInfo; false -> find_file(File1, Handlers) end; find_file(File1, [_HandlerInfo | Handlers]) -> find_file(File1, Handlers). %% conditionally check the HWM if the event would not have been filtered check_hwm(Shaper = #lager_shaper{filter = Filter}, Event) -> case Filter(Event) of true -> {true, 0, Shaper}; false -> check_hwm(Shaper) end. %% Log rate limit, i.e. high water mark for incoming messages check_hwm(Shaper = #lager_shaper{hwm = undefined}) -> {true, 0, Shaper}; check_hwm(Shaper = #lager_shaper{mps = Mps, hwm = Hwm, lasttime = Last}) when Mps < Hwm -> {M, S, _} = Now = os:timestamp(), case Last of {M, S, _} -> {true, 0, Shaper#lager_shaper{mps=Mps+1}}; _ -> %different second - reset mps {true, 0, Shaper#lager_shaper{mps=1, lasttime = Now}} end; check_hwm(Shaper = #lager_shaper{lasttime = Last, dropped = Drop}) -> %% are we still in the same second? {M, S, _} = Now = os:timestamp(), case Last of {M, S, N} -> %% still in same second, but have exceeded the high water mark NewDrops = case should_flush(Shaper) of true -> discard_messages(Now, Shaper#lager_shaper.filter, 0); false -> 0 end, Timer = case erlang:read_timer(Shaper#lager_shaper.timer) of false -> erlang:send_after(trunc((1000000 - N)/1000), self(), {shaper_expired, Shaper#lager_shaper.id}); _ -> Shaper#lager_shaper.timer end, {false, 0, Shaper#lager_shaper{dropped=Drop+NewDrops, timer=Timer}}; _ -> _ = erlang:cancel_timer(Shaper#lager_shaper.timer), %% different second, reset all counters and allow it {true, Drop, Shaper#lager_shaper{dropped = 0, mps=0, lasttime = Now}} end. should_flush(#lager_shaper{flush_queue = true, flush_threshold = 0}) -> true; should_flush(#lager_shaper{flush_queue = true, flush_threshold = T}) -> {_, L} = process_info(self(), message_queue_len), L > T; should_flush(_) -> false. discard_messages(Second, Filter, Count) -> {M, S, _} = os:timestamp(), case Second of {M, S, _} -> receive %% we only discard gen_event notifications, because %% otherwise we might discard gen_event internal %% messages, such as trapped EXITs {notify, Event} -> NewCount = case Filter(Event) of false -> Count+1; true -> Count end, discard_messages(Second, Filter, NewCount) after 0 -> Count end; _ -> Count end. %% @private Build an atom for the gen_event process based on a sink name. %% For historical reasons, the default gen_event process for lager itself is named %% `lager_event'. For all other sinks, it is SinkName++`_lager_event' make_internal_sink_name(lager) -> ?DEFAULT_SINK; make_internal_sink_name(Sink) -> list_to_atom(atom_to_list(Sink) ++ "_lager_event"). -spec otp_version() -> pos_integer(). %% @doc Return the major version of the current Erlang/OTP runtime as an integer. otp_version() -> {Vsn, _} = string:to_integer( case erlang:system_info(otp_release) of [$R | Rel] -> Rel; Rel -> Rel end), Vsn. maybe_flush(undefined, #lager_shaper{} = S) -> S; maybe_flush(Flag, #lager_shaper{} = S) when is_boolean(Flag) -> S#lager_shaper{flush_queue = Flag}. -spec has_file_changed(Name :: file:name_all(), Inode0 :: pos_integer(), Ctime0 :: file:date_time()) -> {boolean(), file:file_info() | undefined}. 
has_file_changed(Name, Inode0, Ctime0) -> {OsType, _} = os:type(), F = file:read_file_info(Name, [raw]), case {OsType, F} of {win32, {ok, #file_info{ctime=Ctime1}=FInfo}} -> % Note: on win32, Inode is always zero % So check the file's ctime to see if it % needs to be re-opened Changed = Ctime0 =/= Ctime1, {Changed, FInfo}; {_, {ok, #file_info{inode=Inode1}=FInfo}} -> Changed = Inode0 =/= Inode1, {Changed, FInfo}; {_, _} -> {true, undefined} end. -ifdef(TEST). parse_test() -> ?assertEqual({ok, [{minute, 0}]}, parse_rotation_date_spec("$H0")), ?assertEqual({ok, [{minute, 59}]}, parse_rotation_date_spec("$H59")), ?assertEqual({ok, [{hour, 0}]}, parse_rotation_date_spec("$D0")), ?assertEqual({ok, [{hour, 23}]}, parse_rotation_date_spec("$D23")), ?assertEqual({ok, [{day, 0}, {hour, 23}]}, parse_rotation_date_spec("$W0D23")), ?assertEqual({ok, [{day, 5}, {hour, 16}]}, parse_rotation_date_spec("$W5D16")), ?assertEqual({ok, [{day, 0}, {hour, 12}, {minute, 30}]}, parse_rotation_date_spec("$W0D12H30")), ?assertEqual({ok, [{date, 1}, {hour, 0}]}, parse_rotation_date_spec("$M1D0")), ?assertEqual({ok, [{date, 5}, {hour, 6}]}, parse_rotation_date_spec("$M5D6")), ?assertEqual({ok, [{date, 5}, {hour, 0}]}, parse_rotation_date_spec("$M5")), ?assertEqual({ok, [{date, 31}, {hour, 0}]}, parse_rotation_date_spec("$M31")), ?assertEqual({ok, [{date, 31}, {hour, 1}]}, parse_rotation_date_spec("$M31D1")), ?assertEqual({ok, [{date, last}, {hour, 0}]}, parse_rotation_date_spec("$ML")), ?assertEqual({ok, [{date, last}, {hour, 0}]}, parse_rotation_date_spec("$Ml")), ?assertEqual({ok, [{day, 5}, {hour, 0}]}, parse_rotation_date_spec("$W5")), ok. parse_fail_test() -> ?assertEqual({error, invalid_date_spec}, parse_rotation_date_spec("$H")), ?assertEqual({error, invalid_date_spec}, parse_rotation_date_spec("$H60")), ?assertEqual({error, invalid_date_spec}, parse_rotation_date_spec("$D")), ?assertEqual({error, invalid_date_spec}, parse_rotation_date_spec("$D24")), ?assertEqual({error, invalid_date_spec}, parse_rotation_date_spec("$W7")), ?assertEqual({error, invalid_date_spec}, parse_rotation_date_spec("$W7D1")), ?assertEqual({error, invalid_date_spec}, parse_rotation_date_spec("$M32")), ?assertEqual({error, invalid_date_spec}, parse_rotation_date_spec("$M32D1")), ?assertEqual({error, invalid_date_spec}, parse_rotation_date_spec("$")), ?assertEqual({error, invalid_date_spec}, parse_rotation_date_spec("")), ?assertEqual({error, invalid_date_spec}, parse_rotation_date_spec("$D15M5")), ?assertEqual({error, invalid_date_spec}, parse_rotation_date_spec("$M5W5")), ok. 
rotation_calculation_test() -> ?assertMatch({{2000, 1, 1}, {13, 0, 0}}, calculate_next_rotation([{minute, 0}], {{2000, 1, 1}, {12, 34, 43}})), ?assertMatch({{2000, 1, 1}, {12, 45, 0}}, calculate_next_rotation([{minute, 45}], {{2000, 1, 1}, {12, 34, 43}})), ?assertMatch({{2000, 1, 2}, {0, 0, 0}}, calculate_next_rotation([{minute, 0}], {{2000, 1, 1}, {23, 45, 43}})), ?assertMatch({{2000, 1, 2}, {0, 0, 0}}, calculate_next_rotation([{hour, 0}], {{2000, 1, 1}, {12, 34, 43}})), ?assertMatch({{2000, 1, 1}, {16, 0, 0}}, calculate_next_rotation([{hour, 16}], {{2000, 1, 1}, {12, 34, 43}})), ?assertMatch({{2000, 1, 2}, {12, 0, 0}}, calculate_next_rotation([{hour, 12}], {{2000, 1, 1}, {12, 34, 43}})), ?assertMatch({{2000, 2, 1}, {12, 0, 0}}, calculate_next_rotation([{date, 1}, {hour, 12}], {{2000, 1, 1}, {12, 34, 43}})), ?assertMatch({{2000, 2, 1}, {12, 0, 0}}, calculate_next_rotation([{date, 1}, {hour, 12}], {{2000, 1, 15}, {12, 34, 43}})), ?assertMatch({{2000, 2, 1}, {12, 0, 0}}, calculate_next_rotation([{date, 1}, {hour, 12}], {{2000, 1, 2}, {12, 34, 43}})), ?assertMatch({{2000, 2, 1}, {12, 0, 0}}, calculate_next_rotation([{date, 1}, {hour, 12}], {{2000, 1, 31}, {12, 34, 43}})), ?assertMatch({{2000, 1, 1}, {16, 0, 0}}, calculate_next_rotation([{date, 1}, {hour, 16}], {{2000, 1, 1}, {12, 34, 43}})), ?assertMatch({{2000, 1, 15}, {16, 0, 0}}, calculate_next_rotation([{date, 15}, {hour, 16}], {{2000, 1, 1}, {12, 34, 43}})), ?assertMatch({{2000, 1, 31}, {16, 0, 0}}, calculate_next_rotation([{date, last}, {hour, 16}], {{2000, 1, 1}, {12, 34, 43}})), ?assertMatch({{2000, 1, 31}, {16, 0, 0}}, calculate_next_rotation([{date, last}, {hour, 16}], {{2000, 1, 31}, {12, 34, 43}})), ?assertMatch({{2000, 2, 29}, {16, 0, 0}}, calculate_next_rotation([{date, last}, {hour, 16}], {{2000, 1, 31}, {17, 34, 43}})), ?assertMatch({{2001, 2, 28}, {16, 0, 0}}, calculate_next_rotation([{date, last}, {hour, 16}], {{2001, 1, 31}, {17, 34, 43}})), ?assertMatch({{2000, 1, 1}, {16, 0, 0}}, calculate_next_rotation([{day, 6}, {hour, 16}], {{2000, 1, 1}, {12, 34, 43}})), ?assertMatch({{2000, 1, 8}, {16, 0, 0}}, calculate_next_rotation([{day, 6}, {hour, 16}], {{2000, 1, 1}, {17, 34, 43}})), ?assertMatch({{2000, 1, 7}, {16, 0, 0}}, calculate_next_rotation([{day, 5}, {hour, 16}], {{2000, 1, 1}, {17, 34, 43}})), ?assertMatch({{2000, 1, 3}, {16, 0, 0}}, calculate_next_rotation([{day, 1}, {hour, 16}], {{2000, 1, 1}, {17, 34, 43}})), ?assertMatch({{2000, 1, 2}, {16, 0, 0}}, calculate_next_rotation([{day, 0}, {hour, 16}], {{2000, 1, 1}, {17, 34, 43}})), ?assertMatch({{2000, 1, 9}, {16, 0, 0}}, calculate_next_rotation([{day, 0}, {hour, 16}], {{2000, 1, 2}, {17, 34, 43}})), ?assertMatch({{2000, 2, 3}, {16, 0, 0}}, calculate_next_rotation([{day, 4}, {hour, 16}], {{2000, 1, 29}, {17, 34, 43}})), ?assertMatch({{2000, 1, 7}, {16, 0, 0}}, calculate_next_rotation([{day, 5}, {hour, 16}], {{2000, 1, 3}, {17, 34, 43}})), ?assertMatch({{2000, 1, 3}, {16, 0, 0}}, calculate_next_rotation([{day, 1}, {hour, 16}], {{1999, 12, 28}, {17, 34, 43}})), ok. 
check_trace_test() -> lager:start(), trace_filter(none), %% match by module ?assertEqual([foo], check_traces([{module, ?MODULE}], ?EMERGENCY, [ {[{module, ?MODULE}], config_to_mask(emergency), foo}, {[{module, test}], config_to_mask(emergency), bar}], [])), %% match by module, but other unsatisfyable attribute ?assertEqual([], check_traces([{module, ?MODULE}], ?EMERGENCY, [ {[{module, ?MODULE}, {foo, bar}], config_to_mask(emergency), foo}, {[{module, test}], config_to_mask(emergency), bar}], [])), %% match by wildcard module ?assertEqual([bar], check_traces([{module, ?MODULE}], ?EMERGENCY, [ {[{module, ?MODULE}, {foo, bar}], config_to_mask(emergency), foo}, {[{module, '*'}], config_to_mask(emergency), bar}], [])), %% wildcard module, one trace with unsatisfyable attribute ?assertEqual([bar], check_traces([{module, ?MODULE}], ?EMERGENCY, [ {[{module, '*'}, {foo, bar}], config_to_mask(emergency), foo}, {[{module, '*'}], config_to_mask(emergency), bar}], [])), %% wildcard but not present custom trace attribute ?assertEqual([bar], check_traces([{module, ?MODULE}], ?EMERGENCY, [ {[{module, '*'}, {foo, '*'}], config_to_mask(emergency), foo}, {[{module, '*'}], config_to_mask(emergency), bar}], [])), %% wildcarding a custom attribute works when it is present ?assertEqual([bar, foo], check_traces([{module, ?MODULE}, {foo, bar}], ?EMERGENCY, [ {[{module, '*'}, {foo, '*'}], config_to_mask(emergency), foo}, {[{module, '*'}], config_to_mask(emergency), bar}], [])), %% denied by level ?assertEqual([], check_traces([{module, ?MODULE}, {foo, bar}], ?INFO, [ {[{module, '*'}, {foo, '*'}], config_to_mask(emergency), foo}, {[{module, '*'}], config_to_mask(emergency), bar}], [])), %% allowed by level ?assertEqual([foo], check_traces([{module, ?MODULE}, {foo, bar}], ?INFO, [ {[{module, '*'}, {foo, '*'}], config_to_mask(debug), foo}, {[{module, '*'}], config_to_mask(emergency), bar}], [])), ?assertEqual([anythingbutnotice, infoandbelow, infoonly], check_traces([{module, ?MODULE}], ?INFO, [ {[{module, '*'}], config_to_mask('=debug'), debugonly}, {[{module, '*'}], config_to_mask('=info'), infoonly}, {[{module, '*'}], config_to_mask('<=info'), infoandbelow}, {[{module, '*'}], config_to_mask('!=info'), anythingbutinfo}, {[{module, '*'}], config_to_mask('!=notice'), anythingbutnotice} ], [])), application:stop(lager), application:stop(goldrush), ok. is_loggable_test_() -> [ {"Loggable by severity only", ?_assert(is_loggable(lager_msg:new("", alert, [], []),2,me))}, {"Not loggable by severity only", ?_assertNot(is_loggable(lager_msg:new("", critical, [], []),1,me))}, {"Loggable by severity with destination", ?_assert(is_loggable(lager_msg:new("", alert, [], [you]),2,me))}, {"Not loggable by severity with destination", ?_assertNot(is_loggable(lager_msg:new("", critical, [], [you]),1,me))}, {"Loggable by destination overriding severity", ?_assert(is_loggable(lager_msg:new("", critical, [], [me]),1,me))} ]. 
format_time_test_() -> [ ?_assertEqual("2012-10-04 11:16:23.002", begin {D, T} = format_time({{2012,10,04},{11,16,23,2}}), lists:flatten([D,$ ,T]) end), ?_assertEqual("2012-10-04 11:16:23.999", begin {D, T} = format_time({{2012,10,04},{11,16,23,999}}), lists:flatten([D,$ ,T]) end), ?_assertEqual("2012-10-04 11:16:23", begin {D, T} = format_time({{2012,10,04},{11,16,23}}), lists:flatten([D,$ ,T]) end), ?_assertEqual("2012-10-04 00:16:23.092 UTC", begin {D, T} = format_time({utc, {{2012,10,04},{0,16,23,92}}}), lists:flatten([D,$ ,T]) end), ?_assertEqual("2012-10-04 11:16:23 UTC", begin {D, T} = format_time({utc, {{2012,10,04},{11,16,23}}}), lists:flatten([D,$ ,T]) end) ]. config_to_levels_test() -> ?assertEqual([none], config_to_levels('none')), ?assertEqual({mask, 0}, config_to_mask('none')), ?assertEqual([debug], config_to_levels('=debug')), ?assertEqual([debug], config_to_levels('debug')), ?assertEqual(levels() -- [debug], config_to_levels('>=info')), ?assertEqual(levels() -- [debug], config_to_levels('=>info')), ?assertEqual([debug, info, notice], config_to_levels('<=notice')), ?assertEqual([debug, info, notice], config_to_levels('=info')), ?assertError(badarg, config_to_levels('=<=info')), ?assertError(badarg, config_to_levels('<==>=<=>info')), %% double negatives DO work, however ?assertEqual([debug], config_to_levels('!!=debug')), ?assertEqual(levels() -- [debug], config_to_levels('!!!=debug')), ok. config_to_mask_test() -> ?assertEqual({mask, 0}, config_to_mask('none')), ?assertEqual({mask, ?DEBUG bor ?INFO bor ?NOTICE bor ?WARNING bor ?ERROR bor ?CRITICAL bor ?ALERT bor ?EMERGENCY}, config_to_mask('debug')), ?assertEqual({mask, ?WARNING bor ?ERROR bor ?CRITICAL bor ?ALERT bor ?EMERGENCY}, config_to_mask('warning')), ?assertEqual({mask, ?DEBUG bor ?NOTICE bor ?WARNING bor ?ERROR bor ?CRITICAL bor ?ALERT bor ?EMERGENCY}, config_to_mask('!=info')), ok. mask_to_levels_test() -> ?assertEqual([], mask_to_levels(0)), ?assertEqual([debug], mask_to_levels(2#10000000)), ?assertEqual([debug, info], mask_to_levels(2#11000000)), ?assertEqual([debug, info, emergency], mask_to_levels(2#11000001)), ?assertEqual([debug, notice, error], mask_to_levels(?DEBUG bor ?NOTICE bor ?ERROR)), ok. expand_path_test() -> OldRootVal = application:get_env(lager, log_root), ok = application:unset_env(lager, log_root), ?assertEqual("/foo/bar", expand_path("/foo/bar")), ?assertEqual("foo/bar", expand_path("foo/bar")), ok = application:set_env(lager, log_root, "log/dir"), ?assertEqual("/foo/bar", expand_path("/foo/bar")), % Absolute path should not be changed ?assertEqual("log/dir/foo/bar", expand_path("foo/bar")), ?assertEqual("log/dir/foo/bar", expand_path("log/dir/foo/bar")), %% gh #304 case OldRootVal of undefined -> application:unset_env(lager, log_root); {ok, Root} -> application:set_env(lager, log_root, Root) end, ok. sink_name_test_() -> [ ?_assertEqual(lager_event, make_internal_sink_name(lager)), ?_assertEqual(audit_lager_event, make_internal_sink_name(audit)) ]. create_test_dir() -> {ok, Tmp} = get_temp_dir(), Dir = filename:join([Tmp, "lager_test", erlang:integer_to_list(erlang:phash2(os:timestamp()))]), ?assertEqual(ok, filelib:ensure_dir(Dir)), TestDir = case file:make_dir(Dir) of ok -> Dir; Err -> ?assertEqual({error, eexist}, Err), create_test_dir() end, ok = application:set_env(lager, test_dir, TestDir), {ok, TestDir}. get_test_dir() -> case application:get_env(lager, test_dir) of undefined -> create_test_dir(); {ok, _}=Res -> Res end. 
get_temp_dir() -> Tmp = case os:getenv("TEMP") of false -> case os:getenv("TMP") of false -> "/tmp"; Dir1 -> Dir1 end; Dir0 -> Dir0 end, ?assertEqual(true, filelib:is_dir(Tmp)), {ok, Tmp}. delete_test_dir() -> {ok, TestDir} = get_test_dir(), ok = delete_test_dir(TestDir). delete_test_dir(TestDir) -> ok = application:unset_env(lager, test_dir), {OsType, _} = os:type(), ok = case {OsType, otp_version()} of {win32, _} -> application:stop(lager), do_delete_test_dir(TestDir); {unix, 15} -> os:cmd("rm -rf " ++ TestDir); {unix, _} -> do_delete_test_dir(TestDir) end. do_delete_test_dir(Dir) -> ListRet = file:list_dir_all(Dir), ?assertMatch({ok, _}, ListRet), {_, Entries} = ListRet, lists:foreach( fun(Entry) -> FsElem = filename:join(Dir, Entry), case filelib:is_dir(FsElem) of true -> delete_test_dir(FsElem); _ -> case file:delete(FsElem) of ok -> ok; Error -> io:format(standard_error, "[ERROR]: error deleting file ~p~n", [FsElem]), ?assertEqual(ok, Error) end end end, Entries), ?assertEqual(ok, file:del_dir(Dir)). do_delete_file(_FsElem, 0) -> ?assert(false); do_delete_file(FsElem, Attempts) -> case file:delete(FsElem) of ok -> ok; _Error -> do_delete_file(FsElem, Attempts - 1) end. set_dir_permissions(Perms, Dir) -> do_set_dir_permissions(os:type(), Perms, Dir). do_set_dir_permissions({win32, _}, _Perms, _Dir) -> ok; do_set_dir_permissions({unix, _}, Perms, Dir) -> os:cmd("chmod -R " ++ Perms ++ " " ++ Dir), ok. safe_application_load(App) -> case application:load(App) of ok -> ok; {error, {already_loaded, App}} -> ok; Error -> ?assertEqual(ok, Error) end. safe_write_file(File, Content) -> % Note: ensures that the new creation time is at least one second % in the future ?assertEqual(ok, file:write_file(File, Content)), Ctime0 = calendar:local_time(), Ctime0Sec = calendar:datetime_to_gregorian_seconds(Ctime0), Ctime1Sec = Ctime0Sec + 1, Ctime1 = calendar:gregorian_seconds_to_datetime(Ctime1Sec), {ok, FInfo0} = file:read_file_info(File, [raw]), FInfo1 = FInfo0#file_info{ctime = Ctime1}, ?assertEqual(ok, file:write_file_info(File, FInfo1, [raw])). -endif. lager-3.8.0/src/lager_sup.erl0000644000232200023220000000600613523436621016445 0ustar debalancedebalance%% Copyright (c) 2011-2012 Basho Technologies, Inc. All Rights Reserved. %% %% This file is provided to you under the Apache License, %% Version 2.0 (the "License"); you may not use this file %% except in compliance with the License. You may obtain %% a copy of the License at %% %% http://www.apache.org/licenses/LICENSE-2.0 %% %% Unless required by applicable law or agreed to in writing, %% software distributed under the License is distributed on an %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY %% KIND, either express or implied. See the License for the %% specific language governing permissions and limitations %% under the License. %% @doc Lager's top level supervisor. %% @private -module(lager_sup). -behaviour(supervisor). %% API -export([start_link/0]). %% Callbacks -export([init/1]). start_link() -> supervisor:start_link({local, ?MODULE}, ?MODULE, []). init([]) -> %% set up the config, is safe even during relups lager_config:new(), %% TODO: %% Always start lager_event as the default and make sure that %% other gen_event stuff can start up as needed %% %% Maybe a new API to handle the sink and its policy? 
Children = [ {lager, {gen_event, start_link, [{local, lager_event}]}, permanent, 5000, worker, dynamic}, {lager_handler_watcher_sup, {lager_handler_watcher_sup, start_link, []}, permanent, 5000, supervisor, [lager_handler_watcher_sup]}], CrashLog = decide_crash_log(application:get_env(lager, crash_log, false)), {ok, {{one_for_one, 10, 60}, Children ++ CrashLog }}. validate_positive({ok, Val}, _Default) when is_integer(Val) andalso Val >= 0 -> Val; validate_positive(_Val, Default) -> Default. determine_rotation_date({ok, ""}) -> undefined; determine_rotation_date({ok, Val3}) -> case lager_util:parse_rotation_date_spec(Val3) of {ok, Spec} -> Spec; {error, _} -> error_logger:error_msg("Invalid date spec for " "crash log ~p~n", [Val3]), undefined end; determine_rotation_date(_) -> undefined. determine_rotator_mod({ok, Mod}, _Default) when is_atom(Mod) -> Mod; determine_rotator_mod(_, Default) -> Default. decide_crash_log(undefined) -> []; decide_crash_log(false) -> []; decide_crash_log(File) -> MaxBytes = validate_positive(application:get_env(lager, crash_log_msg_size), 65536), RotationSize = validate_positive(application:get_env(lager, crash_log_size), 0), RotationCount = validate_positive(application:get_env(lager, crash_log_count), 0), RotationDate = determine_rotation_date(application:get_env(lager, crash_log_date)), RotationMod = determine_rotator_mod(application:get_env(lager, crash_log_rotator), lager_rotator_default), [{lager_crash_log, {lager_crash_log, start_link, [File, MaxBytes, RotationSize, RotationDate, RotationCount, RotationMod]}, permanent, 5000, worker, [lager_crash_log]}]. lager-3.8.0/src/lager_rotator_default.erl0000644000232200023220000001510313523436621021032 0ustar debalancedebalance-module(lager_rotator_default). -include_lib("kernel/include/file.hrl"). -behaviour(lager_rotator_behaviour). -export([ create_logfile/2, open_logfile/2, ensure_logfile/5, rotate_logfile/2 ]). -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). -endif. create_logfile(Name, Buffer) -> open_logfile(Name, Buffer). open_logfile(Name, Buffer) -> case filelib:ensure_dir(Name) of ok -> Options = [append, raw] ++ case Buffer of {Size0, Interval} when is_integer(Interval), Interval >= 0, is_integer(Size0), Size0 >= 0 -> [{delayed_write, Size0, Interval}]; _ -> [] end, case file:open(Name, Options) of {ok, FD} -> case file:read_file_info(Name, [raw]) of {ok, FInfo0} -> Inode = FInfo0#file_info.inode, {ok, Ctime} = maybe_update_ctime(Name, FInfo0), Size1 = FInfo0#file_info.size, {ok, {FD, Inode, Ctime, Size1}}; X -> X end; Y -> Y end; Z -> Z end. ensure_logfile(Name, undefined, _Inode, _Ctime, Buffer) -> open_logfile(Name, Buffer); ensure_logfile(Name, FD, Inode0, Ctime0, Buffer) -> case lager_util:has_file_changed(Name, Inode0, Ctime0) of {true, _FInfo} -> reopen_logfile(Name, FD, Buffer); {_, FInfo} -> {ok, {FD, Inode0, Ctime0, FInfo#file_info.size}} end. reopen_logfile(Name, FD0, Buffer) -> %% Flush and close any file handles. %% delayed write can cause file:close not to do a close _ = file:datasync(FD0), _ = file:close(FD0), _ = file:close(FD0), case open_logfile(Name, Buffer) of {ok, {_FD1, _Inode, _Size, _Ctime}=FileInfo} -> %% inode changed, file was probably moved and %% recreated {ok, FileInfo}; Error -> Error end. 
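%% Usage sketch (the path and buffer values are examples): a backend drives
%% this module through the lager_rotator_behaviour callbacks, passing its
%% delayed-write settings as the {Size, Interval} buffer handled by
%% open_logfile/2 above:
%%
%%   Buffer = {64 * 1024, 1000},   %% bytes, milliseconds
%%   {ok, {FD, Inode, Ctime, _Size}} =
%%       lager_rotator_default:create_logfile("log/console.log", Buffer),
%%   ok = file:write(FD, <<"hello\n">>),
%%   %% on later writes, confirm the file has not been moved or recreated
%%   {ok, {_FD1, _Inode1, _Ctime1, _Size1}} =
%%       lager_rotator_default:ensure_logfile("log/console.log", FD, Inode,
%%                                            Ctime, Buffer).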
%% renames failing are OK rotate_logfile(File, 0) -> %% open the file in write-only mode to truncate/create it case file:open(File, [write]) of {ok, FD} -> _ = file:close(FD), _ = file:close(FD), {ok, _Ctime} = maybe_update_ctime(File), ok; Error -> Error end; rotate_logfile(File0, 1) -> File1 = File0 ++ ".0", _ = file:rename(File0, File1), rotate_logfile(File0, 0); rotate_logfile(File0, Count) -> File1 = File0 ++ "." ++ integer_to_list(Count - 2), File2 = File0 ++ "." ++ integer_to_list(Count - 1), _ = file:rename(File1, File2), rotate_logfile(File0, Count - 1). maybe_update_ctime(Name) -> case file:read_file_info(Name, [raw]) of {ok, FInfo} -> maybe_update_ctime(Name, FInfo); _ -> {ok, calendar:local_time()} end. maybe_update_ctime(Name, FInfo) -> {OsType, _} = os:type(), do_update_ctime(OsType, Name, FInfo). do_update_ctime(win32, Name, FInfo0) -> % Note: we force the creation time to be the current time. % On win32 this may prevent the ctime from being updated: % https://stackoverflow.com/q/8804342/1466825 NewCtime = calendar:local_time(), FInfo1 = FInfo0#file_info{ctime = NewCtime}, ok = file:write_file_info(Name, FInfo1, [raw]), {ok, NewCtime}; do_update_ctime(_, _Name, FInfo) -> {ok, FInfo#file_info.ctime}. -ifdef(TEST). rotate_file_test() -> RotCount = 10, {ok, TestDir} = lager_util:create_test_dir(), TestLog = filename:join(TestDir, "rotation.log"), Outer = fun(N) -> ?assertEqual(ok, lager_util:safe_write_file(TestLog, erlang:integer_to_list(N))), Inner = fun(M) -> File = lists:flatten([TestLog, $., erlang:integer_to_list(M)]), ?assert(filelib:is_regular(File)), %% check the expected value is in the file Number = erlang:list_to_binary(integer_to_list(N - M - 1)), ?assertEqual({ok, Number}, file:read_file(File)) end, Count = erlang:min(N, RotCount), % The first time through, Count == 0, so the sequence is empty, % effectively skipping the inner loop so a rotation can occur that % creates the file that Inner looks for. % Don't shoot the messenger, it was worse before this refactoring. lists:foreach(Inner, lists:seq(0, Count-1)), rotate_logfile(TestLog, RotCount) end, lists:foreach(Outer, lists:seq(0, (RotCount * 2))), lager_util:delete_test_dir(TestDir). rotate_file_zero_count_test() -> %% Test that a rotation count of 0 simply truncates the file {ok, TestDir} = lager_util:create_test_dir(), TestLog = filename:join(TestDir, "rotation.log"), ?assertMatch(ok, rotate_logfile(TestLog, 0)), ?assertNot(filelib:is_regular(TestLog ++ ".0")), ?assertEqual(true, filelib:is_regular(TestLog)), ?assertEqual(1, length(filelib:wildcard(TestLog++"*"))), %% assert the new file is 0 size: case file:read_file_info(TestLog, [raw]) of {ok, FInfo} -> ?assertEqual(0, FInfo#file_info.size); _ -> ?assert(false) end, lager_util:delete_test_dir(TestDir). 
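%% Worked example of the rotation scheme verified above (file names are
%% hypothetical): with Count = 3, rotate_logfile("error.log", 3) renames from
%% the highest suffix downwards, so nothing is overwritten before it has
%% itself been moved:
%%
%%   error.log.1 -> error.log.2   (the old error.log.2, if any, is replaced)
%%   error.log.0 -> error.log.1
%%   error.log   -> error.log.0
%%
%% and finally recreates an empty error.log via the Count = 0 clause, which
%% is what rotate_file_zero_count_test/0 checks in isolation.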
rotate_file_fail_test() -> {ok, TestDir} = lager_util:create_test_dir(), TestLog = filename:join(TestDir, "rotation.log"), %% set known permissions on it ok = lager_util:set_dir_permissions("u+rwx", TestDir), %% write a file ?assertEqual(ok, lager_util:safe_write_file(TestLog, "hello")), case os:type() of {win32, _} -> ok; _ -> %% hose up the permissions ok = lager_util:set_dir_permissions("u-w", TestDir), ?assertMatch({error, _}, rotate_logfile(TestLog, 10)) end, %% check we still only have one file, rotation.log ?assertEqual([TestLog], filelib:wildcard(TestLog++"*")), ?assert(filelib:is_regular(TestLog)), %% fix the permissions ok = lager_util:set_dir_permissions("u+w", TestDir), ?assertMatch(ok, rotate_logfile(TestLog, 10)), ?assert(filelib:is_regular(TestLog ++ ".0")), ?assertEqual(true, filelib:is_regular(TestLog)), ?assertEqual(2, length(filelib:wildcard(TestLog++"*"))), %% assert the new file is 0 size: case file:read_file_info(TestLog, [raw]) of {ok, FInfo} -> ?assertEqual(0, FInfo#file_info.size); _ -> ?assert(false) end, %% check that the .0 file now has the contents "hello" ?assertEqual({ok, <<"hello">>}, file:read_file(TestLog++".0")), lager_util:delete_test_dir(TestDir). -endif. lager-3.8.0/src/lager.app.src0000644000232200023220000000552113523436621016343 0ustar debalancedebalance%% -*- tab-width: 4;erlang-indent-level: 4;indent-tabs-mode: nil -*- %% ex: ts=4 sw=4 et {application, lager, [ {description, "Erlang logging framework"}, {vsn, "3.8.0"}, {modules, []}, {applications, [ kernel, stdlib, goldrush ]}, {registered, [lager_sup, lager_event, lager_crash_log, lager_handler_watcher_sup]}, {mod, {lager_app, []}}, {env, [ %% Note: application:start(lager) overwrites previously defined environment variables %% thus declaration of default handlers is done at lager_app.erl %% What colors to use with what log levels {colored, false}, {colors, [ {debug, "\e[0;38m" }, {info, "\e[1;37m" }, {notice, "\e[1;36m" }, {warning, "\e[1;33m" }, {error, "\e[1;31m" }, {critical, "\e[1;35m" }, {alert, "\e[1;44m" }, {emergency, "\e[1;41m" } ]}, %% Whether to write a crash log, and where. False means no crash logger. {crash_log, "log/crash.log"}, %% Maximum size in bytes of events in the crash log - defaults to 65536 {crash_log_msg_size, 65536}, %% Maximum size of the crash log in bytes, before its rotated, set %% to 0 to disable rotation - default is 0 {crash_log_size, 10485760}, %% What time to rotate the crash log - default is no time %% rotation. See the README for a description of this format. {crash_log_date, "$D0"}, %% Number of rotated crash logs to keep, 0 means keep only the %% current one - default is 0 {crash_log_count, 5}, %% Crash Log Rotator Module - default is lager_rotator_default {crash_log_rotator, lager_rotator_default}, %% Whether to redirect error_logger messages into the default lager_event sink - defaults to true {error_logger_redirect, true}, %% How many messages per second to allow from error_logger before we start dropping them {error_logger_hwm, 50}, %% How big the gen_event mailbox can get before it is %% switched into sync mode. This value only applies to %% the default sink; extra sinks can supply their own. {async_threshold, 20}, %% Switch back to async mode, when gen_event mailbox size %% decrease from `async_threshold' to async_threshold - %% async_threshold_window. This value only applies to the %% default sink; extra sinks can supply their own. 
{async_threshold_window, 5} ]}, {licenses, ["Apache 2"]}, {links, [{"Github", "https://github.com/erlang-lager/lager"}]} ]}. lager-3.8.0/src/lager_handler_watcher.erl0000644000232200023220000002240513523436621020771 0ustar debalancedebalance%% Copyright (c) 2011-2012 Basho Technologies, Inc. All Rights Reserved. %% %% This file is provided to you under the Apache License, %% Version 2.0 (the "License"); you may not use this file %% except in compliance with the License. You may obtain %% a copy of the License at %% %% http://www.apache.org/licenses/LICENSE-2.0 %% %% Unless required by applicable law or agreed to in writing, %% software distributed under the License is distributed on an %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY %% KIND, either express or implied. See the License for the %% specific language governing permissions and limitations %% under the License. %% @doc A process that does a gen_event:add_sup_handler and attempts to re-add %% event handlers when they exit. %% @private -module(lager_handler_watcher). -behaviour(gen_server). -include("lager.hrl"). -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). -export([pop_until/2]). -endif. %% callbacks -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]). -export([start_link/3, start/3]). -record(state, { module :: atom(), config :: any(), sink :: pid() | atom() }). start_link(Sink, Module, Config) -> gen_server:start_link(?MODULE, [Sink, Module, Config], []). start(Sink, Module, Config) -> gen_server:start(?MODULE, [Sink, Module, Config], []). init([Sink, Module, Config]) -> process_flag(trap_exit, true), install_handler(Sink, Module, Config), {ok, #state{sink=Sink, module=Module, config=Config}}. handle_call(_Call, _From, State) -> {reply, ok, State}. handle_cast(_Request, State) -> {noreply, State}. handle_info({gen_event_EXIT, Module, normal}, #state{module=Module} = State) -> {stop, normal, State}; handle_info({gen_event_EXIT, Module, shutdown}, #state{module=Module} = State) -> {stop, normal, State}; handle_info({gen_event_EXIT, Module, {'EXIT', {kill_me, [_KillerHWM, KillerReinstallAfter]}}}, #state{module=Module, sink=Sink, config = Config} = State) -> %% Brutally kill the manager but stay alive to restore settings. %% %% SinkPid here means the gen_event process. Handlers *all* live inside the %% same gen_event process space, so when the Pid is killed, *all* of the %% pending log messages in its mailbox will die too. 
SinkPid = whereis(Sink), unlink(SinkPid), {message_queue_len, Len} = process_info(SinkPid, message_queue_len), error_logger:error_msg("Killing sink ~p, current message_queue_len:~p~n", [Sink, Len]), exit(SinkPid, kill), _ = timer:apply_after(KillerReinstallAfter, lager_app, start_handler, [Sink, Module, Config]), {stop, normal, State}; handle_info({gen_event_EXIT, Module, Reason}, #state{module=Module, config=Config, sink=Sink} = State) -> case lager:log(error, self(), "Lager event handler ~p exited with reason ~s", [Module, error_logger_lager_h:format_reason(Reason)]) of ok -> install_handler(Sink, Module, Config); {error, _} -> %% lager is not working, so installing a handler won't work ok end, {noreply, State}; handle_info(reinstall_handler, #state{module=Module, config=Config, sink=Sink} = State) -> install_handler(Sink, Module, Config), {noreply, State}; handle_info({reboot, Sink}, State) -> _ = lager_app:boot(Sink), {noreply, State}; handle_info(stop, State) -> {stop, normal, State}; handle_info({'EXIT', _Pid, killed}, #state{module=Module, config=Config, sink=Sink} = State) -> Tmr = application:get_env(lager, killer_reinstall_after, 5000), _ = timer:apply_after(Tmr, lager_app, start_handler, [Sink, Module, Config]), {stop, normal, State}; handle_info(_Info, State) -> {noreply, State}. terminate(_Reason, _State) -> ok. code_change(_OldVsn, State, _Extra) -> {ok, State}. %% internal install_handler(Sink, lager_backend_throttle, Config) -> %% The lager_backend_throttle needs to know to which sink it is %% attached, hence this admittedly ugly workaround. Handlers are %% sensitive to the structure of the configuration sent to `init', %% sadly, so it's not trivial to add a configuration item to be %% ignored to backends without breaking 3rd party handlers. install_handler2(Sink, lager_backend_throttle, [{sink, Sink}|Config]); install_handler(Sink, Module, Config) -> install_handler2(Sink, Module, Config). %% private install_handler2(Sink, Module, Config) -> case gen_event:add_sup_handler(Sink, Module, Config) of ok -> ?INT_LOG(debug, "Lager installed handler ~p into ~p", [Module, Sink]), lager:update_loglevel_config(Sink), ok; {error, {fatal, Reason}} -> ?INT_LOG(error, "Lager fatally failed to install handler ~p into" " ~p, NOT retrying: ~p", [Module, Sink, Reason]), %% tell ourselves to stop self() ! stop, ok; Error -> %% try to reinstall it later ?INT_LOG(error, "Lager failed to install handler ~p into" " ~p, retrying later : ~p", [Module, Sink, Error]), erlang:send_after(5000, self(), reinstall_handler), ok end. -ifdef(TEST). from_now(Seconds) -> {Mega, Secs, Micro} = os:timestamp(), {Mega, Secs + Seconds, Micro}. reinstall_on_initial_failure_test_() -> {timeout, 60000, [ fun() -> error_logger:tty(false), application:load(lager), application:set_env(lager, handlers, [{lager_test_backend, info}, {lager_crash_backend, [from_now(2), undefined]}]), application:set_env(lager, error_logger_redirect, false), application:unset_env(lager, crash_log), lager:start(), try {_Level, _Time, Message, _Metadata} = lager_test_backend:pop(), ?assertMatch("Lager failed to install handler lager_crash_backend into lager_event, retrying later :"++_, lists:flatten(Message)), timer:sleep(6000), lager_test_backend:flush(), ?assertEqual(0, lager_test_backend:count()), ?assert(lists:member(lager_crash_backend, gen_event:which_handlers(lager_event))) after application:stop(lager), application:stop(goldrush), error_logger:tty(true) end end ] }. 
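%% Configuration sketch for the killer scenario exercised by
%% reinstall_handlers_after_killer_hwm_test_/0 below (all values are
%% examples): lager_manager_killer is installed like any other handler, with
%% [KillerHWM, KillerReinstallAfter] as its argument, and this watcher reads
%% the killer_reinstall_after key when restoring handlers after the sink has
%% been killed:
%%
%%   application:set_env(lager, killer_reinstall_after, 5000),  %% milliseconds
%%   application:set_env(lager, handlers,
%%                       [{lager_manager_killer, [1000, 5000]}]),
%%   ok = lager:start(),
%%   %% force a kill instead of waiting for the mailbox to exceed KillerHWM
%%   lager_manager_killer:kill_me().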
reinstall_on_runtime_failure_test_() -> {timeout, 60000, [ fun() -> error_logger:tty(false), application:load(lager), application:set_env(lager, handlers, [{lager_test_backend, info}, {lager_crash_backend, [undefined, from_now(5)]}]), application:set_env(lager, error_logger_redirect, false), application:unset_env(lager, crash_log), lager:start(), try ?assert(lists:member(lager_crash_backend, gen_event:which_handlers(lager_event))), timer:sleep(6000), pop_until("Lager event handler lager_crash_backend exited with reason crash", fun lists:flatten/1), pop_until("Lager failed to install handler lager_crash_backend into lager_event, retrying later", fun(Msg) -> string:substr(lists:flatten(Msg), 1, 84) end), ?assertEqual(false, lists:member(lager_crash_backend, gen_event:which_handlers(lager_event))) after application:stop(lager), application:stop(goldrush), error_logger:tty(true) end end ] }. reinstall_handlers_after_killer_hwm_test_() -> {timeout, 60000, [ fun() -> error_logger:tty(false), application:load(lager), application:set_env(lager, handlers, [{lager_manager_killer, [1000, 5000]}]), application:set_env(lager, error_logger_redirect, false), application:set_env(lager, killer_reinstall_after, 5000), application:unset_env(lager, crash_log), lager:start(), lager:trace_file("foo", [{foo, "bar"}], error), L = length(gen_event:which_handlers(lager_event)), try lager_manager_killer:kill_me(), timer:sleep(6000), ?assertEqual(L, length(gen_event:which_handlers(lager_event))), file:delete("foo") after application:stop(lager), application:stop(goldrush), error_logger:tty(true) end end ] }. pop_until(String, Fun) -> try_backend_pop(lager_test_backend:pop(), String, Fun). try_backend_pop(undefined, String, _Fun) -> throw("Not found: " ++ String); try_backend_pop({_Severity, _Date, Msg, _Metadata}, String, Fun) -> case Fun(Msg) of String -> ok; _ -> try_backend_pop(lager_test_backend:pop(), String, Fun) end. -endif. lager-3.8.0/src/lager_common_test_backend.erl0000644000232200023220000000776013523436621021644 0ustar debalancedebalance-module(lager_common_test_backend). -behavior(gen_event). %% gen_event callbacks -export([init/1, handle_call/2, handle_event/2, handle_info/2, terminate/2, code_change/3]). -export([get_logs/0, bounce/0, bounce/1]). %% holds the log messages for retreival on terminate -record(state, {level :: {mask, integer()}, formatter :: atom(), format_config :: any(), log = [] :: list()}). -include("lager.hrl"). -define(TERSE_FORMAT,[time, " ", color, "[", severity,"] ", message]). %% @doc Before every test, just %% lager_common_test_backend:bounce(Level) with the log level of your %% choice. Every message will be passed along to ct:pal for your %% viewing in the common_test reports. Also, you can call %% lager_common_test_backend:get_logs/0 to get a list of all log %% messages this backend has received during your test. You can then %% search that list for expected log messages. -spec get_logs() -> [iolist()] | {error, term()}. get_logs() -> gen_event:call(lager_event, ?MODULE, get_logs, infinity). bounce() -> bounce(error). bounce(Level) -> _ = application:stop(lager), application:set_env(lager, suppress_application_start_stop, true), application:set_env(lager, handlers, [ {lager_common_test_backend, [Level, false]} ]), ok = lager:start(), %% we care more about getting all of our messages here than being %% careful with the amount of memory that we're using. error_logger_lager_h:set_high_water(100000), ok. -spec(init(integer()|atom()|[term()]) -> {ok, #state{}} | {error, atom()}). 
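%% Usage sketch of the workflow described in the @doc above (the suite
%% callbacks and the string being searched for are hypothetical):
%%
%%   init_per_testcase(_Case, Config) ->
%%       ok = lager_common_test_backend:bounce(debug),
%%       Config.
%%
%%   my_case(_Config) ->
%%       lager:log(warning, self(), "something interesting happened"),
%%       Logs = lager_common_test_backend:get_logs(),
%%       true = lists:any(fun(Log) ->
%%                            string:find(Log, "interesting") =/= nomatch
%%                        end, Logs).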
%% @private %% @doc Initializes the event handler init([Level, true]) -> % for backwards compatibility init([Level,{lager_default_formatter,[{eol, "\n"}]}]); init([Level,false]) -> % for backwards compatibility init([Level,{lager_default_formatter,?TERSE_FORMAT ++ ["\n"]}]); init([Level,{Formatter,FormatterConfig}]) when is_atom(Formatter) -> case lists:member(Level, ?LEVELS) of true -> {ok, #state{level=lager_util:config_to_mask(Level), formatter=Formatter, format_config=FormatterConfig}}; _ -> {error, bad_log_level} end; init(Level) -> init([Level,{lager_default_formatter,?TERSE_FORMAT ++ ["\n"]}]). -spec(handle_event(tuple(), #state{}) -> {ok, #state{}}). %% @private handle_event({log, Message}, #state{level=L,formatter=Formatter,format_config=FormatConfig,log=Logs} = State) -> case lager_util:is_loggable(Message,L,?MODULE) of true -> Log = Formatter:format(Message,FormatConfig), ct:pal(Log), {ok, State#state{log=[Log|Logs]}}; false -> {ok, State} end; handle_event(Event, State) -> ct:pal(Event), {ok, State#state{log = [Event|State#state.log]}}. -spec(handle_call(any(), #state{}) -> {ok, any(), #state{}}). %% @private %% @doc gets and sets loglevel. This is part of the lager backend api. handle_call(get_loglevel, #state{level=Level} = State) -> {ok, Level, State}; handle_call({set_loglevel, Level}, State) -> case lists:member(Level, ?LEVELS) of true -> {ok, ok, State#state{level=lager_util:config_to_mask(Level)}}; _ -> {ok, {error, bad_log_level}, State} end; handle_call(get_logs, #state{log = Logs} = State) -> {ok, lists:reverse(Logs), State}; handle_call(_, State) -> {ok, ok, State}. -spec(handle_info(any(), #state{}) -> {ok, #state{}}). %% @private %% @doc gen_event callback, does nothing. handle_info(_, State) -> {ok, State}. -spec(code_change(any(), #state{}, any()) -> {ok, #state{}}). %% @private %% @doc gen_event callback, does nothing. code_change(_OldVsn, State, _Extra) -> {ok, State}. -spec(terminate(any(), #state{}) -> {ok, list()}). %% @doc gen_event callback, does nothing. terminate(_Reason, #state{log=Logs}) -> {ok, lists:reverse(Logs)}. lager-3.8.0/src/lager_backend_throttle.erl0000644000232200023220000000605013523436621021151 0ustar debalancedebalance%% Copyright (c) 2011-2013 Basho Technologies, Inc. All Rights Reserved. %% %% This file is provided to you under the Apache License, %% Version 2.0 (the "License"); you may not use this file %% except in compliance with the License. You may obtain %% a copy of the License at %% %% http://www.apache.org/licenses/LICENSE-2.0 %% %% Unless required by applicable law or agreed to in writing, %% software distributed under the License is distributed on an %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY %% KIND, either express or implied. See the License for the %% specific language governing permissions and limitations %% under the License. %% @doc A simple gen_event backend used to monitor mailbox size and %% switch log messages between synchronous and asynchronous modes. %% A gen_event handler is used because a process getting its own mailbox %% size doesn't involve getting a lock, and gen_event handlers run in their %% parent's process. -module(lager_backend_throttle). -include("lager.hrl"). -behaviour(gen_event). -export([init/1, handle_call/2, handle_event/2, handle_info/2, terminate/2, code_change/3]). %% %% Allow test code to verify that we're doing the needful. -ifdef(TEST). -define(ETS_TABLE, async_threshold_test). -define(TOGGLE_SYNC(), test_increment(sync_toggled)). 
-define(TOGGLE_ASYNC(), test_increment(async_toggled)). -else. -define(TOGGLE_SYNC(), true). -define(TOGGLE_ASYNC(), true). -endif. -record(state, { sink :: atom(), hwm :: non_neg_integer(), window_min :: non_neg_integer(), async = true :: boolean() }). init([{sink, Sink}, Hwm, Window]) -> lager_config:set({Sink, async}, true), {ok, #state{sink=Sink, hwm=Hwm, window_min=Hwm - Window}}. handle_call(get_loglevel, State) -> {ok, {mask, ?LOG_NONE}, State}; handle_call({set_loglevel, _Level}, State) -> {ok, ok, State}; handle_call(_Request, State) -> {ok, ok, State}. handle_event({log, _Message},State) -> {message_queue_len, Len} = erlang:process_info(self(), message_queue_len), case {Len > State#state.hwm, Len < State#state.window_min, State#state.async} of {true, _, true} -> %% need to flip to sync mode ?TOGGLE_SYNC(), lager_config:set({State#state.sink, async}, false), {ok, State#state{async=false}}; {_, true, false} -> %% need to flip to async mode ?TOGGLE_ASYNC(), lager_config:set({State#state.sink, async}, true), {ok, State#state{async=true}}; _ -> %% nothing needs to change {ok, State} end; handle_event(_Event, State) -> {ok, State}. handle_info(_Info, State) -> {ok, State}. %% @private terminate(_Reason, _State) -> ok. %% @private code_change(_OldVsn, State, _Extra) -> {ok, State}. -ifdef(TEST). test_get(Key) -> get_default(ets:lookup(?ETS_TABLE, Key)). test_increment(Key) -> ets:insert(?ETS_TABLE, {Key, test_get(Key) + 1}). get_default([]) -> 0; get_default([{_Key, Value}]) -> Value. -endif. lager-3.8.0/src/lager_rotator_behaviour.erl0000644000232200023220000000147013523436621021374 0ustar debalancedebalance-module(lager_rotator_behaviour). %% Create a log file -callback(create_logfile(Name::list(), Buffer::{integer(), integer()} | any()) -> {ok, {file:io_device(), integer(), file:date_time(), integer()}} | {error, any()}). %% Open a log file -callback(open_logfile(Name::list(), Buffer::{integer(), integer()} | any()) -> {ok, {file:io_device(), integer(), file:date_time(), integer()}} | {error, any()}). %% Ensure reference to current target, could be rotated -callback(ensure_logfile(Name::list(), FD::file:io_device(), Inode::integer(), Ctime::file:date_time(), Buffer::{integer(), integer()} | any()) -> {ok, {file:io_device(), integer(), file:date_time(), integer()}} | {error, any()}). %% Rotate the log file -callback(rotate_logfile(Name::list(), Count::integer()) -> ok). lager-3.8.0/src/lager_file_backend.erl0000644000232200023220000015405413523436621020233 0ustar debalancedebalance%% ------------------------------------------------------------------- %% %% Copyright (c) 2011-2017 Basho Technologies, Inc. %% %% This file is provided to you under the Apache License, %% Version 2.0 (the "License"); you may not use this file %% except in compliance with the License. You may obtain %% a copy of the License at %% %% http://www.apache.org/licenses/LICENSE-2.0 %% %% Unless required by applicable law or agreed to in writing, %% software distributed under the License is distributed on an %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY %% KIND, either express or implied. See the License for the %% specific language governing permissions and limitations %% under the License. %% %% ------------------------------------------------------------------- %% @doc File backend for lager, with multiple file support. %% Multiple files are supported, each with the path and the loglevel being %% configurable. The configuration paramter for this backend is a list of %% key-value 2-tuples. 
See the init() function for the available options. %% This backend supports external and internal log %% rotation and will re-open handles to files if the inode changes. It will %% also rotate the files itself if the size of the file exceeds the %% `size' and keep `count' rotated files. `date' is %% an alternate rotation trigger, based on time. See the README for %% documentation. %% For performance, the file backend does delayed writes, although it will %% sync at specific log levels, configured via the `sync_on' option. By default %% the error level or above will trigger a sync. -module(lager_file_backend). -include("lager.hrl"). -include_lib("kernel/include/file.hrl"). -behaviour(gen_event). -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). -compile([{parse_transform, lager_transform}]). -endif. -export([init/1, handle_call/2, handle_event/2, handle_info/2, terminate/2, code_change/3]). -export([config_to_id/1]). -define(DEFAULT_LOG_LEVEL, info). -define(DEFAULT_ROTATION_SIZE, 10485760). %% 10mb -define(DEFAULT_ROTATION_DATE, "$D0"). %% midnight -define(DEFAULT_ROTATION_COUNT, 5). -define(DEFAULT_ROTATION_MOD, lager_rotator_default). -define(DEFAULT_SYNC_LEVEL, error). -define(DEFAULT_SYNC_INTERVAL, 1000). -define(DEFAULT_SYNC_SIZE, 1024*64). %% 64kb -define(DEFAULT_CHECK_INTERVAL, 1000). -record(state, { name :: string(), level :: {'mask', integer()}, fd :: file:io_device() | undefined, inode :: integer() | undefined, ctime :: file:date_time() | undefined, flap = false :: boolean(), size = 0 :: integer(), date :: undefined | string(), count = 10 :: integer(), rotator = lager_util :: atom(), shaper :: lager_shaper(), formatter :: atom(), formatter_config :: any(), sync_on :: {'mask', integer()}, check_interval = ?DEFAULT_CHECK_INTERVAL :: non_neg_integer(), sync_interval = ?DEFAULT_SYNC_INTERVAL :: non_neg_integer(), sync_size = ?DEFAULT_SYNC_SIZE :: non_neg_integer(), last_check = os:timestamp() :: erlang:timestamp(), os_type :: atom() }). -type option() :: {file, string()} | {level, lager:log_level()} | {size, non_neg_integer()} | {date, string()} | {count, non_neg_integer()} | {rotator, atom()} | {high_water_mark, non_neg_integer()} | {flush_queue, boolean()} | {flush_threshold, non_neg_integer()} | {sync_interval, non_neg_integer()} | {sync_size, non_neg_integer()} | {sync_on, lager:log_level()} | {check_interval, non_neg_integer()} | {formatter, atom()} | {formatter_config, term()}. -spec init([option(),...]) -> {ok, #state{}} | {error, {fatal,bad_config}}. 
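%% Configuration sketch (values are illustrative; the path is an example): a
%% typical handler entry built from the option() list above, as it would
%% appear under lager's handlers application key or be handed to
%% gen_event:add_handler/3:
%%
%%   {lager_file_backend, [{file, "log/error.log"},
%%                         {level, error},
%%                         {size, 10485760},
%%                         {date, "$D0"},
%%                         {count, 5},
%%                         {sync_on, error},
%%                         {high_water_mark, 50}]}
%%
%% Options left out fall back to the ?DEFAULT_* values defined above.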
init({FileName, LogLevel}) when is_list(FileName), is_atom(LogLevel) -> %% backwards compatibility hack init([{file, FileName}, {level, LogLevel}]);
init({FileName, LogLevel, Size, Date, Count}) when is_list(FileName), is_atom(LogLevel) -> %% backwards compatibility hack init([{file, FileName}, {level, LogLevel}, {size, Size}, {date, Date}, {count, Count}]);
init([{FileName, LogLevel, Size, Date, Count}, {Formatter,FormatterConfig}]) when is_list(FileName), is_atom(LogLevel), is_atom(Formatter) -> %% backwards compatibility hack init([{file, FileName}, {level, LogLevel}, {size, Size}, {date, Date}, {count, Count}, {formatter, Formatter}, {formatter_config, FormatterConfig}]);
init([LogFile,{Formatter}]) -> %% backwards compatibility hack init([LogFile,{Formatter,[]}]);
init([{FileName, LogLevel}, {Formatter,FormatterConfig}]) when is_list(FileName), is_atom(LogLevel), is_atom(Formatter) -> %% backwards compatibility hack init([{file, FileName}, {level, LogLevel}, {formatter, Formatter}, {formatter_config, FormatterConfig}]);
init(LogFileConfig) when is_list(LogFileConfig) -> case validate_logfile_proplist(LogFileConfig) of false -> %% failed to validate config {error, {fatal, bad_config}}; Config -> %% probably a better way to do this, but whatever [RelName, Level, Date, Size, Count, Rotator, HighWaterMark, Flush, SyncInterval, SyncSize, SyncOn, CheckInterval, Formatter, FormatterConfig] = [proplists:get_value(Key, Config) || Key <- [file, level, date, size, count, rotator, high_water_mark, flush_queue, sync_interval, sync_size, sync_on, check_interval, formatter, formatter_config]], FlushThr = proplists:get_value(flush_threshold, Config, 0), Name = lager_util:expand_path(RelName), schedule_rotation(Name, Date), Shaper = lager_util:maybe_flush(Flush, #lager_shaper{hwm=HighWaterMark, flush_threshold = FlushThr, id=Name}), State0 = #state{name=Name, level=Level, size=Size, date=Date, count=Count, rotator=Rotator, shaper=Shaper, formatter=Formatter, formatter_config=FormatterConfig, sync_on=SyncOn, sync_interval=SyncInterval, sync_size=SyncSize, check_interval=CheckInterval}, State = case Rotator:create_logfile(Name, {SyncSize, SyncInterval}) of {ok, {FD, Inode, Ctime, _Size}} -> State0#state{fd=FD, inode=Inode, ctime=Ctime}; {error, Reason} -> ?INT_LOG(error, "Failed to open log file ~ts with error ~s", [Name, file:format_error(Reason)]), State0#state{flap=true} end, {ok, State} end.
%% @private handle_call({set_loglevel, Level}, #state{name=Ident} = State) -> case validate_loglevel(Level) of false -> {ok, {error, bad_loglevel}, State}; Levels -> ?INT_LOG(notice, "Changed loglevel of ~s to ~p", [Ident, Level]), {ok, ok, State#state{level=Levels}} end;
handle_call(get_loglevel, #state{level=Level} = State) -> {ok, Level, State};
handle_call({set_loghwm, Hwm}, #state{shaper=Shaper, name=Name} = State) -> case validate_logfile_proplist([{file, Name}, {high_water_mark, Hwm}]) of false -> {ok, {error, bad_log_hwm}, State}; _ -> NewShaper = Shaper#lager_shaper{hwm=Hwm}, ?INT_LOG(notice, "Changed loghwm of ~ts to ~p", [Name, Hwm]), {ok, {last_loghwm, Shaper#lager_shaper.hwm}, State#state{shaper=NewShaper}} end;
handle_call(rotate, State = #state{name=File}) -> {ok, NewState} = handle_info({rotate, File}, State), {ok, ok, NewState};
handle_call(_Request, State) -> {ok, ok, State}. 
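%% Runtime-control sketch (the file name is an example): handlers configured
%% through lager are registered under the id {lager_file_backend, File}
%% produced by config_to_id/1, so the calls handled above can be addressed
%% to one particular file:
%%
%%   ok = lager:set_loglevel(lager_file_backend, "log/error.log", warning),
%%   ok = gen_event:call(lager_event,
%%                       {lager_file_backend, "log/error.log"},
%%                       rotate, infinity).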
%% @private handle_event({log, Message}, #state{name=Name, level=L, shaper=Shaper, formatter=Formatter,formatter_config=FormatConfig} = State) -> case lager_util:is_loggable(Message,L,{lager_file_backend, Name}) of true -> case lager_util:check_hwm(Shaper) of {true, Drop, #lager_shaper{hwm=Hwm} = NewShaper} -> NewState = case Drop > 0 of true -> Report = io_lib:format( "lager_file_backend dropped ~p messages in the last second that exceeded the limit of ~p messages/sec", [Drop, Hwm]), ReportMsg = lager_msg:new(Report, warning, [], []), write(State, lager_msg:timestamp(ReportMsg), lager_msg:severity_as_int(ReportMsg), Formatter:format(ReportMsg, FormatConfig)); false -> State end, {ok,write(NewState#state{shaper=NewShaper}, lager_msg:timestamp(Message), lager_msg:severity_as_int(Message), Formatter:format(Message,FormatConfig))}; {false, _, #lager_shaper{dropped=D} = NewShaper} -> {ok, State#state{shaper=NewShaper#lager_shaper{dropped=D+1}}} end; false -> {ok, State} end; handle_event(_Event, State) -> {ok, State}. %% @private handle_info({rotate, File}, #state{name=File, count=Count, date=Date, rotator=Rotator}=State0) -> State1 = close_file(State0), _ = Rotator:rotate_logfile(File, Count), schedule_rotation(File, Date), {ok, State1}; handle_info({shaper_expired, Name}, #state{shaper=Shaper, name=Name, formatter=Formatter, formatter_config=FormatConfig} = State) -> _ = case Shaper#lager_shaper.dropped of 0 -> ok; Dropped -> Report = io_lib:format( "lager_file_backend dropped ~p messages in the last second that exceeded the limit of ~p messages/sec", [Dropped, Shaper#lager_shaper.hwm]), ReportMsg = lager_msg:new(Report, warning, [], []), write(State, lager_msg:timestamp(ReportMsg), lager_msg:severity_as_int(ReportMsg), Formatter:format(ReportMsg, FormatConfig)) end, {ok, State#state{shaper=Shaper#lager_shaper{dropped=0, mps=0, lasttime=os:timestamp()}}}; handle_info(_Info, State) -> {ok, State}. %% @private terminate(_Reason, State) -> %% leaving this function call unmatched makes dialyzer cranky _ = close_file(State), ok. %% @private code_change(_OldVsn, State, _Extra) -> {ok, State}. %% Convert the config into a gen_event handler ID config_to_id({Name,_Severity}) when is_list(Name) -> {?MODULE, Name}; config_to_id({Name,_Severity,_Size,_Rotation,_Count}) -> {?MODULE, Name}; config_to_id([{Name,_Severity,_Size,_Rotation,_Count}, _Format]) -> {?MODULE, Name}; config_to_id([{Name,_Severity}, _Format]) when is_list(Name) -> {?MODULE, Name}; config_to_id(Config) -> case proplists:get_value(file, Config) of undefined -> erlang:error(no_file); File -> {?MODULE, File} end. 
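%% Behaviour sketch for the overload protection implemented in handle_event/2
%% above (the numbers are illustrative): with a handler configured as
%%
%%   {lager_file_backend, [{file, "log/console.log"},
%%                         {level, info},
%%                         {high_water_mark, 5},
%%                         {flush_queue, false}]}
%%
%% messages beyond roughly 5 per second are counted instead of written, and
%% when the shaper timer fires a single summary line of the form
%% "lager_file_backend dropped N messages in the last second ..." is written
%% in their place (see the shaper_expired clause above).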
write(#state{name=Name, fd=FD, inode=Inode, ctime=Ctime, flap=Flap, size=RotSize, count=Count, rotator=Rotator}=State0, Timestamp, Level, Msg) -> case write_should_check(State0, Timestamp) of true -> %% need to check for rotation Buffer = {State0#state.sync_size, State0#state.sync_interval}, case Rotator:ensure_logfile(Name, FD, Inode, Ctime, Buffer) of {ok, {_FD, _Inode, _Ctime, Size}} when RotSize > 0, Size > RotSize -> State1 = close_file(State0), case Rotator:rotate_logfile(Name, Count) of ok -> %% go around the loop again, we'll do another rotation check and hit the next clause of ensure_logfile write(State1, Timestamp, Level, Msg); {error, Reason} -> case Flap of true -> State1; _ -> ?INT_LOG(error, "Failed to rotate log file ~ts with error ~s", [Name, file:format_error(Reason)]), State1#state{flap=true} end end; {ok, {NewFD, NewInode, NewCtime, _Size}} -> %% update our last check and try again State1 = State0#state{last_check=Timestamp, fd=NewFD, inode=NewInode, ctime=NewCtime}, do_write(State1, Level, Msg); {error, Reason} -> case Flap of true -> State0; _ -> ?INT_LOG(error, "Failed to reopen log file ~ts with error ~s", [Name, file:format_error(Reason)]), State0#state{flap=true} end end; false -> do_write(State0, Level, Msg) end. write_should_check(#state{fd=undefined}, _Timestamp) -> true; write_should_check(#state{last_check=LastCheck0, check_interval=CheckInterval, name=Name, inode=Inode0, ctime=Ctime0}, Timestamp) -> LastCheck1 = timer:now_diff(Timestamp, LastCheck0) div 1000, case LastCheck1 >= CheckInterval of true -> true; _ -> % We need to know if the file has changed "out from under lager" so we don't % write to an invalid FD {Result, _FInfo} = lager_util:has_file_changed(Name, Inode0, Ctime0), Result end. do_write(#state{fd=FD, name=Name, flap=Flap} = State, Level, Msg) -> %% delayed_write doesn't report errors _ = file:write(FD, unicode:characters_to_binary(Msg)), {mask, SyncLevel} = State#state.sync_on, case (Level band SyncLevel) =/= 0 of true -> %% force a sync on any message that matches the 'sync_on' bitmask Flap2 = case file:datasync(FD) of {error, Reason2} when Flap == false -> ?INT_LOG(error, "Failed to write log message to file ~ts: ~s", [Name, file:format_error(Reason2)]), true; ok -> false; _ -> Flap end, State#state{flap=Flap2}; _ -> State end. validate_loglevel(Level) -> try lager_util:config_to_mask(Level) of Levels -> Levels catch _:_ -> false end. validate_logfile_proplist(List) -> try validate_logfile_proplist(List, []) of Res -> case proplists:get_value(file, Res) of undefined -> ?INT_LOG(error, "Missing required file option", []), false; _File -> %% merge with the default options {ok, DefaultRotationDate} = lager_util:parse_rotation_date_spec(?DEFAULT_ROTATION_DATE), lists:keymerge(1, lists:sort(Res), lists:sort([ {level, validate_loglevel(?DEFAULT_LOG_LEVEL)}, {date, DefaultRotationDate}, {size, ?DEFAULT_ROTATION_SIZE}, {count, ?DEFAULT_ROTATION_COUNT}, {rotator, ?DEFAULT_ROTATION_MOD}, {sync_on, validate_loglevel(?DEFAULT_SYNC_LEVEL)}, {sync_interval, ?DEFAULT_SYNC_INTERVAL}, {sync_size, ?DEFAULT_SYNC_SIZE}, {check_interval, ?DEFAULT_CHECK_INTERVAL}, {formatter, lager_default_formatter}, {formatter_config, []} ])) end catch {bad_config, Msg, Value} -> ?INT_LOG(error, "~s ~p for file ~tp", [Msg, Value, proplists:get_value(file, List)]), false end. validate_logfile_proplist([], Acc) -> Acc; validate_logfile_proplist([{file, File}|Tail], Acc) -> %% is there any reasonable validation we can do here? 
validate_logfile_proplist(Tail, [{file, File}|Acc]); validate_logfile_proplist([{level, Level}|Tail], Acc) -> case validate_loglevel(Level) of false -> throw({bad_config, "Invalid loglevel", Level}); Res -> validate_logfile_proplist(Tail, [{level, Res}|Acc]) end; validate_logfile_proplist([{size, Size}|Tail], Acc) -> case Size of S when is_integer(S), S >= 0 -> validate_logfile_proplist(Tail, [{size, Size}|Acc]); _ -> throw({bad_config, "Invalid rotation size", Size}) end; validate_logfile_proplist([{count, Count}|Tail], Acc) -> case Count of C when is_integer(C), C >= 0 -> validate_logfile_proplist(Tail, [{count, Count}|Acc]); _ -> throw({bad_config, "Invalid rotation count", Count}) end; validate_logfile_proplist([{rotator, Rotator}|Tail], Acc) -> case is_atom(Rotator) of true -> validate_logfile_proplist(Tail, [{rotator, Rotator}|Acc]); false -> throw({bad_config, "Invalid rotation module", Rotator}) end; validate_logfile_proplist([{high_water_mark, HighWaterMark}|Tail], Acc) -> case HighWaterMark of Hwm when is_integer(Hwm), Hwm >= 0 -> validate_logfile_proplist(Tail, [{high_water_mark, Hwm}|Acc]); _ -> throw({bad_config, "Invalid high water mark", HighWaterMark}) end; validate_logfile_proplist([{date, Date}|Tail], Acc) -> case lager_util:parse_rotation_date_spec(Date) of {ok, Spec} -> validate_logfile_proplist(Tail, [{date, Spec}|Acc]); {error, _} when Date == "" -> %% legacy config allowed blanks validate_logfile_proplist(Tail, [{date, undefined}|Acc]); {error, _} -> throw({bad_config, "Invalid rotation date", Date}) end; validate_logfile_proplist([{sync_interval, SyncInt}|Tail], Acc) -> case SyncInt of Val when is_integer(Val), Val >= 0 -> validate_logfile_proplist(Tail, [{sync_interval, Val}|Acc]); _ -> throw({bad_config, "Invalid sync interval", SyncInt}) end; validate_logfile_proplist([{sync_size, SyncSize}|Tail], Acc) -> case SyncSize of Val when is_integer(Val), Val >= 0 -> validate_logfile_proplist(Tail, [{sync_size, Val}|Acc]); _ -> throw({bad_config, "Invalid sync size", SyncSize}) end; validate_logfile_proplist([{check_interval, CheckInt}|Tail], Acc) -> case CheckInt of Val when is_integer(Val), Val >= 0 -> validate_logfile_proplist(Tail, [{check_interval, Val}|Acc]); always -> validate_logfile_proplist(Tail, [{check_interval, 0}|Acc]); _ -> throw({bad_config, "Invalid check interval", CheckInt}) end; validate_logfile_proplist([{sync_on, Level}|Tail], Acc) -> case validate_loglevel(Level) of false -> throw({bad_config, "Invalid sync on level", Level}); Res -> validate_logfile_proplist(Tail, [{sync_on, Res}|Acc]) end; validate_logfile_proplist([{formatter, Fmt}|Tail], Acc) -> case is_atom(Fmt) of true -> validate_logfile_proplist(Tail, [{formatter, Fmt}|Acc]); false -> throw({bad_config, "Invalid formatter module", Fmt}) end; validate_logfile_proplist([{formatter_config, FmtCfg}|Tail], Acc) -> case is_list(FmtCfg) of true -> validate_logfile_proplist(Tail, [{formatter_config, FmtCfg}|Acc]); false -> throw({bad_config, "Invalid formatter config", FmtCfg}) end; validate_logfile_proplist([{flush_queue, FlushCfg}|Tail], Acc) -> case is_boolean(FlushCfg) of true -> validate_logfile_proplist(Tail, [{flush_queue, FlushCfg}|Acc]); false -> throw({bad_config, "Invalid queue flush flag", FlushCfg}) end; validate_logfile_proplist([{flush_threshold, Thr}|Tail], Acc) -> case Thr of _ when is_integer(Thr), Thr >= 0 -> validate_logfile_proplist(Tail, [{flush_threshold, Thr}|Acc]); _ -> throw({bad_config, "Invalid queue flush threshold", Thr}) end; validate_logfile_proplist([Other|_Tail], 
_Acc) -> throw({bad_config, "Invalid option", Other}). schedule_rotation(_, undefined) -> ok; schedule_rotation(Name, Date) -> erlang:send_after(lager_util:calculate_next_rotation(Date) * 1000, self(), {rotate, Name}), ok. close_file(#state{fd=undefined} = State) -> State; close_file(#state{fd=FD} = State) -> %% Flush and close any file handles. %% delayed write can cause file:close not to do a close _ = file:datasync(FD), _ = file:close(FD), _ = file:close(FD), State#state{fd=undefined}. -ifdef(TEST). get_loglevel_test() -> {ok, Level, _} = handle_call(get_loglevel, #state{name="bar", level=lager_util:config_to_mask(info), fd=0, inode=0, ctime=undefined}), ?assertEqual(Level, lager_util:config_to_mask(info)), {ok, Level2, _} = handle_call(get_loglevel, #state{name="foo", level=lager_util:config_to_mask(warning), fd=0, inode=0, ctime=undefined}), ?assertEqual(Level2, lager_util:config_to_mask(warning)). rotation_test_() -> {foreach, fun() -> SyncLevel = validate_loglevel(?DEFAULT_SYNC_LEVEL), SyncSize = ?DEFAULT_SYNC_SIZE, SyncInterval = ?DEFAULT_SYNC_INTERVAL, Rotator = ?DEFAULT_ROTATION_MOD, CheckInterval = 0, %% hard to test delayed mode {ok, TestDir} = lager_util:create_test_dir(), TestLog = filename:join(TestDir, "test.log"), {OsType, _} = os:type(), #state{name=TestLog, level=?DEBUG, sync_on=SyncLevel, sync_size=SyncSize, sync_interval=SyncInterval, check_interval=CheckInterval, rotator=Rotator, os_type=OsType} end, fun(#state{}) -> ok = lager_util:delete_test_dir() end, [ fun(DefaultState=#state{name=TestLog, os_type=OsType, sync_size=SyncSize, sync_interval=SyncInterval, rotator=Rotator}) -> {"External rotation should work", fun() -> case OsType of win32 -> % Note: test is skipped on win32 due to the fact that a file can't be deleted or renamed % while a process has an open file handle referencing it ok; _ -> {ok, {FD, Inode, Ctime, _Size}} = Rotator:open_logfile(TestLog, {SyncSize, SyncInterval}), State0 = DefaultState#state{fd=FD, inode=Inode, ctime=Ctime}, State1 = write(State0, os:timestamp(), ?DEBUG, "hello world"), ?assertMatch(#state{name=TestLog, level=?DEBUG, fd=FD, inode=Inode, ctime=Ctime}, State1), ?assertEqual(ok, file:delete(TestLog)), State2 = write(State0, os:timestamp(), ?DEBUG, "hello world"), %% assert file has changed ExpState1 = #state{name=TestLog, level=?DEBUG, fd=FD, inode=Inode, ctime=Ctime}, ?assertNotEqual(ExpState1, State2), ?assertMatch(#state{name=TestLog, level=?DEBUG}, State2), ?assertEqual(ok, file:rename(TestLog, TestLog ++ ".1")), State3 = write(State2, os:timestamp(), ?DEBUG, "hello world"), %% assert file has changed ?assertNotEqual(State3, State2), ?assertMatch(#state{name=TestLog, level=?DEBUG}, State3), ok end end} end, fun(DefaultState = #state{name=TestLog, sync_size=SyncSize, sync_interval=SyncInterval, rotator=Rotator}) -> {"Internal rotation and delayed write", fun() -> TestLog0 = TestLog ++ ".0", CheckInterval = 3000, % 3 sec RotationSize = 15, PreviousCheck = os:timestamp(), {ok, {FD, Inode, Ctime, _Size}} = Rotator:open_logfile(TestLog, {SyncSize, SyncInterval}), State0 = DefaultState#state{ fd=FD, inode=Inode, ctime=Ctime, size=RotationSize, check_interval=CheckInterval, last_check=PreviousCheck}, %% new message within check interval with sync_on level Msg1Timestamp = add_secs(PreviousCheck, 1), State1 = write(State0, Msg1Timestamp, ?ERROR, "big big message 1"), ?assertEqual(State0, State1), %% new message within check interval under sync_on level %% not written to disk yet Msg2Timestamp = add_secs(PreviousCheck, 2), State2 = 
write(State1, Msg2Timestamp, ?DEBUG, "buffered message 2"), ?assertEqual(State0, State2), % Note: we must ensure at least one second (DEFAULT_SYNC_INTERVAL) has passed % for message 1 and 2 to be written to disk ElapsedMs = timer:now_diff(os:timestamp(), PreviousCheck) div 1000, case ElapsedMs > SyncInterval of true -> ok; _ -> S = SyncInterval - ElapsedMs, timer:sleep(S) end, %% although file size is big enough... {ok, FInfo} = file:read_file_info(TestLog, [raw]), ?assert(RotationSize < FInfo#file_info.size), %% ...no rotation yet ?assertEqual(PreviousCheck, State2#state.last_check), ?assertNot(filelib:is_regular(TestLog0)), %% new message after check interval Msg3Timestamp = add_secs(PreviousCheck, 4), _State3 = write(State2, Msg3Timestamp, ?DEBUG, "message 3"), %% rotation happened ?assert(filelib:is_regular(TestLog0)), {ok, Bin1} = file:read_file(TestLog0), {ok, Bin2} = file:read_file(TestLog), %% message 1-2 written to file ?assertEqual(<<"big big message 1buffered message 2">>, Bin1), %% message 3 buffered, not yet written to file ?assertEqual(<<"">>, Bin2), ok end} end ]}. add_secs({Mega, Secs, Micro}, Add) -> NewSecs = Secs + Add, {Mega + NewSecs div 10000000, NewSecs rem 10000000, Micro}. filesystem_test_() -> {foreach, fun() -> ok = error_logger:tty(false), ok = lager_util:safe_application_load(lager), ok = application:set_env(lager, handlers, [{lager_test_backend, info}]), ok = application:set_env(lager, error_logger_redirect, false), ok = application:set_env(lager, async_threshold, undefined), {ok, _TestDir} = lager_util:create_test_dir(), ok = lager:start(), %% race condition where lager logs its own start up %% makes several tests fail. See test/lager_test_backend %% around line 800 for more information. ok = timer:sleep(5), ok = lager_test_backend:flush() end, fun(_) -> ok = application:stop(lager), ok = application:stop(goldrush), ok = error_logger:tty(true), ok = lager_util:delete_test_dir() end, [ {"under normal circumstances, file should be opened", fun() -> {ok, TestDir} = lager_util:get_test_dir(), TestLog = filename:join(TestDir, "test.log"), gen_event:add_handler(lager_event, lager_file_backend, [{TestLog, info}, {lager_default_formatter}]), lager:log(error, self(), "Test message"), {ok, Bin} = file:read_file(TestLog), Pid = pid_to_list(self()), ?assertMatch([_, _, "[error]", Pid, "Test message\n"], re:split(Bin, " ", [{return, list}, {parts, 5}])) end}, {"don't choke on unicode", fun() -> {ok, TestDir} = lager_util:get_test_dir(), TestLog = filename:join(TestDir, "test.log"), gen_event:add_handler(lager_event, lager_file_backend, [{TestLog, info}, {lager_default_formatter}]), lager:log(error, self(),"~ts", [[20013,25991,27979,35797]]), {ok, Bin} = file:read_file(TestLog), Pid = pid_to_list(self()), ?assertMatch([_, _, "[error]", Pid, [228,184,173,230,150,135,230,181,139,232,175,149, $\n]], re:split(Bin, " ", [{return, list}, {parts, 5}])) end}, {"don't choke on latin-1", fun() -> {ok, TestDir} = lager_util:get_test_dir(), TestLog = filename:join(TestDir, "test.log"), %% XXX if this test fails, check that this file is encoded latin-1, not utf-8! 
gen_event:add_handler(lager_event, lager_file_backend, [{TestLog, info}, {lager_default_formatter}]), lager:log(error, self(),"~ts", [[76, 198, 221, 206, 78, $-, 239]]), {ok, Bin} = file:read_file(TestLog), Pid = pid_to_list(self()), Res = re:split(Bin, " ", [{return, list}, {parts, 5}]), ?assertMatch([_, _, "[error]", Pid, [76,195,134,195,157,195,142,78,45,195,175,$\n]], Res) end}, {"file can't be opened on startup triggers an error message", fun() -> {ok, TestDir} = lager_util:get_test_dir(), TestLog = filename:join(TestDir, "test.log"), ?assertEqual(ok, lager_util:safe_write_file(TestLog, [])), {ok, FInfo0} = file:read_file_info(TestLog, [raw]), FInfo1 = FInfo0#file_info{mode = 0}, ?assertEqual(ok, file:write_file_info(TestLog, FInfo1)), gen_event:add_handler(lager_event, lager_file_backend, {TestLog, info, 10*1024*1024, "$D0", 5}), % Note: required on win32, do this early to prevent subsequent failures % from preventing cleanup ?assertEqual(ok, file:write_file_info(TestLog, FInfo0)), ?assertEqual(1, lager_test_backend:count()), {_Level, _Time, Message, _Metadata} = lager_test_backend:pop(), MessageFlat = lists:flatten(Message), ?assertEqual( "Failed to open log file " ++ TestLog ++ " with error permission denied", MessageFlat) end}, {"file that becomes unavailable at runtime should trigger an error message", fun() -> case os:type() of {win32, _} -> % Note: test is skipped on win32 due to the fact that a file can't be % deleted or renamed while a process has an open file handle referencing it ok; _ -> {ok, TestDir} = lager_util:get_test_dir(), TestLog = filename:join(TestDir, "test.log"), gen_event:add_handler(lager_event, lager_file_backend, [{file, TestLog}, {level, info}, {check_interval, 0}]), ?assertEqual(0, lager_test_backend:count()), lager:log(error, self(), "Test message"), ?assertEqual(1, lager_test_backend:count()), ?assertEqual(ok, file:delete(TestLog)), ?assertEqual(ok, lager_util:safe_write_file(TestLog, "")), {ok, FInfo0} = file:read_file_info(TestLog, [raw]), FInfo1 = FInfo0#file_info{mode = 0}, ?assertEqual(ok, file:write_file_info(TestLog, FInfo1)), lager:log(error, self(), "Test message"), lager_test_backend:pop(), lager_test_backend:pop(), {_Level, _Time, Message, _Metadata} = lager_test_backend:pop(), ?assertEqual( "Failed to reopen log file " ++ TestLog ++ " with error permission denied", lists:flatten(Message)) end end}, {"unavailable files that are fixed at runtime should start having log messages written", fun() -> {ok, TestDir} = lager_util:get_test_dir(), TestLog = filename:join(TestDir, "test.log"), ?assertEqual(ok, lager_util:safe_write_file(TestLog, [])), {ok, FInfo} = file:read_file_info(TestLog, [raw]), OldPerms = FInfo#file_info.mode, ?assertEqual(ok, file:write_file_info(TestLog, FInfo#file_info{mode = 0})), gen_event:add_handler(lager_event, lager_file_backend, [{file, TestLog},{check_interval, 0}]), ?assertEqual(1, lager_test_backend:count()), {_Level, _Time, Message, _Metadata} = lager_test_backend:pop(), ?assertEqual( "Failed to open log file " ++ TestLog ++ " with error permission denied", lists:flatten(Message)), ?assertEqual(ok, file:write_file_info(TestLog, FInfo#file_info{mode = OldPerms})), lager:log(error, self(), "Test message"), {ok, Bin} = file:read_file(TestLog), Pid = pid_to_list(self()), ?assertMatch([_, _, "[error]", Pid, "Test message\n"], re:split(Bin, " ", [{return, list}, {parts, 5}])) end}, {"external logfile rotation/deletion should be handled", fun() -> case os:type() of {win32, _} -> % Note: test is skipped on win32 due to the 
fact that a file can't be deleted or renamed % while a process has an open file handle referencing it ok; _ -> {ok, TestDir} = lager_util:get_test_dir(), TestLog = filename:join(TestDir, "test.log"), TestLog0 = TestLog ++ ".0", gen_event:add_handler(lager_event, lager_file_backend, [{file, TestLog}, {level, info}, {check_interval, 0}]), ?assertEqual(0, lager_test_backend:count()), lager:log(error, self(), "Test message1"), ?assertEqual(1, lager_test_backend:count()), ?assertEqual(ok, file:delete(TestLog)), ?assertEqual(ok, lager_util:safe_write_file(TestLog, "")), lager:log(error, self(), "Test message2"), ?assertEqual(2, lager_test_backend:count()), {ok, Bin} = file:read_file(TestLog), Pid = pid_to_list(self()), ?assertMatch([_, _, "[error]", Pid, "Test message2\n"], re:split(Bin, " ", [{return, list}, {parts, 5}])), ?assertEqual(ok, file:rename(TestLog, TestLog0)), lager:log(error, self(), "Test message3"), ?assertEqual(3, lager_test_backend:count()), {ok, Bin2} = file:read_file(TestLog), ?assertMatch([_, _, "[error]", Pid, "Test message3\n"], re:split(Bin2, " ", [{return, list}, {parts, 5}])) end end}, {"internal size rotation should work", fun() -> {ok, TestDir} = lager_util:get_test_dir(), TestLog = filename:join(TestDir, "test.log"), TestLog0 = TestLog ++ ".0", gen_event:add_handler(lager_event, lager_file_backend, [{file, TestLog}, {level, info}, {check_interval, 0}, {size, 10}]), lager:log(error, self(), "Test message1"), lager:log(error, self(), "Test message1"), ?assert(filelib:is_regular(TestLog0)) end}, {"internal time rotation should work", fun() -> {ok, TestDir} = lager_util:get_test_dir(), TestLog = filename:join(TestDir, "test.log"), TestLog0 = TestLog ++ ".0", gen_event:add_handler(lager_event, lager_file_backend, [{file, TestLog}, {level, info}, {check_interval, 1000}]), lager:log(error, self(), "Test message1"), lager:log(error, self(), "Test message1"), whereis(lager_event) ! 
{rotate, TestLog}, lager:log(error, self(), "Test message1"), ?assert(filelib:is_regular(TestLog0)) end}, {"rotation call should work", fun() -> {ok, TestDir} = lager_util:get_test_dir(), TestLog = filename:join(TestDir, "test.log"), TestLog0 = TestLog ++ ".0", gen_event:add_handler(lager_event, {lager_file_backend, TestLog}, [{file, TestLog}, {level, info}, {check_interval, 1000}]), lager:log(error, self(), "Test message1"), lager:log(error, self(), "Test message1"), gen_event:call(lager_event, {lager_file_backend, TestLog}, rotate, infinity), lager:log(error, self(), "Test message1"), ?assert(filelib:is_regular(TestLog0)) end}, {"sync_on option should work", fun() -> {ok, TestDir} = lager_util:get_test_dir(), TestLog = filename:join(TestDir, "test.log"), gen_event:add_handler(lager_event, lager_file_backend, [{file, TestLog}, {level, info}, {sync_on, "=info"}, {check_interval, 5000}, {sync_interval, 5000}]), lager:log(error, self(), "Test message1"), lager:log(error, self(), "Test message1"), ?assertEqual({ok, <<>>}, file:read_file(TestLog)), lager:log(info, self(), "Test message1"), {ok, Bin} = file:read_file(TestLog), ?assert(<<>> /= Bin) end}, {"sync_on none option should work (also tests sync_interval)", fun() -> {ok, TestDir} = lager_util:get_test_dir(), TestLog = filename:join(TestDir, "test.log"), gen_event:add_handler(lager_event, lager_file_backend, [{file, TestLog}, {level, info}, {sync_on, "none"}, {check_interval, 5000}, {sync_interval, 1000}]), lager:log(error, self(), "Test message1"), lager:log(error, self(), "Test message1"), ?assertEqual({ok, <<>>}, file:read_file(TestLog)), lager:log(info, self(), "Test message1"), ?assertEqual({ok, <<>>}, file:read_file(TestLog)), timer:sleep(2000), {ok, Bin} = file:read_file(TestLog), ?assert(<<>> /= Bin) end}, {"sync_size option should work", fun() -> {ok, TestDir} = lager_util:get_test_dir(), TestLog = filename:join(TestDir, "test.log"), gen_event:add_handler(lager_event, lager_file_backend, [{file, TestLog}, {level, info}, {sync_on, "none"}, {check_interval, 5001}, {sync_size, 640}, {sync_interval, 5001}]), lager:log(error, self(), "Test messageis64bytes"), lager:log(error, self(), "Test messageis64bytes"), lager:log(error, self(), "Test messageis64bytes"), lager:log(error, self(), "Test messageis64bytes"), lager:log(error, self(), "Test messageis64bytes"), ?assertEqual({ok, <<>>}, file:read_file(TestLog)), lager:log(error, self(), "Test messageis64bytes"), lager:log(error, self(), "Test messageis64bytes"), lager:log(error, self(), "Test messageis64bytes"), lager:log(error, self(), "Test messageis64bytes"), ?assertEqual({ok, <<>>}, file:read_file(TestLog)), %% now we've written enough bytes lager:log(error, self(), "Test messageis64bytes"), {ok, Bin} = file:read_file(TestLog), ?assert(<<>> /= Bin) end}, {"runtime level changes", fun() -> {ok, TestDir} = lager_util:get_test_dir(), TestLog = filename:join(TestDir, "test.log"), gen_event:add_handler(lager_event, {lager_file_backend, TestLog}, {TestLog, info}), ?assertEqual(0, lager_test_backend:count()), lager:log(info, self(), "Test message1"), lager:log(error, self(), "Test message2"), {ok, Bin} = file:read_file(TestLog), Lines = length(re:split(Bin, "\n", [{return, list}, trim])), ?assertEqual(Lines, 2), ?assertEqual(ok, lager:set_loglevel(lager_file_backend, TestLog, warning)), lager:log(info, self(), "Test message3"), %% this won't get logged lager:log(error, self(), "Test message4"), {ok, Bin2} = file:read_file(TestLog), Lines2 = length(re:split(Bin2, "\n", [{return, list}, 
trim])), ?assertEqual(Lines2, 3) end}, {"invalid runtime level changes", fun() -> {ok, TestDir} = lager_util:get_test_dir(), TestLog = filename:join(TestDir, "test.log"), TestLog3 = filename:join(TestDir, "test3.log"), gen_event:add_handler(lager_event, lager_file_backend, [{TestLog, info, 10*1024*1024, "$D0", 5}, {lager_default_formatter}]), gen_event:add_handler(lager_event, lager_file_backend, {TestLog3, info}), ?assertEqual({error, bad_module}, lager:set_loglevel(lager_file_backend, TestLog, warning)) end}, {"tracing should work", fun() -> {ok, TestDir} = lager_util:get_test_dir(), TestLog = filename:join(TestDir, "test.log"), gen_event:add_handler(lager_event, lager_file_backend, {TestLog, critical}), lager:error("Test message"), ?assertEqual({ok, <<>>}, file:read_file(TestLog)), {Level, _} = lager_config:get({lager_event, loglevel}), lager_config:set({lager_event, loglevel}, {Level, [{[{module, ?MODULE}], ?DEBUG, {lager_file_backend, TestLog}}]}), lager:error("Test message"), timer:sleep(1000), {ok, Bin} = file:read_file(TestLog), ?assertMatch([_, _, "[error]", _, "Test message\n"], re:split(Bin, " ", [{return, list}, {parts, 5}])) end}, {"tracing should not duplicate messages", fun() -> case os:type() of {win32, _} -> % Note: test is skipped on win32 due to the fact that a file can't be % deleted or renamed while a process has an open file handle referencing it ok; _ -> {ok, TestDir} = lager_util:get_test_dir(), TestLog = filename:join(TestDir, "test.log"), gen_event:add_handler(lager_event, lager_file_backend, [{file, TestLog}, {level, critical}, {check_interval, always}]), timer:sleep(500), lager:critical("Test message"), {ok, Bin1} = file:read_file(TestLog), ?assertMatch([_, _, "[critical]", _, "Test message\n"], re:split(Bin1, " ", [{return, list}, {parts, 5}])), ?assertEqual(ok, file:delete(TestLog)), {Level, _} = lager_config:get({lager_event, loglevel}), lager_config:set({lager_event, loglevel}, {Level, [{[{module, ?MODULE}], ?DEBUG, {lager_file_backend, TestLog}}]}), lager:critical("Test message"), {ok, Bin2} = file:read_file(TestLog), ?assertMatch([_, _, "[critical]", _, "Test message\n"], re:split(Bin2, " ", [{return, list}, {parts, 5}])), ?assertEqual(ok, file:delete(TestLog)), lager:error("Test message"), {ok, Bin3} = file:read_file(TestLog), ?assertMatch([_, _, "[error]", _, "Test message\n"], re:split(Bin3, " ", [{return, list}, {parts, 5}])) end end}, {"tracing to a dedicated file should work", fun() -> {ok, TestDir} = lager_util:get_test_dir(), TestLog = filename:join(TestDir, "foo.log"), {ok, _} = lager:trace_file(TestLog, [{module, ?MODULE}]), lager:error("Test message"), %% not eligible for trace lager:log(error, self(), "Test message"), {ok, Bin3} = file:read_file(TestLog), ?assertMatch([_, _, "[error]", _, "Test message\n"], re:split(Bin3, " ", [{return, list}, {parts, 5}])) end}, {"tracing to a dedicated file should work even if root_log is set", fun() -> {ok, TestDir} = lager_util:get_test_dir(), LogName = "foo.log", LogPath = filename:join(TestDir, LogName), application:set_env(lager, log_root, TestDir), {ok, _} = lager:trace_file(LogName, [{module, ?MODULE}]), lager:error("Test message"), %% not eligible for trace lager:log(error, self(), "Test message"), {ok, Bin3} = file:read_file(LogPath), application:unset_env(lager, log_root), ?assertMatch([_, _, "[error]", _, "Test message\n"], re:split(Bin3, " ", [{return, list}, {parts, 5}])) end}, {"tracing with options should work", fun() -> {ok, TestDir} = lager_util:get_test_dir(), TestLog = 
filename:join(TestDir, "foo.log"), TestLog0 = TestLog ++ ".0", {ok, _} = lager:trace_file(TestLog, [{module, ?MODULE}], [{size, 20}, {check_interval, 1}]), lager:error("Test message"), ?assertNot(filelib:is_regular(TestLog0)), %% rotation is sensitive to intervals between %% writes so we sleep to exceed the 1 %% millisecond interval specified by %% check_interval above timer:sleep(2), lager:error("Test message"), timer:sleep(10), ?assert(filelib:is_regular(TestLog0)) end}, {"no silent hwm drops", fun() -> MsgCount = 15, {ok, TestDir} = lager_util:get_test_dir(), TestLog = filename:join(TestDir, "test.log"), gen_event:add_handler(lager_event, lager_file_backend, [{file, TestLog}, {level, info}, {high_water_mark, 5}, {flush_queue, false}, {sync_on, "=warning"}]), {_, _, MS} = os:timestamp(), % start close to the beginning of a new second ?assertEqual(ok, timer:sleep((1000000 - MS) div 1000 + 1)), [lager:log(info, self(), "Foo ~p", [K]) || K <- lists:seq(1, MsgCount)], ?assertEqual(MsgCount, lager_test_backend:count()), % Note: bumped from 1000 to 1250 to ensure delayed write flushes to disk ?assertEqual(ok, timer:sleep(1250)), {ok, Bin} = file:read_file(TestLog), Last = lists:last(re:split(Bin, "\n", [{return, list}, trim])), ?assertMatch([_, _, _, _, "lager_file_backend dropped 10 messages in the last second that exceeded the limit of 5 messages/sec"], re:split(Last, " ", [{return, list}, {parts, 5}])) end} ]}. trace_files_test_() -> {foreach, fun() -> {ok, TestDir} = lager_util:get_test_dir(), Log = filename:join(TestDir, "test.log"), Debug = filename:join(TestDir, "debug.log"), Events = filename:join(TestDir, "events.log"), ok = error_logger:tty(false), ok = lager_util:safe_application_load(lager), ok = application:set_env(lager, handlers, [ {lager_file_backend, [ {file, Log}, {level, error}, {formatter, lager_default_formatter}, {formatter_config, [message, "\n"]} ]} ]), ok = application:set_env(lager, traces, [ { % get default level of debug {lager_file_backend, Debug}, [{module, ?MODULE}] }, { % Handler Filters Level {lager_file_backend, Events}, [{module, ?MODULE}], notice } ]), ok = application:set_env(lager, async_threshold, undefined), ok = lager:start(), {Log, Debug, Events} end, fun({_, _, _}) -> catch ets:delete(lager_config), ok = application:unset_env(lager, traces), ok = application:stop(lager), ok = lager_util:delete_test_dir(), ok = error_logger:tty(true) end, [ fun({Log, Debug, Events}) -> {"a trace using file backend set up in configuration should work", fun() -> lager:error("trace test error message"), lager:info("info message"), %% not eligible for trace lager:log(error, self(), "Not trace test message"), {ok, BinInfo} = file:read_file(Events), ?assertMatch([_, _, "[error]", _, "trace test error message\n"], re:split(BinInfo, " ", [{return, list}, {parts, 5}])), ?assert(filelib:is_regular(Log)), {ok, BinInfo2} = file:read_file(Log), ?assertMatch(["trace test error message", "Not trace test message\n"], re:split(BinInfo2, "\n", [{return, list}, {parts, 2}])), ?assert(filelib:is_regular(Debug)), %% XXX Aughhhh, wish I could force this to flush somehow... % should take about 1 second, try for 3 ... ?assertEqual(2, count_lines_until(2, add_secs(os:timestamp(), 3), Debug, 0)) end} end ]}. 
count_lines_until(Lines, Timeout, File, Last) -> case timer:now_diff(Timeout, os:timestamp()) > 0 of true -> timer:sleep(333), {ok, Bin} = file:read_file(File), case erlang:length(re:split(Bin, "\n", [{return, list}, trim])) of Count when Count < Lines -> count_lines_until(Lines, Timeout, File, Count); Count -> Count end; _ -> Last end. formatting_test_() -> {foreach, fun() -> {ok, TestDir} = lager_util:get_test_dir(), Log1 = filename:join(TestDir, "test.log"), Log2 = filename:join(TestDir, "test2.log"), ?assertEqual(ok, lager_util:safe_write_file(Log1, [])), ?assertEqual(ok, lager_util:safe_write_file(Log2, [])), ok = error_logger:tty(false), ok = lager_util:safe_application_load(lager), ok = application:set_env(lager, handlers, [{lager_test_backend, info}]), ok = application:set_env(lager, error_logger_redirect, false), ok = lager:start(), %% same race condition issue ok = timer:sleep(5), {ok, Log1, Log2} end, fun({ok, _, _}) -> ok = application:stop(lager), ok = application:stop(goldrush), ok = lager_util:delete_test_dir(), ok = error_logger:tty(true) end, [ fun({ok, Log1, Log2}) -> {"Should have two log files, the second prefixed with 2>", fun() -> gen_event:add_handler(lager_event, lager_file_backend, [{Log1, debug}, {lager_default_formatter, ["[",severity,"] ", message, "\n"]}]), gen_event:add_handler(lager_event, lager_file_backend, [{Log2, debug}, {lager_default_formatter, ["2> [",severity,"] ", message, "\n"]}]), lager:log(error, self(), "Test message"), ?assertMatch({ok, <<"[error] Test message\n">>},file:read_file(Log1)), ?assertMatch({ok, <<"2> [error] Test message\n">>},file:read_file(Log2)) end} end ]}. config_validation_test_() -> [ {"missing file", ?_assertEqual(false, validate_logfile_proplist([{level, info}, {size, 10}])) }, {"bad level", ?_assertEqual(false, validate_logfile_proplist([{file, "test.log"}, {level, blah}, {size, 10}])) }, {"bad size", ?_assertEqual(false, validate_logfile_proplist([{file, "test.log"}, {size, infinity}])) }, {"bad count", ?_assertEqual(false, validate_logfile_proplist([{file, "test.log"}, {count, infinity}])) }, {"bad high water mark", ?_assertEqual(false, validate_logfile_proplist([{file, "test.log"}, {high_water_mark, infinity}])) }, {"bad date", ?_assertEqual(false, validate_logfile_proplist([{file, "test.log"}, {date, "midnight"}])) }, {"blank date is ok", ?_assertMatch([_|_], validate_logfile_proplist([{file, "test.log"}, {date, ""}])) }, {"bad sync_interval", ?_assertEqual(false, validate_logfile_proplist([{file, "test.log"}, {sync_interval, infinity}])) }, {"bad sync_size", ?_assertEqual(false, validate_logfile_proplist([{file, "test.log"}, {sync_size, infinity}])) }, {"bad check_interval", ?_assertEqual(false, validate_logfile_proplist([{file, "test.log"}, {check_interval, infinity}])) }, {"bad sync_on level", ?_assertEqual(false, validate_logfile_proplist([{file, "test.log"}, {sync_on, infinity}])) }, {"bad formatter module", ?_assertEqual(false, validate_logfile_proplist([{file, "test.log"}, {formatter, "io:format"}])) }, {"bad formatter config", ?_assertEqual(false, validate_logfile_proplist([{file, "test.log"}, {formatter_config, blah}])) }, {"unknown option", ?_assertEqual(false, validate_logfile_proplist([{file, "test.log"}, {rhubarb, spicy}])) } ]. -endif. lager-3.8.0/src/lager_msg.erl0000644000232200023220000000353613523436621016431 0ustar debalancedebalance-module(lager_msg). -export([new/4, new/5]). -export([message/1]). -export([timestamp/1]). -export([datetime/1]). -export([severity/1]). -export([severity_as_int/1]). 
-export([metadata/1]). -export([destinations/1]). -record(lager_msg,{ destinations :: list(), metadata :: [tuple()], severity :: lager:log_level(), datetime :: {string(), string()}, timestamp :: erlang:timestamp(), message :: list() }). -opaque lager_msg() :: #lager_msg{}. -export_type([lager_msg/0]). %% create with provided timestamp, handy for testing mostly -spec new(list(), erlang:timestamp(), lager:log_level(), [tuple()], list()) -> lager_msg(). new(Msg, Timestamp, Severity, Metadata, Destinations) -> {Date, Time} = lager_util:format_time(lager_util:maybe_utc(lager_util:localtime_ms(Timestamp))), #lager_msg{message=Msg, datetime={Date, Time}, timestamp=Timestamp, severity=Severity, metadata=Metadata, destinations=Destinations}. -spec new(list(), lager:log_level(), [tuple()], list()) -> lager_msg(). new(Msg, Severity, Metadata, Destinations) -> Now = os:timestamp(), new(Msg, Now, Severity, Metadata, Destinations). -spec message(lager_msg()) -> list(). message(Msg) -> Msg#lager_msg.message. -spec timestamp(lager_msg()) -> erlang:timestamp(). timestamp(Msg) -> Msg#lager_msg.timestamp. -spec datetime(lager_msg()) -> {string(), string()}. datetime(Msg) -> Msg#lager_msg.datetime. -spec severity(lager_msg()) -> lager:log_level(). severity(Msg) -> Msg#lager_msg.severity. -spec severity_as_int(lager_msg()) -> lager:log_level_number(). severity_as_int(Msg) -> lager_util:level_to_num(Msg#lager_msg.severity). -spec metadata(lager_msg()) -> [tuple()]. metadata(Msg) -> Msg#lager_msg.metadata. -spec destinations(lager_msg()) -> list(). destinations(Msg) -> Msg#lager_msg.destinations. lager-3.8.0/src/lager_transform.erl0000644000232200023220000003637513523436621017665 0ustar debalancedebalance%% Copyright (c) 2011-2012 Basho Technologies, Inc. All Rights Reserved. %% %% This file is provided to you under the Apache License, %% Version 2.0 (the "License"); you may not use this file %% except in compliance with the License. You may obtain %% a copy of the License at %% %% http://www.apache.org/licenses/LICENSE-2.0 %% %% Unless required by applicable law or agreed to in writing, %% software distributed under the License is distributed on an %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY %% KIND, either express or implied. See the License for the %% specific language governing permissions and limitations %% under the License. %% @doc The parse transform used for lager messages. %% This parse transform rewrites functions calls to lager:Severity/1,2 into %% a more complicated function that captures module, function, line, pid and %% time as well. The entire function call is then wrapped in a case that %% checks the lager_config 'loglevel' value, so the code isn't executed if %% nothing wishes to consume the message. -module(lager_transform). -include("lager.hrl"). -export([parse_transform/2]). %% @private parse_transform(AST, Options) -> TruncSize = proplists:get_value(lager_truncation_size, Options, ?DEFAULT_TRUNCATION), Enable = proplists:get_value(lager_print_records_flag, Options, true), Sinks = [lager] ++ proplists:get_value(lager_extra_sinks, Options, []), Functions = proplists:get_value(lager_function_transforms, Options, []), put(print_records_flag, Enable), put(truncation_size, TruncSize), put(sinks, Sinks), put(functions, lists:keysort(1, Functions)), erlang:put(records, []), %% .app file should either be in the outdir, or the same dir as the source file guess_application(proplists:get_value(outdir, Options), hd(AST)), walk_ast([], AST). 
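%% A minimal usage sketch for the rewrite described above (the module body and the
%% names used here are hypothetical, for illustration only):
%%
%%     -compile([{parse_transform, lager_transform}]).
%%
%%     handle_timeout(Reason) ->
%%         lager:warning("request timed out: ~p", [Reason]).
%%
%% Conceptually, the lager:warning/2 call is expanded into a case expression that first
%% checks whereis/1 on the target sink and the cached {loglevel, traces} entry in
%% lager_config, and only then calls lager:do_log with the injected
%% module/function/line/pid metadata, so the format arguments are not evaluated when no
%% backend or trace would consume the message.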
walk_ast(Acc, []) -> case get(print_records_flag) of true -> insert_record_attribute(Acc); false -> lists:reverse(Acc) end; walk_ast(Acc, [{attribute, _, module, {Module, _PmodArgs}}=H|T]) -> %% A wild parameterized module appears! put(module, Module), walk_ast([H|Acc], T); walk_ast(Acc, [{attribute, _, module, Module}=H|T]) -> put(module, Module), walk_ast([H|Acc], T); walk_ast(Acc, [{attribute, _, lager_function_transforms, FromModule }=H|T]) -> %% Merge transform options from the module over the compile options FromOptions = get(functions), put(functions, orddict:merge(fun(_Key, _V1, V2) -> V2 end, FromOptions, lists:keysort(1, FromModule))), walk_ast([H|Acc], T); walk_ast(Acc, [{function, Line, Name, Arity, Clauses}|T]) -> put(function, Name), walk_ast([{function, Line, Name, Arity, walk_clauses([], Clauses)}|Acc], T); walk_ast(Acc, [{attribute, _, record, {Name, Fields}}=H|T]) -> FieldNames = lists:map(fun record_field_name/1, Fields), stash_record({Name, FieldNames}), walk_ast([H|Acc], T); walk_ast(Acc, [H|T]) -> walk_ast([H|Acc], T). record_field_name({record_field, _, {atom, _, FieldName}}) -> FieldName; record_field_name({record_field, _, {atom, _, FieldName}, _Default}) -> FieldName; record_field_name({typed_record_field, Field, _Type}) -> record_field_name(Field). walk_clauses(Acc, []) -> lists:reverse(Acc); walk_clauses(Acc, [{clause, Line, Arguments, Guards, Body}|T]) -> walk_clauses([{clause, Line, Arguments, Guards, walk_body([], Body)}|Acc], T). walk_body(Acc, []) -> lists:reverse(Acc); walk_body(Acc, [H|T]) -> walk_body([transform_statement(H, get(sinks))|Acc], T). transform_statement({call, Line, {remote, _Line1, {atom, _Line2, Module}, {atom, _Line3, Function}}, Arguments0} = Stmt, Sinks) -> case lists:member(Module, Sinks) of true -> case lists:member(Function, ?LEVELS) of true -> SinkName = lager_util:make_internal_sink_name(Module), do_transform(Line, SinkName, Function, Arguments0); false -> case lists:keyfind(Function, 1, ?LEVELS_UNSAFE) of {Function, Severity} -> SinkName = lager_util:make_internal_sink_name(Module), do_transform(Line, SinkName, Severity, Arguments0, unsafe); false -> Stmt end end; false -> list_to_tuple(transform_statement(tuple_to_list(Stmt), Sinks)) end; transform_statement(Stmt, Sinks) when is_tuple(Stmt) -> list_to_tuple(transform_statement(tuple_to_list(Stmt), Sinks)); transform_statement(Stmt, Sinks) when is_list(Stmt) -> [transform_statement(S, Sinks) || S <- Stmt]; transform_statement(Stmt, _Sinks) -> Stmt. add_function_transforms(_Line, DefaultAttrs, []) -> DefaultAttrs; add_function_transforms(Line, DefaultAttrs, [{Atom, on_emit, {Module, Function}}|Remainder]) -> NewFunction = {tuple, Line, [ {atom, Line, Atom}, {'fun', Line, { function, {atom, Line, Module}, {atom, Line, Function}, {integer, Line, 0} }} ]}, add_function_transforms(Line, {cons, Line, NewFunction, DefaultAttrs}, Remainder); add_function_transforms(Line, DefaultAttrs, [{Atom, on_log, {Module, Function}}|Remainder]) -> NewFunction = {tuple, Line, [ {atom, Line, Atom}, {call, Line, {remote, Line, {atom, Line, Module}, {atom, Line, Function}}, []} ]}, add_function_transforms(Line, {cons, Line, NewFunction, DefaultAttrs}, Remainder). do_transform(Line, SinkName, Severity, Arguments0) -> do_transform(Line, SinkName, Severity, Arguments0, safe). 
do_transform(Line, SinkName, Severity, Arguments0, Safety) -> SeverityAsInt=lager_util:level_to_num(Severity), DefaultAttrs0 = {cons, Line, {tuple, Line, [ {atom, Line, module}, {atom, Line, get(module)}]}, {cons, Line, {tuple, Line, [ {atom, Line, function}, {atom, Line, get(function)}]}, {cons, Line, {tuple, Line, [ {atom, Line, line}, {integer, Line, Line}]}, {cons, Line, {tuple, Line, [ {atom, Line, pid}, {call, Line, {atom, Line, pid_to_list}, [ {call, Line, {atom, Line ,self}, []}]}]}, {cons, Line, {tuple, Line, [ {atom, Line, node}, {call, Line, {atom, Line, node}, []}]}, %% get the metadata with lager:md(), this will always return a list so we can use it as the tail here {call, Line, {remote, Line, {atom, Line, lager}, {atom, Line, md}}, []}}}}}}, %{nil, Line}}}}}}}, Functions = get(functions), DefaultAttrs1 = add_function_transforms(Line, DefaultAttrs0, Functions), DefaultAttrs = case erlang:get(application) of undefined -> DefaultAttrs1; App -> %% stick the application in the attribute list concat_lists({cons, Line, {tuple, Line, [ {atom, Line, application}, {atom, Line, App}]}, {nil, Line}}, DefaultAttrs1) end, {Meta, Message, Arguments} = handle_args(DefaultAttrs, Line, Arguments0), %% Generate some unique variable names so we don't accidentally export from case clauses. %% Note that these are not actual atoms, but the AST treats variable names as atoms. LevelVar = make_varname("__Level", Line), TracesVar = make_varname("__Traces", Line), PidVar = make_varname("__Pid", Line), LogFun = case Safety of safe -> do_log; unsafe -> do_log_unsafe end, %% Wrap the call to lager:dispatch_log/6 in case that will avoid doing any work if this message is not elegible for logging %% See lager.erl (lines 89-100) for lager:dispatch_log/6 %% case {whereis(Sink), whereis(?DEFAULT_SINK), lager_config:get({Sink, loglevel}, {?LOG_NONE, []})} of {'case',Line, {tuple,Line, [{call,Line,{atom,Line,whereis},[{atom,Line,SinkName}]}, {call,Line,{atom,Line,whereis},[{atom,Line,?DEFAULT_SINK}]}, {call,Line, {remote,Line,{atom,Line,lager_config},{atom,Line,get}}, [{tuple,Line,[{atom,Line,SinkName},{atom,Line,loglevel}]}, {tuple,Line,[{integer,Line,0},{nil,Line}]}]}]}, %% {undefined, undefined, _} -> {error, lager_not_running}; [{clause,Line, [{tuple,Line, [{atom,Line,undefined},{atom,Line,undefined},{var,Line,'_'}]}], [], %% trick the linter into avoiding a 'term constructed but not used' error: %% (fun() -> {error, lager_not_running} end)() [{call, Line, {'fun', Line, {clauses, [{clause, Line, [],[], [{tuple, Line, [{atom, Line, error},{atom, Line, lager_not_running}]}]}]}}, []}] }, %% {undefined, _, _} -> {error, {sink_not_configured, Sink}}; {clause,Line, [{tuple,Line, [{atom,Line,undefined},{var,Line,'_'},{var,Line,'_'}]}], [], %% same trick as above to avoid linter error [{call, Line, {'fun', Line, {clauses, [{clause, Line, [],[], [{tuple,Line, [{atom,Line,error}, {tuple,Line,[{atom,Line,sink_not_configured},{atom,Line,SinkName}]}]}]}]}}, []}] }, %% {SinkPid, _, {Level, Traces}} when ... 
-> lager:do_log/9; {clause,Line, [{tuple,Line, [{var,Line,PidVar}, {var,Line,'_'}, {tuple,Line,[{var,Line,LevelVar},{var,Line,TracesVar}]}]}], [[{op, Line, 'orelse', {op, Line, '/=', {op, Line, 'band', {var, Line, LevelVar}, {integer, Line, SeverityAsInt}}, {integer, Line, 0}}, {op, Line, '/=', {var, Line, TracesVar}, {nil, Line}}}]], [{call,Line,{remote, Line, {atom, Line, lager}, {atom, Line, LogFun}}, [{atom,Line,Severity}, Meta, Message, Arguments, {integer, Line, get(truncation_size)}, {integer, Line, SeverityAsInt}, {var, Line, LevelVar}, {var, Line, TracesVar}, {atom, Line, SinkName}, {var, Line, PidVar}]}]}, %% _ -> ok {clause,Line,[{var,Line,'_'}],[],[{atom,Line,ok}]}]}. handle_args(DefaultAttrs, Line, [{cons, LineNum, {tuple, _, _}, _} = Attrs]) -> {concat_lists(DefaultAttrs, Attrs), {string, LineNum, ""}, {atom, Line, none}}; handle_args(DefaultAttrs, Line, [Format]) -> {DefaultAttrs, Format, {atom, Line, none}}; handle_args(DefaultAttrs, Line, [Arg1, Arg2]) -> %% some ambiguity here, figure out if these arguments are %% [Format, Args] or [Attr, Format]. %% The trace attributes will be a list of tuples, so check %% for that. case {element(1, Arg1), Arg1} of {_, {cons, _, {tuple, _, _}, _}} -> {concat_lists(Arg1, DefaultAttrs), Arg2, {atom, Line, none}}; {Type, _} when Type == var; Type == lc; Type == call; Type == record_field -> %% crap, its not a literal. look at the second %% argument to see if it is a string case Arg2 of {string, _, _} -> {concat_lists(Arg1, DefaultAttrs), Arg2, {atom, Line, none}}; _ -> %% not a string, going to have to guess %% it's the argument list {DefaultAttrs, Arg1, Arg2} end; _ -> {DefaultAttrs, Arg1, Arg2} end; handle_args(DefaultAttrs, _Line, [Attrs, Format, Args]) -> {concat_lists(Attrs, DefaultAttrs), Format, Args}. make_varname(Prefix, Line) -> list_to_atom(Prefix ++ atom_to_list(get(module)) ++ integer_to_list(Line)). %% concat 2 list ASTs by replacing the terminating [] in A with the contents of B concat_lists({var, Line, _Name}=Var, B) -> %% concatenating a var with a cons {call, Line, {remote, Line, {atom, Line, lists},{atom, Line, flatten}}, [{cons, Line, Var, B}]}; concat_lists({lc, Line, _Body, _Generator} = LC, B) -> %% concatenating a LC with a cons {call, Line, {remote, Line, {atom, Line, lists},{atom, Line, flatten}}, [{cons, Line, LC, B}]}; concat_lists({call, Line, _Function, _Args} = Call, B) -> %% concatenating a call with a cons {call, Line, {remote, Line, {atom, Line, lists},{atom, Line, flatten}}, [{cons, Line, Call, B}]}; concat_lists({record_field, Line, _Var, _Record, _Field} = Rec, B) -> %% concatenating a record_field with a cons {call, Line, {remote, Line, {atom, Line, lists},{atom, Line, flatten}}, [{cons, Line, Rec, B}]}; concat_lists({nil, _Line}, B) -> B; concat_lists({cons, Line, Element, Tail}, B) -> {cons, Line, Element, concat_lists(Tail, B)}. stash_record(Record) -> Records = case erlang:get(records) of undefined -> []; R -> R end, erlang:put(records, [Record|Records]). insert_record_attribute(AST) -> lists:foldl(fun({attribute, Line, module, _}=E, Acc) -> [E, {attribute, Line, lager_records, erlang:get(records)}|Acc]; (E, Acc) -> [E|Acc] end, [], AST). 
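%% For reference, handle_args/3 (defined earlier in this module) distinguishes the call
%% shapes the transform accepts; roughly (all names and values below are hypothetical):
%%
%%     lager:info("simple message"),
%%     lager:info("value: ~p", [Value]),
%%     lager:info([{request_id, ReqId}], "value: ~p", [Value]).
%%
%% When the first argument is not a literal list of attribute tuples (for example a
%% variable or a function call), the second argument is inspected: a string literal
%% there means the first argument is treated as extra attributes, otherwise the pair is
%% assumed to be the format string and its argument list.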
guess_application(Dirname, Attr) when Dirname /= undefined -> case find_app_file(Dirname) of no_idea -> %% try it based on source file directory (app.src most likely) guess_application(undefined, Attr); _ -> ok end; guess_application(undefined, {attribute, _, file, {Filename, _}}) -> Dir = filename:dirname(Filename), find_app_file(Dir); guess_application(_, _) -> ok. find_app_file(Dir) -> case filelib:wildcard(Dir++"/*.{app,app.src}") of [] -> no_idea; [File] -> case file:consult(File) of {ok, [{application, Appname, _Attributes}|_]} -> erlang:put(application, Appname); _ -> no_idea end; _ -> %% multiple files, uh oh no_idea end. lager-3.8.0/src/lager_stdlib.erl0000644000232200023220000004214213523436621017120 0ustar debalancedebalance%% %% %CopyrightBegin% %% %% Copyright Ericsson AB 1996-2009. All Rights Reserved. %% %% The contents of this file are subject to the Erlang Public License, %% Version 1.1, (the "License"); you may not use this file except in %% compliance with the License. You should have received a copy of the %% Erlang Public License along with this software. If not, it can be %% retrieved online at http://www.erlang.org/. %% %% Software distributed under the License is distributed on an "AS IS" %% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See %% the License for the specific language governing rights and limitations %% under the License. %% %% %CopyrightEnd% %% %% @doc Functions from Erlang OTP distribution that are really useful %% but aren't exported. %% %% All functions in this module are covered by the Erlang/OTP source %% distribution's license, the Erlang Public License. See %% http://www.erlang.org/ for full details. -module(lager_stdlib). -export([string_p/1]). -export([write_time/2, maybe_utc/1]). -export([is_my_error_report/1, is_my_info_report/1]). -export([sup_get/2]). -export([proc_lib_format/2]). %% from error_logger_file_h string_p([]) -> false; string_p(Term) -> string_p1(Term). string_p1([H|T]) when is_integer(H), H >= $\s, H < 256 -> string_p1(T); string_p1([$\n|T]) -> string_p1(T); string_p1([$\r|T]) -> string_p1(T); string_p1([$\t|T]) -> string_p1(T); string_p1([$\v|T]) -> string_p1(T); string_p1([$\b|T]) -> string_p1(T); string_p1([$\f|T]) -> string_p1(T); string_p1([$\e|T]) -> string_p1(T); string_p1([H|T]) when is_list(H) -> case string_p1(H) of true -> string_p1(T); _ -> false end; string_p1([]) -> true; string_p1(_) -> false. %% From calendar -type year1970() :: 1970..10000. % should probably be 1970.. -type month() :: 1..12. -type day() :: 1..31. -type hour() :: 0..23. -type minute() :: 0..59. -type second() :: 0..59. -type t_time() :: {hour(),minute(),second()}. -type t_datetime1970() :: {{year1970(),month(),day()},t_time()}. %% From OTP stdlib's error_logger_tty_h.erl ... These functions aren't %% exported. -spec write_time({utc, t_datetime1970()} | t_datetime1970(), string()) -> string(). write_time({utc,{{Y,Mo,D},{H,Mi,S}}},Type) -> io_lib:format("~n=~s==== ~p-~s-~p::~s:~s:~s UTC ===~n", [Type,D,month(Mo),Y,t(H),t(Mi),t(S)]); write_time({{Y,Mo,D},{H,Mi,S}},Type) -> io_lib:format("~n=~s==== ~p-~s-~p::~s:~s:~s ===~n", [Type,D,month(Mo),Y,t(H),t(Mi),t(S)]). -spec maybe_utc(t_datetime1970()) -> {utc, t_datetime1970()} | t_datetime1970(). 
maybe_utc(Time) -> UTC = case application:get_env(sasl, utc_log) of {ok, Val} -> Val; undefined -> %% Backwards compatible: application:get_env(stdlib, utc_log, false) end, if UTC =:= true -> UTCTime = case calendar:local_time_to_universal_time_dst(Time) of [] -> calendar:local_time(); [T0|_] -> T0 end, {utc, UTCTime}; true -> Time end. t(X) when is_integer(X) -> t1(integer_to_list(X)); t(_) -> "". t1([X]) -> [$0,X]; t1(X) -> X. month(1) -> "Jan"; month(2) -> "Feb"; month(3) -> "Mar"; month(4) -> "Apr"; month(5) -> "May"; month(6) -> "Jun"; month(7) -> "Jul"; month(8) -> "Aug"; month(9) -> "Sep"; month(10) -> "Oct"; month(11) -> "Nov"; month(12) -> "Dec". %% From OTP sasl's sasl_report.erl ... These functions aren't %% exported. -spec is_my_error_report(atom()) -> boolean(). is_my_error_report(supervisor_report) -> true; is_my_error_report(crash_report) -> true; is_my_error_report(_) -> false. -spec is_my_info_report(atom()) -> boolean(). is_my_info_report(progress) -> true; is_my_info_report(_) -> false. -spec sup_get(term(), [proplists:property()]) -> term(). sup_get(Tag, Report) -> case lists:keysearch(Tag, 1, Report) of {value, {_, Value}} -> Value; _ -> "" end. %% From OTP stdlib's proc_lib.erl ... These functions aren't exported. -spec proc_lib_format([term()], pos_integer()) -> string(). proc_lib_format([OwnReport,LinkReport], FmtMaxBytes) -> OwnFormat = format_report(OwnReport, FmtMaxBytes), LinkFormat = format_report(LinkReport, FmtMaxBytes), %% io_lib:format here is OK because we're limiting max length elsewhere. Str = io_lib:format(" crasher:~n~s neighbours:~n~s",[OwnFormat,LinkFormat]), lists:flatten(Str). format_report(Rep, FmtMaxBytes) when is_list(Rep) -> format_rep(Rep, FmtMaxBytes); format_report(Rep, FmtMaxBytes) -> {Str, _} = lager_trunc_io:print(Rep, FmtMaxBytes), io_lib:format("~p~n", [Str]). format_rep([{initial_call,InitialCall}|Rep], FmtMaxBytes) -> [format_mfa(InitialCall, FmtMaxBytes)|format_rep(Rep, FmtMaxBytes)]; format_rep([{error_info,{Class,Reason,StackTrace}}|Rep], FmtMaxBytes) -> [format_exception(Class, Reason, StackTrace, FmtMaxBytes)|format_rep(Rep, FmtMaxBytes)]; format_rep([{Tag,Data}|Rep], FmtMaxBytes) -> [format_tag(Tag, Data, FmtMaxBytes)|format_rep(Rep, FmtMaxBytes)]; format_rep(_, _S) -> []. format_exception(Class, Reason, StackTrace, FmtMaxBytes) -> PF = pp_fun(FmtMaxBytes), StackFun = fun(M, _F, _A) -> (M =:= erl_eval) or (M =:= ?MODULE) end, %% EI = " exception: ", EI = " ", [EI, lib_format_exception(1+length(EI), Class, Reason, StackTrace, StackFun, PF), "\n"]. format_mfa({M,F,Args}=StartF, FmtMaxBytes) -> try A = length(Args), [" initial call: ",atom_to_list(M),$:,atom_to_list(F),$/, integer_to_list(A),"\n"] catch error:_ -> format_tag(initial_call, StartF, FmtMaxBytes) end. pp_fun(FmtMaxBytes) -> fun(Term, _I) -> {Str, _} = lager_trunc_io:print(Term, FmtMaxBytes), io_lib:format("~s", [Str]) end. format_tag(Tag, Data, FmtMaxBytes) -> {Str, _} = lager_trunc_io:print(Data, FmtMaxBytes), io_lib:format(" ~p: ~s~n", [Tag, Str]). %% From OTP stdlib's lib.erl ... These functions aren't exported. 
lib_format_exception(I, Class, Reason, StackTrace, StackFun, FormatFun) when is_integer(I), I >= 1, is_function(StackFun, 3), is_function(FormatFun, 2) -> Str = n_spaces(I-1), {Term,Trace1,Trace} = analyze_exception(Class, Reason, StackTrace), Expl0 = explain_reason(Term, Class, Trace1, FormatFun, Str), Expl = io_lib:fwrite(<<"~s~s">>, [exited(Class), Expl0]), case format_stacktrace1(Str, Trace, FormatFun, StackFun) of [] -> Expl; Stack -> [Expl, $\n, Stack] end. analyze_exception(error, Term, Stack) -> case {is_stacktrace(Stack), Stack, Term} of {true, [{_M,_F,As}=MFA|MFAs], function_clause} when is_list(As) -> {Term,[MFA],MFAs}; {true, [{shell,F,A}], function_clause} when is_integer(A) -> {Term, [{F,A}], []}; {true, [{_M,_F,_AorAs}=MFA|MFAs], undef} -> {Term,[MFA],MFAs}; {true, _, _} -> {Term,[],Stack}; {false, _, _} -> {{Term,Stack},[],[]} end; analyze_exception(_Class, Term, Stack) -> case is_stacktrace(Stack) of true -> {Term,[],Stack}; false -> {{Term,Stack},[],[]} end. is_stacktrace([]) -> true; is_stacktrace([{M,F,A}|Fs]) when is_atom(M), is_atom(F), is_integer(A) -> is_stacktrace(Fs); is_stacktrace([{M,F,As}|Fs]) when is_atom(M), is_atom(F), length(As) >= 0 -> is_stacktrace(Fs); is_stacktrace(_) -> false. %% ERTS exit codes (some of them are also returned by erl_eval): explain_reason(badarg, error, [], _PF, _Str) -> <<"bad argument">>; explain_reason({badarg,V}, error=Cl, [], PF, Str) -> % orelse, andalso format_value(V, <<"bad argument: ">>, Cl, PF, Str); explain_reason(badarith, error, [], _PF, _Str) -> <<"bad argument in an arithmetic expression">>; explain_reason({badarity,{Fun,As}}, error, [], _PF, _Str) when is_function(Fun) -> %% Only the arity is displayed, not the arguments As. io_lib:fwrite(<<"~s called with ~s">>, [format_fun(Fun), argss(length(As))]); explain_reason({badfun,Term}, error=Cl, [], PF, Str) -> format_value(Term, <<"bad function ">>, Cl, PF, Str); explain_reason({badmatch,Term}, error=Cl, [], PF, Str) -> format_value(Term, <<"no match of right hand side value ">>, Cl, PF, Str); explain_reason({case_clause,V}, error=Cl, [], PF, Str) -> %% "there is no case clause with a true guard sequence and a %% pattern matching..." format_value(V, <<"no case clause matching ">>, Cl, PF, Str); explain_reason(function_clause, error, [{F,A}], _PF, _Str) -> %% Shell commands FAs = io_lib:fwrite(<<"~w/~w">>, [F, A]), [<<"no function clause matching call to ">> | FAs]; explain_reason(function_clause, error=Cl, [{M,F,As}], PF, Str) -> String = <<"no function clause matching ">>, format_errstr_call(String, Cl, {M,F}, As, PF, Str); explain_reason(if_clause, error, [], _PF, _Str) -> <<"no true branch found when evaluating an if expression">>; explain_reason(noproc, error, [], _PF, _Str) -> <<"no such process or port">>; explain_reason(notalive, error, [], _PF, _Str) -> <<"the node cannot be part of a distributed system">>; explain_reason(system_limit, error, [], _PF, _Str) -> <<"a system limit has been reached">>; explain_reason(timeout_value, error, [], _PF, _Str) -> <<"bad receive timeout value">>; explain_reason({try_clause,V}, error=Cl, [], PF, Str) -> %% "there is no try clause with a true guard sequence and a %% pattern matching..." format_value(V, <<"no try clause matching ">>, Cl, PF, Str); explain_reason(undef, error, [{M,F,A}], _PF, _Str) -> %% Only the arity is displayed, not the arguments, if there are any. 
io_lib:fwrite(<<"undefined function ~s">>, [mfa_to_string(M, F, n_args(A))]); explain_reason({shell_undef,F,A}, error, [], _PF, _Str) -> %% Give nicer reports for undefined shell functions %% (but not when the user actively calls shell_default:F(...)). io_lib:fwrite(<<"undefined shell command ~s/~w">>, [F, n_args(A)]); %% Exit codes returned by erl_eval only: explain_reason({argument_limit,_Fun}, error, [], _PF, _Str) -> io_lib:fwrite(<<"limit of number of arguments to interpreted function" " exceeded">>, []); explain_reason({bad_filter,V}, error=Cl, [], PF, Str) -> format_value(V, <<"bad filter ">>, Cl, PF, Str); explain_reason({bad_generator,V}, error=Cl, [], PF, Str) -> format_value(V, <<"bad generator ">>, Cl, PF, Str); explain_reason({unbound,V}, error, [], _PF, _Str) -> io_lib:fwrite(<<"variable ~w is unbound">>, [V]); %% Exit codes local to the shell module (restricted shell): explain_reason({restricted_shell_bad_return, V}, exit=Cl, [], PF, Str) -> String = <<"restricted shell module returned bad value ">>, format_value(V, String, Cl, PF, Str); explain_reason({restricted_shell_disallowed,{ForMF,As}}, exit=Cl, [], PF, Str) -> %% ForMF can be a fun, but not a shell fun. String = <<"restricted shell does not allow ">>, format_errstr_call(String, Cl, ForMF, As, PF, Str); explain_reason(restricted_shell_started, exit, [], _PF, _Str) -> <<"restricted shell starts now">>; explain_reason(restricted_shell_stopped, exit, [], _PF, _Str) -> <<"restricted shell stopped">>; %% Other exit code: explain_reason(Reason, Class, [], PF, Str) -> PF(Reason, (iolist_size(Str)+1) + exited_size(Class)). n_spaces(N) -> lists:duplicate(N, $\s). exited_size(Class) -> iolist_size(exited(Class)). exited(error) -> <<"exception error: ">>; exited(exit) -> <<"exception exit: ">>; exited(throw) -> <<"exception throw: ">>. format_stacktrace1(S0, Stack0, PF, SF) -> Stack1 = lists:dropwhile(fun({M,F,A}) -> SF(M, F, A) end, lists:reverse(Stack0)), S = [" " | S0], Stack = lists:reverse(Stack1), format_stacktrace2(S, Stack, 1, PF). format_stacktrace2(S, [{M,F,A}|Fs], N, PF) when is_integer(A) -> [io_lib:fwrite(<<"~s~s ~s">>, [sep(N, S), origin(N, M, F, A), mfa_to_string(M, F, A)]) | format_stacktrace2(S, Fs, N + 1, PF)]; format_stacktrace2(S, [{M,F,As}|Fs], N, PF) when is_list(As) -> A = length(As), CalledAs = [S,<<" called as ">>], C = format_call("", CalledAs, {M,F}, As, PF), [io_lib:fwrite(<<"~s~s ~s\n~s~s">>, [sep(N, S), origin(N, M, F, A), mfa_to_string(M, F, A), CalledAs, C]) | format_stacktrace2(S, Fs, N + 1, PF)]; format_stacktrace2(_S, [], _N, _PF) -> "". argss(0) -> <<"no arguments">>; argss(1) -> <<"one argument">>; argss(2) -> <<"two arguments">>; argss(I) -> io_lib:fwrite(<<"~w arguments">>, [I]). format_value(V, ErrStr, Class, PF, Str) -> Pre1Sz = exited_size(Class), Str1 = PF(V, Pre1Sz + iolist_size([Str, ErrStr])+1), [ErrStr | case count_nl(Str1) of N1 when N1 > 1 -> Str2 = PF(V, iolist_size(Str) + 1 + Pre1Sz), case count_nl(Str2) < N1 of true -> [$\n, Str, n_spaces(Pre1Sz) | Str2]; false -> Str1 end; _ -> Str1 end]. format_fun(Fun) when is_function(Fun) -> {module, M} = erlang:fun_info(Fun, module), {name, F} = erlang:fun_info(Fun, name), {arity, A} = erlang:fun_info(Fun, arity), case erlang:fun_info(Fun, type) of {type, local} when F =:= "" -> io_lib:fwrite(<<"~w">>, [Fun]); {type, local} when M =:= erl_eval -> io_lib:fwrite(<<"interpreted function with arity ~w">>, [A]); {type, local} -> mfa_to_string(M, F, A); {type, external} -> mfa_to_string(M, F, A) end. 
format_errstr_call(ErrStr, Class, ForMForFun, As, PF, Pre0) -> Pre1 = [Pre0 | n_spaces(exited_size(Class))], format_call(ErrStr, Pre1, ForMForFun, As, PF). format_call(ErrStr, Pre1, ForMForFun, As, PF) -> Arity = length(As), [ErrStr | case is_op(ForMForFun, Arity) of {yes,Op} -> format_op(ErrStr, Pre1, Op, As, PF); no -> MFs = mf_to_string(ForMForFun, Arity), I1 = iolist_size([Pre1,ErrStr|MFs]), S1 = pp_arguments(PF, As, I1), S2 = pp_arguments(PF, As, iolist_size([Pre1|MFs])), Long = count_nl(pp_arguments(PF, [a2345,b2345], I1)) > 0, case Long or (count_nl(S2) < count_nl(S1)) of true -> [$\n, Pre1, MFs, S2]; false -> [MFs, S1] end end]. mfa_to_string(M, F, A) -> io_lib:fwrite(<<"~s/~w">>, [mf_to_string({M, F}, A), A]). mf_to_string({M, F}, A) -> case erl_internal:bif(M, F, A) of true -> io_lib:fwrite(<<"~w">>, [F]); false -> case is_op({M, F}, A) of {yes, '/'} -> io_lib:fwrite(<<"~w">>, [F]); {yes, F} -> atom_to_list(F); no -> io_lib:fwrite(<<"~w:~w">>, [M, F]) end end; mf_to_string(Fun, _A) when is_function(Fun) -> format_fun(Fun); mf_to_string(F, _A) -> io_lib:fwrite(<<"~w">>, [F]). n_args(A) when is_integer(A) -> A; n_args(As) when is_list(As) -> length(As). origin(1, M, F, A) -> case is_op({M, F}, n_args(A)) of {yes, F} -> <<"in operator ">>; no -> <<"in function ">> end; origin(_N, _M, _F, _A) -> <<"in call from">>. sep(1, S) -> S; sep(_, S) -> [$\n | S]. count_nl([E | Es]) -> count_nl(E) + count_nl(Es); count_nl($\n) -> 1; count_nl(Bin) when is_binary(Bin) -> count_nl(binary_to_list(Bin)); count_nl(_) -> 0. is_op(ForMForFun, A) -> try {erlang,F} = ForMForFun, _ = erl_internal:op_type(F, A), {yes,F} catch error:_ -> no end. format_op(ErrStr, Pre, Op, [A1, A2], PF) -> I1 = iolist_size([ErrStr,Pre]), S1 = PF(A1, I1+1), S2 = PF(A2, I1+1), OpS = atom_to_list(Op), Pre1 = [$\n | n_spaces(I1)], case count_nl(S1) > 0 of true -> [S1,Pre1,OpS,Pre1|S2]; false -> OpS2 = io_lib:fwrite(<<" ~s ">>, [Op]), S2_2 = PF(A2, iolist_size([ErrStr,Pre,S1|OpS2])+1), case count_nl(S2) < count_nl(S2_2) of true -> [S1,Pre1,OpS,Pre1|S2]; false -> [S1,OpS2|S2_2] end end. pp_arguments(PF, As, I) -> case {As, io_lib:printable_list(As)} of {[Int | T], true} -> L = integer_to_list(Int), Ll = length(L), A = list_to_atom(lists:duplicate(Ll, $a)), S0 = binary_to_list(iolist_to_binary(PF([A | T], I+1))), brackets_to_parens([$[,L,string:sub_string(S0, 2+Ll)]); _ -> brackets_to_parens(PF(As, I+1)) end. brackets_to_parens(S) -> B = iolist_to_binary(S), Sz = byte_size(B) - 2, <<$[,R:Sz/binary,$]>> = B, [$(,R,$)]. lager-3.8.0/src/lager_default_formatter.erl0000644000232200023220000004663513523436621021361 0ustar debalancedebalance%% Copyright (c) 2011-2012 Basho Technologies, Inc. All Rights Reserved. %% %% This file is provided to you under the Apache License, %% Version 2.0 (the "License"); you may not use this file %% except in compliance with the License. You may obtain %% a copy of the License at %% %% http://www.apache.org/licenses/LICENSE-2.0 %% %% Unless required by applicable law or agreed to in writing, %% software distributed under the License is distributed on an %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY %% KIND, either express or implied. See the License for the %% specific language governing permissions and limitations %% under the License. -module(lager_default_formatter). %% %% Include files %% -include("lager.hrl"). -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). -endif. %% %% Exported Functions %% -export([format/2, format/3]). 
%% %% API Functions %% %% @doc Provides a generic, default formatting for log messages using a semi-iolist as configuration. Any iolist allowed %% elements in the configuration are printed verbatim. Atoms in the configuration are treated as metadata properties %% and extracted from the log message. Optionally, a tuple of {atom(),semi-iolist()} can be used. The atom will look %% up the property, but if not found it will use the semi-iolist() instead. These fallbacks can be similarly nested %% or refer to other properties, if desired. You can also use a {atom, semi-iolist(), semi-iolist()} formatter, which %% acts like a ternary operator's true/false branches. %% %% The metadata properties date,time, message, severity, and sev will always exist. %% The properties pid, file, line, module, and function will always exist if the parser transform is used. %% %% Example: %% %% `["Foo"]' -> "Foo", regardless of message content. %% %% `[message]' -> The content of the logged message, alone. %% %% `[{pid,"Unknown Pid"}]' -> "?.?.?" if pid is in the metadata, "Unknown Pid" if not. %% %% `[{pid, ["My pid is ", pid], ["Unknown Pid"]}]' -> if pid is in the metada print "My pid is ?.?.?", otherwise print "Unknown Pid" %% @end -spec format(lager_msg:lager_msg(),list(),list()) -> any(). format(Msg,[], Colors) -> format(Msg, [{eol, "\n"}], Colors); format(Msg,[{eol, EOL}], Colors) -> format(Msg, [date, " ", time, " ", color, "[", severity, "] ", {pid, ""}, {module, [ {pid, ["@"], ""}, module, {function, [":", function], ""}, {line, [":",line], ""}], ""}, " ", message, EOL], Colors); format(Message,Config,Colors) -> [ case V of color -> output_color(Message,Colors); _ -> output(V,Message) end || V <- Config ]. -spec format(lager_msg:lager_msg(),list()) -> any(). format(Msg, Config) -> format(Msg, Config, []). -spec output(term(),lager_msg:lager_msg()) -> iolist(). output(message,Msg) -> lager_msg:message(Msg); output(date,Msg) -> {D, _T} = lager_msg:datetime(Msg), D; output(time,Msg) -> {_D, T} = lager_msg:datetime(Msg), T; output(severity,Msg) -> atom_to_list(lager_msg:severity(Msg)); output(severity_upper, Msg) -> uppercase_severity(lager_msg:severity(Msg)); output(blank,_Msg) -> output({blank," "},_Msg); output(node, _Msg) -> output({node, atom_to_list(node())},_Msg); output({blank,Fill},_Msg) -> Fill; output(sev,Msg) -> %% Write brief acronym for the severity level (e.g. 
debug -> $D) [lager_util:level_to_chr(lager_msg:severity(Msg))]; output(metadata, Msg) -> output({metadata, "=", " "}, Msg); output({metadata, IntSep, FieldSep}, Msg) -> MD = lists:keysort(1, lager_msg:metadata(Msg)), string:join([io_lib:format("~s~s~p", [K, IntSep, V]) || {K, V} <- MD], FieldSep); output(Prop,Msg) when is_atom(Prop) -> Metadata = lager_msg:metadata(Msg), make_printable(get_metadata(Prop,Metadata,<<"Undefined">>)); output({Prop,Default},Msg) when is_atom(Prop) -> Metadata = lager_msg:metadata(Msg), make_printable(get_metadata(Prop,Metadata,output(Default,Msg))); output({Prop, Present, Absent}, Msg) when is_atom(Prop) -> %% sort of like a poor man's ternary operator Metadata = lager_msg:metadata(Msg), case get_metadata(Prop, Metadata) of undefined -> [ output(V, Msg) || V <- Absent]; _ -> [ output(V, Msg) || V <- Present] end; output({Prop, Present, Absent, Width}, Msg) when is_atom(Prop) -> %% sort of like a poor man's ternary operator Metadata = lager_msg:metadata(Msg), case get_metadata(Prop, Metadata) of undefined -> [ output(V, Msg, Width) || V <- Absent]; _ -> [ output(V, Msg, Width) || V <- Present] end; output(Other,_) -> make_printable(Other). output(message, Msg, _Width) -> lager_msg:message(Msg); output(date,Msg, _Width) -> {D, _T} = lager_msg:datetime(Msg), D; output(time, Msg, _Width) -> {_D, T} = lager_msg:datetime(Msg), T; output(severity, Msg, Width) -> make_printable(atom_to_list(lager_msg:severity(Msg)), Width); output(sev,Msg, _Width) -> %% Write brief acronym for the severity level (e.g. debug -> $D) [lager_util:level_to_chr(lager_msg:severity(Msg))]; output(node, Msg, _Width) -> output({node, atom_to_list(node())}, Msg, _Width); output(blank,_Msg, _Width) -> output({blank, " "},_Msg, _Width); output({blank, Fill},_Msg, _Width) -> Fill; output(metadata, Msg, _Width) -> output({metadata, "=", " "}, Msg, _Width); output({metadata, IntSep, FieldSep}, Msg, _Width) -> MD = lists:keysort(1, lager_msg:metadata(Msg)), [string:join([io_lib:format("~s~s~p", [K, IntSep, V]) || {K, V} <- MD], FieldSep)]; output(Prop, Msg, Width) when is_atom(Prop) -> Metadata = lager_msg:metadata(Msg), make_printable(get_metadata(Prop,Metadata,<<"Undefined">>), Width); output({Prop,Default},Msg, Width) when is_atom(Prop) -> Metadata = lager_msg:metadata(Msg), make_printable(get_metadata(Prop,Metadata,output(Default,Msg)), Width); output(Other,_, Width) -> make_printable(Other, Width). output_color(_Msg,[]) -> []; output_color(Msg,Colors) -> Level = lager_msg:severity(Msg), case lists:keyfind(Level, 1, Colors) of {_, Color} -> Color; _ -> [] end. -spec make_printable(any()) -> iolist(). make_printable(A) when is_atom(A) -> atom_to_list(A); make_printable(P) when is_pid(P) -> pid_to_list(P); make_printable(L) when is_list(L) orelse is_binary(L) -> L; make_printable(Other) -> io_lib:format("~p",[Other]). make_printable(A,W) when is_integer(W)-> string:left(make_printable(A),W); make_printable(A,{Align,W}) when is_integer(W) -> case Align of left -> string:left(make_printable(A),W); centre -> string:centre(make_printable(A),W); right -> string:right(make_printable(A),W); _ -> string:left(make_printable(A),W) end; make_printable(A,_W) -> make_printable(A). run_function(Function, Default) -> try Function() of Result -> Result catch _:_ -> Default end. get_metadata(Key, Metadata) -> get_metadata(Key, Metadata, undefined). 
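%% A worked example of the configuration format documented in this module's @doc
%% (the module name, line number and message text are made up for illustration):
%%
%%     Config = ["[", severity, "] ",
%%               {module, [module, {line, [":", line], ""}, " "], ""},
%%               message, "\n"]
%%
%% applied to an error message logged from my_module at line 42 would render roughly
%%
%%     [error] my_module:42 something went wrong
%%
%% and the module/line portion is omitted entirely when that metadata is missing.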
get_metadata(Key, Metadata, Default) -> case lists:keyfind(Key, 1, Metadata) of false -> Default; {Key, Value} when is_function(Value) -> run_function(Value, Default); {Key, Value} -> Value end. uppercase_severity(debug) -> "DEBUG"; uppercase_severity(info) -> "INFO"; uppercase_severity(notice) -> "NOTICE"; uppercase_severity(warning) -> "WARNING"; uppercase_severity(error) -> "ERROR"; uppercase_severity(critical) -> "CRITICAL"; uppercase_severity(alert) -> "ALERT"; uppercase_severity(emergency) -> "EMERGENCY". -ifdef(TEST). date_time_now() -> Now = os:timestamp(), {Date, Time} = lager_util:format_time(lager_util:maybe_utc(lager_util:localtime_ms(Now))), {Date, Time, Now}. basic_test_() -> {Date, Time, Now} = date_time_now(), [{"Default formatting test", ?_assertEqual(iolist_to_binary([Date, " ", Time, " [error] ", pid_to_list(self()), " Message\n"]), iolist_to_binary(format(lager_msg:new("Message", Now, error, [{pid, self()}], []), []))) }, {"Basic Formatting", ?_assertEqual(<<"Simplist Format">>, iolist_to_binary(format(lager_msg:new("Message", Now, error, [{pid, self()}], []), ["Simplist Format"]))) }, {"Default equivalent formatting test", ?_assertEqual(iolist_to_binary([Date, " ", Time, " [error] ", pid_to_list(self()), " Message\n"]), iolist_to_binary(format(lager_msg:new("Message", Now, error, [{pid, self()}], []), [date, " ", time," [",severity,"] ",pid, " ", message, "\n"] ))) }, {"Non existent metadata can default to string", ?_assertEqual(iolist_to_binary([Date, " ", Time, " [error] Fallback Message\n"]), iolist_to_binary(format(lager_msg:new("Message", Now, error, [{pid, self()}], []), [date, " ", time," [",severity,"] ",{does_not_exist,"Fallback"}, " ", message, "\n"] ))) }, {"Non existent metadata can default to other metadata", ?_assertEqual(iolist_to_binary([Date, " ", Time, " [error] Fallback Message\n"]), iolist_to_binary(format(lager_msg:new("Message", Now, error, [{pid, "Fallback"}], []), [date, " ", time," [",severity,"] ",{does_not_exist,pid}, " ", message, "\n"] ))) }, {"Non existent metadata can default to a string2", ?_assertEqual(iolist_to_binary(["Unknown Pid"]), iolist_to_binary(format(lager_msg:new("Message", Now, error, [], []), [{pid, ["My pid is ", pid], ["Unknown Pid"]}] ))) }, {"Metadata can have extra formatting", ?_assertEqual(iolist_to_binary(["My pid is hello"]), iolist_to_binary(format(lager_msg:new("Message", Now, error, [{pid, hello}], []), [{pid, ["My pid is ", pid], ["Unknown Pid"]}] ))) }, {"Metadata can have extra formatting1", ?_assertEqual(iolist_to_binary(["servername"]), iolist_to_binary(format(lager_msg:new("Message", Now, error, [{pid, hello}, {server, servername}], []), [{server,{pid, ["(", pid, ")"], ["(Unknown Server)"]}}] ))) }, {"Metadata can have extra formatting2", ?_assertEqual(iolist_to_binary(["(hello)"]), iolist_to_binary(format(lager_msg:new("Message", Now, error, [{pid, hello}], []), [{server,{pid, ["(", pid, ")"], ["(Unknown Server)"]}}] ))) }, {"Metadata can have extra formatting3", ?_assertEqual(iolist_to_binary(["(Unknown Server)"]), iolist_to_binary(format(lager_msg:new("Message", Now, error, [], []), [{server,{pid, ["(", pid, ")"], ["(Unknown Server)"]}}] ))) }, {"Metadata can be printed in its enterity", ?_assertEqual(iolist_to_binary(["bar=2 baz=3 foo=1"]), iolist_to_binary(format(lager_msg:new("Message", Now, error, [{foo, 1}, {bar, 2}, {baz, 3}], []), [metadata] ))) }, {"Metadata can be printed in its enterity with custom seperators", ?_assertEqual(iolist_to_binary(["bar->2, baz->3, foo->1"]), 
iolist_to_binary(format(lager_msg:new("Message", Now, error, [{foo, 1}, {bar, 2}, {baz, 3}], []), [{metadata, "->", ", "}] ))) }, {"Metadata can have extra formatting with width 1", ?_assertEqual(iolist_to_binary(["(hello )(hello )(hello)(hello)(hello)"]), iolist_to_binary(format(lager_msg:new("Message", Now, error, [{pid, hello}], []), ["(",{pid, [pid], "", 10},")", "(",{pid, [pid], "", {bad_align,10}},")", "(",{pid, [pid], "", bad10},")", "(",{pid, [pid], "", {right,bad20}},")", "(",{pid, [pid], "", {bad_align,bad20}},")"] ))) }, {"Metadata can have extra formatting with width 2", ?_assertEqual(iolist_to_binary(["(hello )"]), iolist_to_binary(format(lager_msg:new("Message", Now, error, [{pid, hello}], []), ["(",{pid, [pid], "", {left,10}},")"] ))) }, {"Metadata can have extra formatting with width 3", ?_assertEqual(iolist_to_binary(["( hello)"]), iolist_to_binary(format(lager_msg:new("Message", Now, error, [{pid, hello}], []), ["(",{pid, [pid], "", {right,10}},")"] ))) }, {"Metadata can have extra formatting with width 4", ?_assertEqual(iolist_to_binary(["( hello )"]), iolist_to_binary(format(lager_msg:new("Message", Now, error, [{pid, hello}], []), ["(",{pid, [pid], "", {centre,10}},")"] ))) }, {"Metadata can have extra formatting with width 5", ?_assertEqual(iolist_to_binary(["error |hello ! ( hello )"]), iolist_to_binary(format(lager_msg:new("Message", Now, error, [{pid, hello}], []), [{x,"",[severity,{blank,"|"},pid], 10},"!",blank,"(",{pid, [pid], "", {centre,10}},")"] ))) }, {"Metadata can have extra formatting with width 6", ?_assertEqual(iolist_to_binary([Time,Date," bar=2 baz=3 foo=1 pid=hello EMessage"]), iolist_to_binary(format(lager_msg:new("Message", Now, error, [{pid, hello},{foo, 1}, {bar, 2}, {baz, 3}], []), [{x,"",[time]}, {x,"",[date],20},blank,{x,"",[metadata],30},blank,{x,"",[sev],10},message, {message,message,"", {right,20}}] ))) }, {"Uppercase Severity Formatting - DEBUG", ?_assertEqual(<<"DEBUG Simplist Format">>, iolist_to_binary(format(lager_msg:new("Message", Now, debug, [{pid, self()}], []), [severity_upper, " Simplist Format"]))) }, {"Uppercase Severity Formatting - INFO", ?_assertEqual(<<"INFO Simplist Format">>, iolist_to_binary(format(lager_msg:new("Message", Now, info, [{pid, self()}], []), [severity_upper, " Simplist Format"]))) }, {"Uppercase Severity Formatting - NOTICE", ?_assertEqual(<<"NOTICE Simplist Format">>, iolist_to_binary(format(lager_msg:new("Message", Now, notice, [{pid, self()}], []), [severity_upper, " Simplist Format"]))) }, {"Uppercase Severity Formatting - WARNING", ?_assertEqual(<<"WARNING Simplist Format">>, iolist_to_binary(format(lager_msg:new("Message", Now, warning, [{pid, self()}], []), [severity_upper, " Simplist Format"]))) }, {"Uppercase Severity Formatting - ERROR", ?_assertEqual(<<"ERROR Simplist Format">>, iolist_to_binary(format(lager_msg:new("Message", Now, error, [{pid, self()}], []), [severity_upper, " Simplist Format"]))) }, {"Uppercase Severity Formatting - CRITICAL", ?_assertEqual(<<"CRITICAL Simplist Format">>, iolist_to_binary(format(lager_msg:new("Message", Now, critical, [{pid, self()}], []), [severity_upper, " Simplist Format"]))) }, {"Uppercase Severity Formatting - ALERT", ?_assertEqual(<<"ALERT Simplist Format">>, iolist_to_binary(format(lager_msg:new("Message", Now, alert, [{pid, self()}], []), [severity_upper, " Simplist Format"]))) }, {"Uppercase Severity Formatting - EMERGENCY", ?_assertEqual(<<"EMERGENCY Simplist Format">>, iolist_to_binary(format(lager_msg:new("Message", Now, emergency, [{pid, self()}], 
[]), [severity_upper, " Simplist Format"]))) }, {"node formatting basic", begin [N, "foo"] = format(lager_msg:new("Message", Now, info, [{pid, self()}], []), [node, "foo"]), ?_assertNotMatch(nomatch, re:run(N, <<"@">>)) end } ]. -endif. lager-3.8.0/src/lager_handler_watcher_sup.erl0000644000232200023220000000224313523436621021656 0ustar debalancedebalance%% Copyright (c) 2011-2012 Basho Technologies, Inc. All Rights Reserved. %% %% This file is provided to you under the Apache License, %% Version 2.0 (the "License"); you may not use this file %% except in compliance with the License. You may obtain %% a copy of the License at %% %% http://www.apache.org/licenses/LICENSE-2.0 %% %% Unless required by applicable law or agreed to in writing, %% software distributed under the License is distributed on an %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY %% KIND, either express or implied. See the License for the %% specific language governing permissions and limitations %% under the License. %% @doc A supervisor for monitoring lager_handler_watcher processes. %% @private -module(lager_handler_watcher_sup). -behaviour(supervisor). %% API -export([start_link/0]). %% Callbacks -export([init/1]). start_link() -> supervisor:start_link({local, ?MODULE}, ?MODULE, []). init([]) -> {ok, {{simple_one_for_one, 10, 60}, [ {lager_handler_watcher, {lager_handler_watcher, start_link, []}, temporary, 5000, worker, [lager_handler_watcher]} ]}}. lager-3.8.0/src/lager_console_backend.erl0000644000232200023220000006602413523436621020755 0ustar debalancedebalance%% Copyright (c) 2011-2012, 2014 Basho Technologies, Inc. All Rights Reserved. %% %% This file is provided to you under the Apache License, %% Version 2.0 (the "License"); you may not use this file %% except in compliance with the License. You may obtain %% a copy of the License at %% %% http://www.apache.org/licenses/LICENSE-2.0 %% %% Unless required by applicable law or agreed to in writing, %% software distributed under the License is distributed on an %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY %% KIND, either express or implied. See the License for the %% specific language governing permissions and limitations %% under the License. %% @doc Console backend for lager. %% Configuration is a proplist with the following keys: %%
%% <ul>
%%     <li>`level' - log level to use</li>
%%     <li>`use_stderr' - either `true' or `false', defaults to false. If set to true,
%%         use standard error to output console log messages</li>
%%     <li>`formatter' - the module to use when formatting log messages. Defaults to
%%         `lager_default_formatter'</li>
%%     <li>`formatter_config' - the format configuration string. Defaults to
%%         `time [ severity ] message'</li>
%% </ul>
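%%
%% For example, one possible handler entry in lager's `handlers' application
%% environment (the formatter settings shown are just one choice) could be:
%%
%%     {lager_console_backend, [{level, info},
%%                              {formatter, lager_default_formatter},
%%                              {formatter_config, [time, " [", severity, "] ", message, "\n"]}]}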
-module(lager_console_backend). -behaviour(gen_event). -export([init/1, handle_call/2, handle_event/2, handle_info/2, terminate/2, code_change/3]). -record(state, {level :: {'mask', integer()}, out = user :: user | standard_error | pid(), id :: atom() | {atom(), any()}, formatter :: atom(), format_config :: any(), colors=[] :: list()}). -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). -compile([{parse_transform, lager_transform}]). -endif. -include("lager.hrl"). -define(TERSE_FORMAT,[time, " ", color, "[", severity,"] ", message]). -define(DEFAULT_FORMAT_CONFIG, ?TERSE_FORMAT ++ [eol()]). -define(FORMAT_CONFIG_OFF, [{eol, eol()}]). -ifdef(TEST). -define(DEPRECATED(_Msg), ok). -else. -define(DEPRECATED(Msg), io:format(user, "WARNING: This is a deprecated console configuration. Please use \"~w\" instead.~n", [Msg])). -endif. %% @private init([Level]) when is_atom(Level) -> ?DEPRECATED([{level, Level}]), init([{level, Level}]); init([Level, true]) when is_atom(Level) -> % for backwards compatibility ?DEPRECATED([{level, Level}, {formatter_config, [{eol, "\\r\\n\\"}]}]), init([{level, Level}, {formatter_config, ?FORMAT_CONFIG_OFF}]); init([Level, false]) when is_atom(Level) -> % for backwards compatibility ?DEPRECATED([{level, Level}]), init([{level, Level}]); init(Options) when is_list(Options) -> true = validate_options(Options), Colors = case application:get_env(lager, colored) of {ok, true} -> {ok, LagerColors} = application:get_env(lager, colors), LagerColors; _ -> [] end, Level = get_option(level, Options, undefined), try {is_new_style_console_available(), lager_util:config_to_mask(Level)} of {false, _} -> Msg = "Lager's console backend is incompatible with the 'old' shell, not enabling it", %% be as noisy as possible, log to every possible place try alarm_handler:set_alarm({?MODULE, "WARNING: " ++ Msg}) catch _:_ -> error_logger:warning_msg(Msg ++ "~n") end, io:format("WARNING: " ++ Msg ++ "~n"), ?INT_LOG(warning, Msg, []), {error, {fatal, old_shell}}; {true, L} -> [UseErr, GroupLeader, ID, Formatter, Config] = [ get_option(K, Options, Default) || {K, Default} <- [ {use_stderr, false}, {group_leader, false}, {id, ?MODULE}, {formatter, lager_default_formatter}, {formatter_config, ?DEFAULT_FORMAT_CONFIG} ] ], Out = case UseErr of false -> case GroupLeader of false -> user; GLPid when is_pid(GLPid) -> erlang:monitor(process, GLPid), GLPid end; true -> standard_error end, {ok, #state{level=L, id=ID, out=Out, formatter=Formatter, format_config=Config, colors=Colors}} catch _:_ -> {error, {fatal, bad_log_level}} end; init(Level) when is_atom(Level) -> ?DEPRECATED([{level, Level}]), init([{level, Level}]); init(Other) -> {error, {fatal, {bad_console_config, Other}}}. validate_options([]) -> true; validate_options([{level, L}|T]) when is_atom(L) -> case lists:member(L, ?LEVELS) of false -> throw({error, {fatal, {bad_level, L}}}); true -> validate_options(T) end; validate_options([{use_stderr, true}|T]) -> validate_options(T); validate_options([{use_stderr, false}|T]) -> validate_options(T); validate_options([{formatter, M}|T]) when is_atom(M) -> validate_options(T); validate_options([{formatter_config, C}|T]) when is_list(C) -> validate_options(T); validate_options([{group_leader, L}|T]) when is_pid(L) -> validate_options(T); validate_options([{id, {?MODULE, _}}|T]) -> validate_options(T); validate_options([H|_]) -> throw({error, {fatal, {bad_console_config, H}}}). get_option(K, Options, Default) -> case lists:keyfind(K, 1, Options) of {K, V} -> V; false -> Default end. 
%% @private handle_call(get_loglevel, #state{level=Level} = State) -> {ok, Level, State}; handle_call({set_loglevel, Level}, State) -> try lager_util:config_to_mask(Level) of Levels -> {ok, ok, State#state{level=Levels}} catch _:_ -> {ok, {error, bad_log_level}, State} end; handle_call(_Request, State) -> {ok, ok, State}. %% @private handle_event({log, Message}, #state{level=L,out=Out,formatter=Formatter,format_config=FormatConfig,colors=Colors,id=ID} = State) -> case lager_util:is_loggable(Message, L, ID) of true -> io:put_chars(Out, Formatter:format(Message,FormatConfig,Colors)), {ok, State}; false -> {ok, State} end; handle_event(_Event, State) -> {ok, State}. %% @private handle_info({'DOWN', _, process, Out, _}, #state{out=Out}) -> remove_handler; handle_info(_Info, State) -> {ok, State}. %% @private terminate(remove_handler, _State=#state{id=ID}) -> %% have to do this asynchronously because we're in the event handlr spawn(fun() -> lager:clear_trace_by_destination(ID) end), ok; terminate(_Reason, _State) -> ok. %% @private code_change(_OldVsn, State, _Extra) -> {ok, State}. eol() -> case application:get_env(lager, colored) of {ok, true} -> "\e[0m\r\n"; _ -> "\r\n" end. -ifdef(TEST). is_new_style_console_available() -> true. -else. is_new_style_console_available() -> %% Criteria: %% 1. If the user has specified '-noshell' on the command line, %% then we will pretend that the new-style console is available. %% If there is no shell at all, then we don't have to worry %% about log events being blocked by the old-style shell. %% 2. Windows doesn't support the new shell, so all windows users %% have is the oldshell. %% 3. If the user_drv process is registered, all is OK. %% 'user_drv' is a registered proc name used by the "new" %% console driver. init:get_argument(noshell) /= error orelse element(1, os:type()) /= win32 orelse is_pid(whereis(user_drv)). -endif. -ifdef(TEST). console_config_validation_test_() -> Good = [{level, info}, {use_stderr, true}], Bad1 = [{level, foo}, {use_stderr, flase}], Bad2 = [{level, info}, {use_stderr, flase}], AllGood = [{level, info}, {formatter, my_formatter}, {formatter_config, ["blort", "garbage"]}, {use_stderr, false}], [ ?_assertEqual(true, validate_options(Good)), ?_assertThrow({error, {fatal, {bad_level, foo}}}, validate_options(Bad1)), ?_assertThrow({error, {fatal, {bad_console_config, {use_stderr, flase}}}}, validate_options(Bad2)), ?_assertEqual(true, validate_options(AllGood)) ]. console_log_test_() -> %% tiny recursive fun that pretends to be a group leader F = fun(Self) -> fun() -> YComb = fun(Fun) -> receive {io_request, From, ReplyAs, {put_chars, unicode, _Msg}} = Y -> From ! {io_reply, ReplyAs, ok}, Self ! Y, Fun(Fun); Other -> ?debugFmt("unexpected message ~p~n", [Other]), Self ! 
Other end end, YComb(YComb) end end, {foreach, fun() -> error_logger:tty(false), application:load(lager), application:set_env(lager, handlers, []), application:set_env(lager, error_logger_redirect, false), lager:start(), whereis(user) end, fun(User) -> unregister(user), register(user, User), application:stop(lager), application:stop(goldrush), error_logger:tty(true) end, [ {"regular console logging", fun() -> Pid = spawn(F(self())), unregister(user), register(user, Pid), erlang:group_leader(Pid, whereis(lager_event)), gen_event:add_handler(lager_event, lager_console_backend, [{level, info}]), lager_config:set({lager_event, loglevel}, {element(2, lager_util:config_to_mask(info)), []}), lager:log(info, self(), "Test message"), receive {io_request, From, ReplyAs, {put_chars, unicode, Msg}} -> From ! {io_reply, ReplyAs, ok}, TestMsg = "Test message" ++ eol(), ?assertMatch([_, "[info]", TestMsg], re:split(Msg, " ", [{return, list}, {parts, 3}])) after 500 -> ?assert(false) end end }, {"verbose console logging", fun() -> Pid = spawn(F(self())), unregister(user), register(user, Pid), erlang:group_leader(Pid, whereis(lager_event)), gen_event:add_handler(lager_event, lager_console_backend, [info, true]), lager_config:set({lager_event, loglevel}, {element(2, lager_util:config_to_mask(info)), []}), lager:info("Test message"), PidStr = pid_to_list(self()), receive {io_request, _, _, {put_chars, unicode, Msg}} -> TestMsg = "Test message" ++ eol(), ?assertMatch([_, _, "[info]", PidStr, _, TestMsg], re:split(Msg, "[ @]", [{return, list}, {parts, 6}])) after 500 -> ?assert(false) end end }, {"custom format console logging", fun() -> Pid = spawn(F(self())), unregister(user), register(user, Pid), erlang:group_leader(Pid, whereis(lager_event)), gen_event:add_handler(lager_event, lager_console_backend, [{level, info}, {formatter, lager_default_formatter}, {formatter_config, [date,"#",time,"#",severity,"#",node,"#",pid,"#", module,"#",function,"#",file,"#",line,"#",message,"\r\n"]}]), lager_config:set({lager_event, loglevel}, {?INFO, []}), lager:info("Test message"), PidStr = pid_to_list(self()), NodeStr = atom_to_list(node()), ModuleStr = atom_to_list(?MODULE), receive {io_request, _, _, {put_chars, unicode, Msg}} -> TestMsg = "Test message" ++ eol(), ?assertMatch([_, _, "info", NodeStr, PidStr, ModuleStr, _, _, _, TestMsg], re:split(Msg, "#", [{return, list}, {parts, 10}])) after 500 -> ?assert(false) end end }, {"tracing should work", fun() -> Pid = spawn(F(self())), unregister(user), register(user, Pid), gen_event:add_handler(lager_event, lager_console_backend, [{level, info}]), erlang:group_leader(Pid, whereis(lager_event)), lager_config:set({lager_event, loglevel}, {element(2, lager_util:config_to_mask(info)), []}), lager:debug("Test message"), receive {io_request, From, ReplyAs, {put_chars, unicode, _Msg}} -> From ! {io_reply, ReplyAs, ok}, ?assert(false) after 500 -> ?assert(true) end, {ok, _} = lager:trace_console([{module, ?MODULE}]), lager:debug("Test message"), receive {io_request, From1, ReplyAs1, {put_chars, unicode, Msg1}} -> From1 ! 
{io_reply, ReplyAs1, ok}, TestMsg = "Test message" ++ eol(), ?assertMatch([_, "[debug]", TestMsg], re:split(Msg1, " ", [{return, list}, {parts, 3}])) after 500 -> ?assert(false) end end }, {"tracing doesn't duplicate messages", fun() -> Pid = spawn(F(self())), unregister(user), register(user, Pid), gen_event:add_handler(lager_event, lager_console_backend, [{level, info}]), lager_config:set({lager_event, loglevel}, {element(2, lager_util:config_to_mask(info)), []}), erlang:group_leader(Pid, whereis(lager_event)), lager:debug("Test message"), receive {io_request, From, ReplyAs, {put_chars, unicode, _Msg}} -> From ! {io_reply, ReplyAs, ok}, ?assert(false) after 500 -> ?assert(true) end, {ok, _} = lager:trace_console([{module, ?MODULE}]), lager:error("Test message"), receive {io_request, From1, ReplyAs1, {put_chars, unicode, Msg1}} -> From1 ! {io_reply, ReplyAs1, ok}, TestMsg = "Test message" ++ eol(), ?assertMatch([_, "[error]", TestMsg], re:split(Msg1, " ", [{return, list}, {parts, 3}])) after 1000 -> ?assert(false) end, %% make sure this event wasn't duplicated receive {io_request, From2, ReplyAs2, {put_chars, unicode, _Msg2}} -> From2 ! {io_reply, ReplyAs2, ok}, ?assert(false) after 500 -> ?assert(true) end end }, {"blacklisting a loglevel works", fun() -> Pid = spawn(F(self())), unregister(user), register(user, Pid), gen_event:add_handler(lager_event, lager_console_backend, [{level, info}]), lager_config:set({lager_event, loglevel}, {element(2, lager_util:config_to_mask(info)), []}), lager:set_loglevel(lager_console_backend, '!=info'), erlang:group_leader(Pid, whereis(lager_event)), lager:debug("Test message"), receive {io_request, From1, ReplyAs1, {put_chars, unicode, Msg1}} -> From1 ! {io_reply, ReplyAs1, ok}, TestMsg = "Test message" ++ eol(), ?assertMatch([_, "[debug]", TestMsg], re:split(Msg1, " ", [{return, list}, {parts, 3}])) after 1000 -> ?assert(false) end, %% info is blacklisted lager:info("Test message"), receive {io_request, From2, ReplyAs2, {put_chars, unicode, _Msg2}} -> From2 ! {io_reply, ReplyAs2, ok}, ?assert(false) after 500 -> ?assert(true) end end }, {"whitelisting a loglevel works", fun() -> Pid = spawn(F(self())), unregister(user), register(user, Pid), gen_event:add_handler(lager_event, lager_console_backend, [{level, info}]), lager_config:set({lager_event, loglevel}, {element(2, lager_util:config_to_mask(info)), []}), lager:set_loglevel(lager_console_backend, '=debug'), erlang:group_leader(Pid, whereis(lager_event)), lager:debug("Test message"), receive {io_request, From1, ReplyAs1, {put_chars, unicode, Msg1}} -> From1 ! {io_reply, ReplyAs1, ok}, TestMsg = "Test message" ++ eol(), ?assertMatch([_, "[debug]", TestMsg], re:split(Msg1, " ", [{return, list}, {parts, 3}])) after 1000 -> ?assert(false) end, %% info is blacklisted lager:error("Test message"), receive {io_request, From2, ReplyAs2, {put_chars, unicode, _Msg2}} -> From2 ! {io_reply, ReplyAs2, ok}, ?assert(false) after 500 -> ?assert(true) end end }, {"console backend with custom group leader", fun() -> Pid = spawn(F(self())), gen_event:add_handler(lager_event, lager_console_backend, [{level, info}, {group_leader, Pid}]), lager_config:set({lager_event, loglevel}, {element(2, lager_util:config_to_mask(info)), []}), lager:info("Test message"), ?assertNotEqual({group_leader, Pid}, erlang:process_info(whereis(lager_event), group_leader)), receive {io_request, From1, ReplyAs1, {put_chars, unicode, Msg1}} -> From1 ! 
{io_reply, ReplyAs1, ok}, TestMsg = "Test message" ++ eol(), ?assertMatch([_, "[info]", TestMsg], re:split(Msg1, " ", [{return, list}, {parts, 3}])) after 1000 -> ?assert(false) end, %% killing the pid should prevent any new log messages (to prove we haven't accidentally redirected %% the group leader some other way exit(Pid, kill), timer:sleep(100), %% additionally, check the lager backend has been removed (because the group leader process died) ?assertNot(lists:member(lager_console_backend, gen_event:which_handlers(lager_event))), lager:error("Test message"), receive {io_request, From2, ReplyAs2, {put_chars, unicode, _Msg2}} -> From2 ! {io_reply, ReplyAs2, ok}, ?assert(false) after 500 -> ?assert(true) end end }, {"console backend with custom group leader using a trace and an ID", fun() -> Pid = spawn(F(self())), ID = {?MODULE, trace_test}, Handlers = lager_config:global_get(handlers, []), HandlerInfo = lager_app:start_handler(lager_event, ID, [{level, none}, {group_leader, Pid}, {id, ID}]), lager_config:global_set(handlers, [HandlerInfo|Handlers]), lager:info("Test message"), ?assertNotEqual({group_leader, Pid}, erlang:process_info(whereis(lager_event), group_leader)), receive {io_request, From, ReplyAs, {put_chars, unicode, _Msg}} -> From ! {io_reply, ReplyAs, ok}, ?assert(false) after 500 -> ?assert(true) end, lager:trace(ID, [{module, ?MODULE}], debug), lager:info("Test message"), receive {io_request, From1, ReplyAs1, {put_chars, unicode, Msg1}} -> From1 ! {io_reply, ReplyAs1, ok}, TestMsg = "Test message" ++ eol(), ?assertMatch([_, "[info]", TestMsg], re:split(Msg1, " ", [{return, list}, {parts, 3}])) after 500 -> ?assert(false) end, ?assertNotEqual({0, []}, lager_config:get({lager_event, loglevel})), %% killing the pid should prevent any new log messages (to prove we haven't accidentally redirected %% the group leader some other way exit(Pid, kill), timer:sleep(100), %% additionally, check the lager backend has been removed (because the group leader process died) ?assertNot(lists:member(lager_console_backend, gen_event:which_handlers(lager_event))), %% finally, check the trace has been removed ?assertEqual({0, []}, lager_config:get({lager_event, loglevel})), lager:error("Test message"), receive {io_request, From3, ReplyAs3, {put_chars, unicode, _Msg3}} -> From3 ! {io_reply, ReplyAs3, ok}, ?assert(false) after 500 -> ?assert(true) end end } ] }. set_loglevel_test_() -> {foreach, fun() -> error_logger:tty(false), application:load(lager), application:set_env(lager, handlers, [{lager_console_backend, [{level, info}]}]), application:set_env(lager, error_logger_redirect, false), lager:start() end, fun(_) -> application:stop(lager), application:stop(goldrush), error_logger:tty(true) end, [ {"Get/set loglevel test", fun() -> ?assertEqual(info, lager:get_loglevel(lager_console_backend)), lager:set_loglevel(lager_console_backend, debug), ?assertEqual(debug, lager:get_loglevel(lager_console_backend)), lager:set_loglevel(lager_console_backend, '!=debug'), ?assertEqual(info, lager:get_loglevel(lager_console_backend)), lager:set_loglevel(lager_console_backend, '!=info'), ?assertEqual(debug, lager:get_loglevel(lager_console_backend)), ok end }, {"Get/set invalid loglevel test", fun() -> ?assertEqual(info, lager:get_loglevel(lager_console_backend)), ?assertEqual({error, bad_log_level}, lager:set_loglevel(lager_console_backend, fatfinger)), ?assertEqual(info, lager:get_loglevel(lager_console_backend)) end } ] }. -endif. 
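%% Illustrative runtime sketch (mirrors the tests above, not part of the
%% original module): besides the `handlers' application environment, the
%% backend can be installed and adjusted while the node is running:
%%
%%   gen_event:add_handler(lager_event, lager_console_backend, [{level, info}]),
%%   lager:set_loglevel(lager_console_backend, debug),
%%   debug = lager:get_loglevel(lager_console_backend).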
lager-3.8.0/src/lager_crash_log.erl0000644000232200023220000004125013523436621017577 0ustar debalancedebalance%% ------------------------------------------------------------------- %% %% Copyright (c) 2011-2017 Basho Technologies, Inc. %% %% This file is provided to you under the Apache License, %% Version 2.0 (the "License"); you may not use this file %% except in compliance with the License. You may obtain %% a copy of the License at %% %% http://www.apache.org/licenses/LICENSE-2.0 %% %% Unless required by applicable law or agreed to in writing, %% software distributed under the License is distributed on an %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY %% KIND, either express or implied. See the License for the %% specific language governing permissions and limitations %% under the License. %% %% ------------------------------------------------------------------- %% @doc Lager crash log writer. This module implements a gen_server which writes %% error_logger error messages out to a file in their original format. The %% location to which it logs is configured by the application var `crash_log'. %% Omitting this variable disables crash logging. Crash logs are printed safely %% using trunc_io via code mostly lifted from riak_err. %% %% The `crash_log_msg_size' application var is used to specify the maximum %% size of any message to be logged. `crash_log_size' is used to specify the %% maximum size of the crash log before it will be rotated (0 will disable). %% Time based rotation is configurable via `crash_log_date', the syntax is %% documented in the README. To control the number of rotated files to be %% retained, use `crash_log_count'. -module(lager_crash_log). -include("lager.hrl"). -behaviour(gen_server). -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). -include_lib("kernel/include/file.hrl"). -endif. %% callbacks -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]). -export([start_link/6, start/6]). -record(state, { name :: string(), fd :: pid() | undefined, inode :: integer() | undefined, ctime :: file:date_time() | undefined, fmtmaxbytes :: integer(), size :: integer(), date :: undefined | string(), count :: integer(), flap=false :: boolean(), rotator :: atom() }). %% @private start_link(Filename, MaxBytes, Size, Date, Count, Rotator) -> gen_server:start_link({local, ?MODULE}, ?MODULE, [Filename, MaxBytes, Size, Date, Count, Rotator], []). %% @private start(Filename, MaxBytes, Size, Date, Count, Rotator) -> gen_server:start({local, ?MODULE}, ?MODULE, [Filename, MaxBytes, Size, Date, Count, Rotator], []). %% @private init([RelFilename, MaxBytes, Size, Date, Count, Rotator]) -> Filename = lager_util:expand_path(RelFilename), case Rotator:open_logfile(Filename, false) of {ok, {FD, Inode, Ctime, _Size}} -> schedule_rotation(Date), {ok, #state{name=Filename, fd=FD, inode=Inode, ctime=Ctime, fmtmaxbytes=MaxBytes, size=Size, count=Count, date=Date, rotator=Rotator}}; {error, Reason} -> ?INT_LOG(error, "Failed to open crash log file ~ts with error: ~s", [Filename, file:format_error(Reason)]), {ok, #state{name=Filename, fmtmaxbytes=MaxBytes, flap=true, size=Size, count=Count, date=Date, rotator=Rotator}} end. %% @private handle_call({log, _} = Log, _From, State) -> {Reply, NewState} = do_log(Log, State), {reply, Reply, NewState}; handle_call(_Call, _From, State) -> {reply, ok, State}. 
%% @private handle_cast({log, _} = Log, State) -> {_, NewState} = do_log(Log, State), {noreply, NewState}; handle_cast(_Request, State) -> {noreply, State}. %% @private handle_info(rotate, #state{name=Name, count=Count, date=Date, rotator=Rotator} = State) -> _ = Rotator:rotate_logfile(Name, Count), schedule_rotation(Date), {noreply, State}; handle_info(_Info, State) -> {noreply, State}. %% @private terminate(_Reason, _State) -> ok. %% @private code_change(_OldVsn, State, _Extra) -> {ok, State}. schedule_rotation(undefined) -> ok; schedule_rotation(Date) -> erlang:send_after(lager_util:calculate_next_rotation(Date) * 1000, self(), rotate), ok. %% ===== Begin code lifted from riak_err ===== -spec limited_fmt(string(), list(), integer()) -> iolist(). %% @doc Format Fmt and Args similar to what io_lib:format/2 does but with %% limits on how large the formatted string may be. %% %% If the Args list's size is larger than TermMaxSize, then the %% formatting is done by trunc_io:print/2, where FmtMaxBytes is used %% to limit the formatted string's size. limited_fmt(Fmt, Args, FmtMaxBytes) -> lager:safe_format(Fmt, Args, FmtMaxBytes). limited_str(Term, FmtMaxBytes) -> {Str, _} = lager_trunc_io:print(Term, FmtMaxBytes), Str. other_node_suffix(Pid) when node(Pid) =/= node() -> "** at node " ++ atom_to_list(node(Pid)) ++ " **\n"; other_node_suffix(_) -> "". perhaps_a_sasl_report(error_report, {Pid, Type, Report}, FmtMaxBytes) -> case lager_stdlib:is_my_error_report(Type) of true -> {sasl_type_to_report_head(Type), Pid, sasl_limited_str(Type, Report, FmtMaxBytes), true}; false -> {ignore, ignore, ignore, false} end; %perhaps_a_sasl_report(info_report, {Pid, Type, Report}, FmtMaxBytes) -> %case lager_stdlib:is_my_info_report(Type) of %true -> %{sasl_type_to_report_head(Type), Pid, %sasl_limited_str(Type, Report, FmtMaxBytes), false}; %false -> %{ignore, ignore, ignore, false} %end; perhaps_a_sasl_report(_, _, _) -> {ignore, ignore, ignore, false}. sasl_type_to_report_head(supervisor_report) -> "SUPERVISOR REPORT"; sasl_type_to_report_head(crash_report) -> "CRASH REPORT"; sasl_type_to_report_head(progress) -> "PROGRESS REPORT". sasl_limited_str(supervisor_report, Report, FmtMaxBytes) -> Name = lager_stdlib:sup_get(supervisor, Report), Context = lager_stdlib:sup_get(errorContext, Report), Reason = lager_stdlib:sup_get(reason, Report), Offender = lager_stdlib:sup_get(offender, Report), FmtString = " Supervisor: ~p~n Context: ~p~n Reason: " "~s~n Offender: ~s~n~n", {ReasonStr, _} = lager_trunc_io:print(Reason, FmtMaxBytes), {OffenderStr, _} = lager_trunc_io:print(Offender, FmtMaxBytes), io_lib:format(FmtString, [Name, Context, ReasonStr, OffenderStr]); sasl_limited_str(progress, Report, FmtMaxBytes) -> [begin {Str, _} = lager_trunc_io:print(Data, FmtMaxBytes), io_lib:format(" ~16w: ~s~n", [Tag, Str]) end || {Tag, Data} <- Report]; sasl_limited_str(crash_report, Report, FmtMaxBytes) -> lager_stdlib:proc_lib_format(Report, FmtMaxBytes). 
do_log({log, Event}, #state{name=Name, fd=FD, inode=Inode, ctime=Ctime, flap=Flap, fmtmaxbytes=FmtMaxBytes, size=RotSize, count=Count, rotator=Rotator} = State) -> %% borrowed from riak_err {ReportStr, Pid, MsgStr, _ErrorP} = case Event of {error, _GL, {Pid1, Fmt, Args}} -> {"ERROR REPORT", Pid1, limited_fmt(Fmt, Args, FmtMaxBytes), true}; {error_report, _GL, {Pid1, std_error, Rep}} -> {"ERROR REPORT", Pid1, limited_str(Rep, FmtMaxBytes) ++ "\n", true}; {error_report, _GL, Other} -> perhaps_a_sasl_report(error_report, Other, FmtMaxBytes); _ -> {ignore, ignore, ignore, false} end, if ReportStr == ignore -> {ok, State}; true -> case Rotator:ensure_logfile(Name, FD, Inode, Ctime, false) of {ok, {_FD, _Inode, _Ctime, Size}} when RotSize /= 0, Size > RotSize -> _ = Rotator:rotate_logfile(Name, Count), handle_cast({log, Event}, State); {ok, {NewFD, NewInode, NewCtime, _Size}} -> {Date, TS} = lager_util:format_time( lager_stdlib:maybe_utc(erlang:localtime())), Time = [Date, " ", TS," =", ReportStr, "====\n"], NodeSuffix = other_node_suffix(Pid), Msg = io_lib:format("~s~s~s", [Time, MsgStr, NodeSuffix]), case file:write(NewFD, unicode:characters_to_binary(Msg)) of {error, Reason} when Flap == false -> ?INT_LOG(error, "Failed to write log message to file ~ts: ~s", [Name, file:format_error(Reason)]), {ok, State#state{fd=NewFD, inode=NewInode, ctime=NewCtime, flap=true}}; ok -> {ok, State#state{fd=NewFD, inode=NewInode, ctime=NewCtime, flap=false}}; _ -> {ok, State#state{fd=NewFD, inode=NewInode, ctime=NewCtime}} end; {error, Reason} -> case Flap of true -> {ok, State}; _ -> ?INT_LOG(error, "Failed to reopen crash log ~ts with error: ~s", [Name, file:format_error(Reason)]), {ok, State#state{flap=true}} end end end. -ifdef(TEST). filesystem_test_() -> {foreach, fun() -> {ok, TestDir} = lager_util:create_test_dir(), CrashLog = filename:join(TestDir, "crash_test.log"), ok = lager_util:safe_write_file(CrashLog, []), ok = error_logger:tty(false), ok = lager_util:safe_application_load(lager), ok = application:set_env(lager, handlers, [{lager_test_backend, info}]), ok = application:set_env(lager, error_logger_redirect, true), ok = application:unset_env(lager, crash_log), ok = lager:start(), ok = timer:sleep(1000), ok = lager_test_backend:flush(), CrashLog end, fun(_CrashLog) -> case whereis(lager_crash_log) of P when is_pid(P) -> gen_server:stop(P); _ -> ok end, ok = application:stop(lager), ok = application:stop(goldrush), ok = lager_util:delete_test_dir(), ok = error_logger:tty(true) end, [ fun(CrashLog) -> {"under normal circumstances, file should be opened", fun() -> {ok, _} = ?MODULE:start_link(CrashLog, 65535, 0, undefined, 0, lager_rotator_default), _ = gen_event:which_handlers(error_logger), sync_error_logger:error_msg("Test message\n"), {ok, Bin} = file:read_file(CrashLog), ?assertMatch([_, "Test message\n"], re:split(Bin, "\n", [{return, list}, {parts, 2}])) end} end, fun(CrashLog) -> {"file can't be opened on startup triggers an error message", fun() -> {ok, FInfo0} = file:read_file_info(CrashLog, [raw]), FInfo1 = FInfo0#file_info{mode = 0}, ?assertEqual(ok, file:write_file_info(CrashLog, FInfo1)), {ok, _} = ?MODULE:start_link(CrashLog, 65535, 0, undefined, 0, lager_rotator_default), % Note: required on win32, do this early to prevent subsequent failures % from preventing cleanup ?assertEqual(ok, file:write_file_info(CrashLog, FInfo0)), ?assertEqual(1, lager_test_backend:count()), {_Level, _Time, Message,_Metadata} = lager_test_backend:pop(), ?assertEqual( "Failed to open crash log file " ++ 
CrashLog ++ " with error: permission denied", lists:flatten(Message)) end} end, fun(CrashLog) -> {"file that becomes unavailable at runtime should trigger an error message", fun() -> case os:type() of {win32, _} -> % Note: test is skipped on win32 due to the fact that a file can't be deleted or renamed % while a process has an open file handle referencing it ok; _ -> {ok, _} = ?MODULE:start_link(CrashLog, 65535, 0, undefined, 0, lager_rotator_default), ?assertEqual(0, lager_test_backend:count()), sync_error_logger:error_msg("Test message\n"), _ = gen_event:which_handlers(error_logger), ?assertEqual(1, lager_test_backend:count()), ?assertEqual(ok, file:delete(CrashLog)), ?assertEqual(ok, lager_util:safe_write_file(CrashLog, "")), {ok, FInfo0} = file:read_file_info(CrashLog, [raw]), FInfo1 = FInfo0#file_info{mode = 0}, ?assertEqual(ok, file:write_file_info(CrashLog, FInfo1)), sync_error_logger:error_msg("Test message\n"), _ = gen_event:which_handlers(error_logger), % Note: required on win32, do this early to prevent subsequent failures % from preventing cleanup ?assertEqual(ok, file:write_file_info(CrashLog, FInfo0)), ?assertEqual(3, lager_test_backend:count()), lager_test_backend:pop(), {_Level, _Time, Message,_Metadata} = lager_test_backend:pop(), ?assertEqual( "Failed to reopen crash log " ++ CrashLog ++ " with error: permission denied", lists:flatten(Message)) end end} end, fun(CrashLog) -> {"unavailable files that are fixed at runtime should start having log messages written", fun() -> {ok, FInfo} = file:read_file_info(CrashLog, [raw]), OldPerms = FInfo#file_info.mode, file:write_file_info(CrashLog, FInfo#file_info{mode = 0}), {ok, _} = ?MODULE:start_link(CrashLog, 65535, 0, undefined, 0, lager_rotator_default), ?assertEqual(1, lager_test_backend:count()), {_Level, _Time, Message,_Metadata} = lager_test_backend:pop(), ?assertEqual( "Failed to open crash log file " ++ CrashLog ++ " with error: permission denied", lists:flatten(Message)), file:write_file_info(CrashLog, FInfo#file_info{mode = OldPerms}), sync_error_logger:error_msg("Test message~n"), _ = gen_event:which_handlers(error_logger), {ok, Bin} = file:read_file(CrashLog), ?assertMatch([_, "Test message\n"], re:split(Bin, "\n", [{return, list}, {parts, 2}])) end} end, fun(CrashLog) -> {"external logfile rotation/deletion should be handled", fun() -> case os:type() of {win32, _} -> % Note: test is skipped on win32 due to the fact that a file can't be deleted or renamed % while a process has an open file handle referencing it ok; _ -> {ok, _} = ?MODULE:start_link(CrashLog, 65535, 0, undefined, 0, lager_rotator_default), ?assertEqual(0, lager_test_backend:count()), sync_error_logger:error_msg("Test message~n"), _ = gen_event:which_handlers(error_logger), {ok, Bin} = file:read_file(CrashLog), ?assertMatch([_, "Test message\n"], re:split(Bin, "\n", [{return, list}, {parts, 2}])), ?assertEqual(ok, file:delete(CrashLog)), ?assertEqual(ok, lager_util:safe_write_file(CrashLog, "")), sync_error_logger:error_msg("Test message1~n"), _ = gen_event:which_handlers(error_logger), {ok, Bin1} = file:read_file(CrashLog), ?assertMatch([_, "Test message1\n"], re:split(Bin1, "\n", [{return, list}, {parts, 2}])), file:rename(CrashLog, CrashLog ++ ".0"), sync_error_logger:error_msg("Test message2~n"), _ = gen_event:which_handlers(error_logger), {ok, Bin2} = file:read_file(CrashLog), ?assertMatch([_, "Test message2\n"], re:split(Bin2, "\n", [{return, list}, {parts, 2}])) end end} end ]}. -endif. 
lager-3.8.0/src/lager_trunc_io.erl0000644000232200023220000012116513523436621017464 0ustar debalancedebalance%% ``The contents of this file are subject to the Erlang Public License, %% Version 1.1, (the "License"); you may not use this file except in %% compliance with the License. You should have received a copy of the %% Erlang Public License along with your Erlang distribution. If not, it can be %% retrieved via the world wide web at http://www.erlang.org/. %% %% Software distributed under the License is distributed on an "AS IS" %% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See %% the License for the specific language governing rights and limitations %% under the License. %% %% The Initial Developer of the Original Code is Corelatus AB. %% Portions created by Corelatus are Copyright 2003, Corelatus %% AB. All Rights Reserved.'' %% %% @doc Module to print out terms for logging. Limits by length rather than depth. %% %% The resulting string may be slightly larger than the limit; the intention %% is to provide predictable CPU and memory consumption for formatting %% terms, not produce precise string lengths. %% %% Typical use: %% %% trunc_io:print(Term, 500). %% %% Source license: Erlang Public License. %% Original author: Matthias Lang, matthias@corelatus.se %% %% Various changes to this module, most notably the format/3 implementation %% were added by Andrew Thompson `'. The module has been renamed %% to avoid conflicts with the vanilla module. -module(lager_trunc_io). -author('matthias@corelatus.se'). %% And thanks to Chris Newcombe for a bug fix -export([format/3, format/4, print/2, print/3, fprint/2, fprint/3, safe/2]). % interface functions -version("$Id: trunc_io.erl,v 1.11 2009-02-23 12:01:06 matthias Exp $"). -ifdef(TEST). -export([perf/0, perf/3, perf1/0, test/0, test/2]). % testing functions -include_lib("eunit/include/eunit.hrl"). -endif. -type option() :: {'depth', integer()} | {'lists_as_strings', boolean()} | {'force_strings', boolean()}. -type options() :: [option()]. -record(print_options, { %% negative depth means no depth limiting depth = -1 :: integer(), %% whether to print lists as strings, if possible lists_as_strings = true :: boolean(), %% force strings, or binaries to be printed as a string, %% even if they're not printable force_strings = false :: boolean() }). format(Fmt, Args, Max) -> format(Fmt, Args, Max, []). format(Fmt, Args, Max, Options) -> try lager_format:format(Fmt, Args, Max, Options) catch _What:_Why -> erlang:error(badarg, [Fmt, Args]) end. %% @doc Returns an flattened list containing the ASCII representation of the given %% term. -spec fprint(term(), pos_integer()) -> string(). fprint(Term, Max) -> fprint(Term, Max, []). %% @doc Returns an flattened list containing the ASCII representation of the given %% term. -spec fprint(term(), pos_integer(), options()) -> string(). fprint(T, Max, Options) -> {L, _} = print(T, Max, prepare_options(Options, #print_options{})), lists:flatten(L). %% @doc Same as print, but never crashes. %% %% This is a tradeoff. Print might conceivably crash if it's asked to %% print something it doesn't understand, for example some new data %% type in a future version of Erlang. If print crashes, we fall back %% to io_lib to format the term, but then the formatting is %% depth-limited instead of length limited, so you might run out %% memory printing it. Out of the frying pan and into the fire. %% -spec safe(term(), pos_integer()) -> {string(), pos_integer()} | {string()}. 
safe(What, Len) -> case catch print(What, Len) of {L, Used} when is_list(L) -> {L, Used}; _ -> {"unable to print" ++ io_lib:write(What, 99)} end. %% @doc Returns {List, Length} -spec print(term(), pos_integer()) -> {iolist(), pos_integer()}. print(Term, Max) -> print(Term, Max, []). %% @doc Returns {List, Length} -spec print(term(), pos_integer(), options() | #print_options{}) -> {iolist(), pos_integer()}. print(Term, Max, Options) when is_list(Options) -> %% need to convert the proplist to a record print(Term, Max, prepare_options(Options, #print_options{})); print(Term, _Max, #print_options{force_strings=true}) when not is_list(Term), not is_binary(Term), not is_atom(Term) -> erlang:error(badarg); print(_, Max, _Options) when Max < 0 -> {"...", 3}; print(_, _, #print_options{depth=0}) -> {"...", 3}; %% @doc We assume atoms, floats, funs, integers, PIDs, ports and refs never need %% to be truncated. This isn't strictly true, someone could make an %% arbitrarily long bignum. Let's assume that won't happen unless someone %% is being malicious. %% print(Atom, _Max, #print_options{force_strings=NoQuote}) when is_atom(Atom) -> L = atom_to_list(Atom), R = case atom_needs_quoting_start(L) andalso not NoQuote of true -> lists:flatten([$', L, $']); false -> L end, {R, length(R)}; print(<<>>, _Max, #print_options{depth=1}) -> {"<<>>", 4}; print(Bin, _Max, #print_options{depth=1}) when is_binary(Bin) -> {"<<...>>", 7}; print(<<>>, _Max, Options) -> case Options#print_options.force_strings of true -> {"", 0}; false -> {"<<>>", 4} end; print(Binary, 0, _Options) when is_bitstring(Binary) -> {"<<..>>", 6}; print(Bin, Max, _Options) when is_binary(Bin), Max < 2 -> {"<<...>>", 7}; print(Binary, Max, Options) when is_binary(Binary) -> B = binary_to_list(Binary, 1, lists:min([Max, byte_size(Binary)])), {Res, Length} = case Options#print_options.lists_as_strings orelse Options#print_options.force_strings of true -> Depth = Options#print_options.depth, MaxSize = (Depth - 1) * 4, %% check if we need to truncate based on depth In = case Depth > -1 andalso MaxSize < length(B) andalso not Options#print_options.force_strings of true -> string:substr(B, 1, MaxSize); false -> B end, MaxLen = case Options#print_options.force_strings of true -> Max; false -> %% make room for the leading doublequote Max - 1 end, try alist(In, MaxLen, Options) of {L0, Len0} -> case Options#print_options.force_strings of false -> case B /= In of true -> {[$", L0, "..."], Len0+4}; false -> {[$"|L0], Len0+1} end; true -> {L0, Len0} end catch throw:{unprintable, C} -> Index = string:chr(In, C), case Index > 1 andalso Options#print_options.depth =< Index andalso Options#print_options.depth > -1 andalso not Options#print_options.force_strings of true -> %% print first Index-1 characters followed by ... {L0, Len0} = alist_start(string:substr(In, 1, Index - 1), Max - 1, Options), {L0++"...", Len0+3}; false -> list_body(In, Max-4, dec_depth(Options), true) end end; _ -> list_body(B, Max-4, dec_depth(Options), true) end, case Options#print_options.force_strings of true -> {Res, Length}; _ -> {["<<", Res, ">>"], Length+4} end; %% bitstrings are binary's evil brother who doesn't end on an 8 bit boundary. %% This makes printing them extremely annoying, so list_body/list_bodyc has %% some magic for dealing with the output of bitstring_to_list, which returns %% a list of integers (as expected) but with a trailing binary that represents %% the remaining bits. 
print({inline_bitstring, B}, _Max, _Options) when is_bitstring(B) -> Size = bit_size(B), <> = B, ValueStr = integer_to_list(Value), SizeStr = integer_to_list(Size), {[ValueStr, $:, SizeStr], length(ValueStr) + length(SizeStr) +1}; print(BitString, Max, Options) when is_bitstring(BitString) -> BL = case byte_size(BitString) > Max of true -> binary_to_list(BitString, 1, Max); _ -> R = erlang:bitstring_to_list(BitString), {Bytes, [Bits]} = lists:splitwith(fun erlang:is_integer/1, R), %% tag the trailing bits with a special tuple we catch when %% list_body calls print again Bytes ++ [{inline_bitstring, Bits}] end, {X, Len0} = list_body(BL, Max - 4, dec_depth(Options), true), {["<<", X, ">>"], Len0 + 4}; print(Float, _Max, _Options) when is_float(Float) -> %% use the same function io_lib:format uses to print floats %% float_to_list is way too verbose. L = io_lib_format:fwrite_g(Float), {L, length(L)}; print(Fun, Max, _Options) when is_function(Fun) -> L = erlang:fun_to_list(Fun), case length(L) > Max of true -> S = erlang:max(5, Max), Res = string:substr(L, 1, S) ++ "..>", {Res, length(Res)}; _ -> {L, length(L)} end; print(Integer, _Max, _Options) when is_integer(Integer) -> L = integer_to_list(Integer), {L, length(L)}; print(Pid, _Max, _Options) when is_pid(Pid) -> L = pid_to_list(Pid), {L, length(L)}; print(Ref, _Max, _Options) when is_reference(Ref) -> L = erlang:ref_to_list(Ref), {L, length(L)}; print(Port, _Max, _Options) when is_port(Port) -> L = erlang:port_to_list(Port), {L, length(L)}; print({'$lager_record', Name, Fields}, Max, Options) -> Leader = "#" ++ atom_to_list(Name) ++ "{", {RC, Len} = record_fields(Fields, Max - length(Leader) + 1, dec_depth(Options)), {[Leader, RC, "}"], Len + length(Leader) + 1}; print(Tuple, Max, Options) when is_tuple(Tuple) -> {TC, Len} = tuple_contents(Tuple, Max-2, Options), {[${, TC, $}], Len + 2}; print(List, Max, Options) when is_list(List) -> case Options#print_options.lists_as_strings orelse Options#print_options.force_strings of true -> alist_start(List, Max, dec_depth(Options)); _ -> {R, Len} = list_body(List, Max - 2, dec_depth(Options), false), {[$[, R, $]], Len + 2} end; print(Map, Max, Options) when is_map(Map) -> {MapBody, Len} = map_body(Map, Max - 3, dec_depth(Options)), {[$#, ${, MapBody, $}], Len + 3}; print(Term, Max, Options) -> error(badarg, [Term, Max, Options]). %% Returns {List, Length} tuple_contents(Tuple, Max, Options) -> L = tuple_to_list(Tuple), list_body(L, Max, dec_depth(Options), true). %% Format the inside of a list, i.e. do not add a leading [ or trailing ]. %% Returns {List, Length} list_body([], _Max, _Options, _Tuple) -> {[], 0}; list_body(_, Max, _Options, _Tuple) when Max < 4 -> {"...", 3}; list_body(_, _Max, #print_options{depth=0}, _Tuple) -> {"...", 3}; list_body([H], Max, Options=#print_options{depth=1}, _Tuple) -> print(H, Max, Options); list_body([H|_], Max, Options=#print_options{depth=1}, Tuple) -> {List, Len} = print(H, Max-4, Options), Sep = case Tuple of true -> $,; false -> $| end, {[List ++ [Sep | "..."]], Len + 4}; list_body([H|T], Max, Options, Tuple) -> {List, Len} = print(H, Max, Options), {Final, FLen} = list_bodyc(T, Max - Len, Options, Tuple), {[List|Final], FLen + Len}; list_body(X, Max, Options, _Tuple) -> %% improper list {List, Len} = print(X, Max - 1, Options), {[$|,List], Len + 1}. 
list_bodyc([], _Max, _Options, _Tuple) -> {[], 0}; list_bodyc(_, Max, _Options, _Tuple) when Max < 5 -> {",...", 4}; list_bodyc(_, _Max, #print_options{depth=1}, true) -> {",...", 4}; list_bodyc(_, _Max, #print_options{depth=1}, false) -> {"|...", 4}; list_bodyc([H|T], Max, #print_options{depth=Depth} = Options, Tuple) -> {List, Len} = print(H, Max, dec_depth(Options)), {Final, FLen} = list_bodyc(T, Max - Len - 1, dec_depth(Options), Tuple), Sep = case Depth == 1 andalso not Tuple of true -> $|; _ -> $, end, {[Sep, List|Final], FLen + Len + 1}; list_bodyc(X, Max, Options, _Tuple) -> %% improper list {List, Len} = print(X, Max - 1, Options), {[$|,List], Len + 1}. map_body(Map, Max, #print_options{depth=Depth}) when Max < 4; Depth =:= 0 -> case erlang:map_size(Map) of 0 -> {[], 0}; _ -> {"...", 3} end; map_body(Map, Max, Options) -> case maps:to_list(Map) of [] -> {[], 0}; [{Key, Value} | Rest] -> {KeyStr, KeyLen} = print(Key, Max - 4, Options), DiffLen = KeyLen + 4, {ValueStr, ValueLen} = print(Value, Max - DiffLen, Options), DiffLen2 = DiffLen + ValueLen, {Final, FLen} = map_bodyc(Rest, Max - DiffLen2, dec_depth(Options)), {[KeyStr, " => ", ValueStr | Final], DiffLen2 + FLen} end. map_bodyc([], _Max, _Options) -> {[], 0}; map_bodyc(_Rest, Max,#print_options{depth=Depth}) when Max < 5; Depth =:= 0 -> {",...", 4}; map_bodyc([{Key, Value} | Rest], Max, Options) -> {KeyStr, KeyLen} = print(Key, Max - 5, Options), DiffLen = KeyLen + 5, {ValueStr, ValueLen} = print(Value, Max - DiffLen, Options), DiffLen2 = DiffLen + ValueLen, {Final, FLen} = map_bodyc(Rest, Max - DiffLen2, dec_depth(Options)), {[$,, KeyStr, " => ", ValueStr | Final], DiffLen2 + FLen}. %% The head of a list we hope is ascii. Examples: %% %% [65,66,67] -> "ABC" %% [65,0,67] -> "A"[0,67] %% [0,65,66] -> [0,65,66] %% [65,b,66] -> "A"[b,66] %% alist_start([], _Max, #print_options{force_strings=true}) -> {"", 0}; alist_start([], _Max, _Options) -> {"[]", 2}; alist_start(_, Max, _Options) when Max < 4 -> {"...", 3}; alist_start(_, _Max, #print_options{depth=0}) -> {"[...]", 5}; alist_start(L, Max, #print_options{force_strings=true} = Options) -> alist(L, Max, Options); %alist_start([H|_T], _Max, #print_options{depth=1}) when is_integer(H) -> {[$[, H, $|, $., $., $., $]], 7}; alist_start([H|T], Max, Options) when is_integer(H), H >= 16#20, H =< 16#7e -> % definitely printable try alist([H|T], Max -1, Options) of {L, Len} -> {[$"|L], Len + 1} catch throw:{unprintable, _} -> {R, Len} = list_body([H|T], Max-2, Options, false), {[$[, R, $]], Len + 2} end; alist_start([H|T], Max, Options) when is_integer(H), H >= 16#a0, H =< 16#ff -> % definitely printable try alist([H|T], Max -1, Options) of {L, Len} -> {[$"|L], Len + 1} catch throw:{unprintable, _} -> {R, Len} = list_body([H|T], Max-2, Options, false), {[$[, R, $]], Len + 2} end; alist_start([H|T], Max, Options) when H =:= $\t; H =:= $\n; H =:= $\r; H =:= $\v; H =:= $\e; H=:= $\f; H=:= $\b -> try alist([H|T], Max -1, Options) of {L, Len} -> {[$"|L], Len + 1} catch throw:{unprintable, _} -> {R, Len} = list_body([H|T], Max-2, Options, false), {[$[, R, $]], Len + 2} end; alist_start(L, Max, Options) -> {R, Len} = list_body(L, Max-2, Options, false), {[$[, R, $]], Len + 2}. 
alist([], _Max, #print_options{force_strings=true}) -> {"", 0}; alist([], _Max, _Options) -> {"\"", 1}; alist(_, Max, #print_options{force_strings=true}) when Max < 4 -> {"...", 3}; alist(_, Max, #print_options{force_strings=false}) when Max < 5 -> {"...\"", 4}; alist([H|T], Max, Options = #print_options{force_strings=false,lists_as_strings=true}) when H =:= $"; H =:= $\\ -> %% preserve escaping around quotes {L, Len} = alist(T, Max-1, Options), {[$\\,H|L], Len + 2}; alist([H|T], Max, Options) when is_integer(H), H >= 16#20, H =< 16#7e -> % definitely printable {L, Len} = alist(T, Max-1, Options), {[H|L], Len + 1}; alist([H|T], Max, Options) when is_integer(H), H >= 16#a0, H =< 16#ff -> % definitely printable {L, Len} = alist(T, Max-1, Options), {[H|L], Len + 1}; alist([H|T], Max, Options) when H =:= $\t; H =:= $\n; H =:= $\r; H =:= $\v; H =:= $\e; H=:= $\f; H=:= $\b -> {L, Len} = alist(T, Max-1, Options), case Options#print_options.force_strings of true -> {[H|L], Len + 1}; _ -> {[escape(H)|L], Len + 1} end; alist([H|T], Max, #print_options{force_strings=true} = Options) when is_integer(H) -> {L, Len} = alist(T, Max-1, Options), {[H|L], Len + 1}; alist([H|T], Max, Options = #print_options{force_strings=true}) when is_binary(H); is_list(H) -> {List, Len} = print(H, Max, Options), case (Max - Len) =< 0 of true -> %% no more room to print anything {List, Len}; false -> %% no need to decrement depth, as we're in printable string mode {Final, FLen} = alist(T, Max - Len, Options), {[List|Final], FLen+Len} end; alist(_, _, #print_options{force_strings=true}) -> erlang:error(badarg); alist([H|_L], _Max, _Options) -> throw({unprintable, H}); alist(H, _Max, _Options) -> %% improper list throw({unprintable, H}). %% is the first character in the atom alphabetic & lowercase? atom_needs_quoting_start([H|T]) when H >= $a, H =< $z -> atom_needs_quoting(T); atom_needs_quoting_start(_) -> true. atom_needs_quoting([]) -> false; atom_needs_quoting([H|T]) when (H >= $a andalso H =< $z); (H >= $A andalso H =< $Z); (H >= $0 andalso H =< $9); H == $@; H == $_ -> atom_needs_quoting(T); atom_needs_quoting(_) -> true. -spec prepare_options(options(), #print_options{}) -> #print_options{}. prepare_options([], Options) -> Options; prepare_options([{depth, Depth}|T], Options) when is_integer(Depth) -> prepare_options(T, Options#print_options{depth=Depth}); prepare_options([{lists_as_strings, Bool}|T], Options) when is_boolean(Bool) -> prepare_options(T, Options#print_options{lists_as_strings = Bool}); prepare_options([{force_strings, Bool}|T], Options) when is_boolean(Bool) -> prepare_options(T, Options#print_options{force_strings = Bool}). dec_depth(#print_options{depth=Depth} = Options) when Depth > 0 -> Options#print_options{depth=Depth-1}; dec_depth(Options) -> Options. escape($\t) -> "\\t"; escape($\n) -> "\\n"; escape($\r) -> "\\r"; escape($\e) -> "\\e"; escape($\f) -> "\\f"; escape($\b) -> "\\b"; escape($\v) -> "\\v". record_fields([], _, _) -> {"", 0}; record_fields(_, Max, #print_options{depth=D}) when Max < 4; D == 0 -> {"...", 3}; record_fields([{Field, Value}|T], Max, Options) -> {ExtraChars, Terminator} = case T of [] -> {1, []}; _ -> {2, ","} end, {FieldStr, FieldLen} = print(Field, Max - ExtraChars, Options), {ValueStr, ValueLen} = print(Value, Max - (FieldLen + ExtraChars), Options), {Final, FLen} = record_fields(T, Max - (FieldLen + ValueLen + ExtraChars), dec_depth(Options)), {[FieldStr++"="++ValueStr++Terminator|Final], FLen + FieldLen + ValueLen + ExtraChars}. -ifdef(TEST). 
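%% Illustrative usage sketch (added example, not part of the original
%% suite): print/2,3 returns {IoList, Length}, and a {depth, N} option
%% limits how deeply the term is rendered, consistent with the depth
%% tests further down.
usage_sketch_test() ->
    {Str, Len} = print({example, "term", [1, 2, 3]}, 50),
    ?assert(is_list(Str)),
    ?assert(is_integer(Len) andalso Len > 0),
    {Limited, _} = print([1, 2, 3], 50, [{depth, 3}]),
    ?assertEqual("[1,2|...]", lists:flatten(Limited)).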
%%-------------------- %% The start of a test suite. So far, it only checks for not crashing. -spec test() -> ok. test() -> test(trunc_io, print). -spec test(atom(), atom()) -> ok. test(Mod, Func) -> Simple_items = [atom, 1234, 1234.0, {tuple}, [], [list], "string", self(), <<1,2,3>>, make_ref(), fun() -> ok end], F = fun(A) -> Mod:Func(A, 100), Mod:Func(A, 2), Mod:Func(A, 20) end, G = fun(A) -> case catch F(A) of {'EXIT', _} -> exit({failed, A}); _ -> ok end end, lists:foreach(G, Simple_items), Tuples = [ {1,2,3,a,b,c}, {"abc", def, 1234}, {{{{a},b,c,{d},e}},f}], Lists = [ [1,2,3,4,5,6,7], lists:seq(1,1000), [{a}, {a,b}, {a, [b,c]}, "def"], [a|b], [$a|$b] ], lists:foreach(G, Tuples), lists:foreach(G, Lists). -spec perf() -> ok. perf() -> {New, _} = timer:tc(trunc_io, perf, [trunc_io, print, 1000]), {Old, _} = timer:tc(trunc_io, perf, [io_lib, write, 1000]), io:fwrite("New code took ~p us, old code ~p\n", [New, Old]). -spec perf(atom(), atom(), integer()) -> done. perf(M, F, Reps) when Reps > 0 -> test(M,F), perf(M,F,Reps-1); perf(_,_,_) -> done. %% Performance test. Needs a particularly large term I saved as a binary... -spec perf1() -> {non_neg_integer(), non_neg_integer()}. perf1() -> {ok, Bin} = file:read_file("bin"), A = binary_to_term(Bin), {N, _} = timer:tc(trunc_io, print, [A, 1500]), {M, _} = timer:tc(io_lib, write, [A]), {N, M}. format_test() -> %% simple format strings ?assertEqual("foobar", lists:flatten(format("~s", [["foo", $b, $a, $r]], 50))), ?assertEqual("[\"foo\",98,97,114]", lists:flatten(format("~p", [["foo", $b, $a, $r]], 50))), ?assertEqual("[\"foo\",98,97,114]", lists:flatten(format("~P", [["foo", $b, $a, $r], 10], 50))), ?assertEqual("[[102,111,111],98,97,114]", lists:flatten(format("~w", [["foo", $b, $a, $r]], 50))), %% complex ones ?assertEqual(" foobar", lists:flatten(format("~10s", [["foo", $b, $a, $r]], 50))), ?assertEqual("f", lists:flatten(format("~1s", [["foo", $b, $a, $r]], 50))), ?assertEqual("[\"foo\",98,97,114]", lists:flatten(format("~22p", [["foo", $b, $a, $r]], 50))), ?assertEqual("[\"foo\",98,97,114]", lists:flatten(format("~22P", [["foo", $b, $a, $r], 10], 50))), ?assertEqual("**********", lists:flatten(format("~10W", [["foo", $b, $a, $r], 10], 50))), ?assertEqual("[[102,111,111],98,97,114]", lists:flatten(format("~25W", [["foo", $b, $a, $r], 10], 50))), % Note these next two diverge from io_lib:format; the field width is % ignored, when it should be used as max line length. ?assertEqual("[\"foo\",98,97,114]", lists:flatten(format("~10p", [["foo", $b, $a, $r]], 50))), ?assertEqual("[\"foo\",98,97,114]", lists:flatten(format("~10P", [["foo", $b, $a, $r], 10], 50))), ok. atom_quoting_test() -> ?assertEqual("hello", lists:flatten(format("~p", [hello], 50))), ?assertEqual("'hello world'", lists:flatten(format("~p", ['hello world'], 50))), ?assertEqual("'Hello world'", lists:flatten(format("~p", ['Hello world'], 50))), ?assertEqual("hello_world", lists:flatten(format("~p", ['hello_world'], 50))), ?assertEqual("'node@127.0.0.1'", lists:flatten(format("~p", ['node@127.0.0.1'], 50))), ?assertEqual("node@nohost", lists:flatten(format("~p", [node@nohost], 50))), ?assertEqual("abc123", lists:flatten(format("~p", [abc123], 50))), ok. 
sane_float_printing_test() -> ?assertEqual("1.0", lists:flatten(format("~p", [1.0], 50))), ?assertEqual("1.23456789", lists:flatten(format("~p", [1.23456789], 50))), ?assertEqual("1.23456789", lists:flatten(format("~p", [1.234567890], 50))), ?assertEqual("0.3333333333333333", lists:flatten(format("~p", [1/3], 50))), ?assertEqual("0.1234567", lists:flatten(format("~p", [0.1234567], 50))), ok. float_inside_list_test() -> ?assertEqual("[97,38.233913133184835,99]", lists:flatten(format("~p", [[$a, 38.233913133184835, $c]], 50))), ?assertError(badarg, lists:flatten(format("~s", [[$a, 38.233913133184835, $c]], 50))), ok. quote_strip_test() -> ?assertEqual("\"hello\"", lists:flatten(format("~p", ["hello"], 50))), ?assertEqual("hello", lists:flatten(format("~s", ["hello"], 50))), ?assertEqual("hello", lists:flatten(format("~s", [hello], 50))), ?assertEqual("hello", lists:flatten(format("~p", [hello], 50))), ?assertEqual("'hello world'", lists:flatten(format("~p", ['hello world'], 50))), ?assertEqual("hello world", lists:flatten(format("~s", ['hello world'], 50))), ok. binary_printing_test() -> ?assertEqual("<<>>", lists:flatten(format("~p", [<<>>], 50))), ?assertEqual("", lists:flatten(format("~s", [<<>>], 50))), ?assertEqual("<<..>>", lists:flatten(format("~p", [<<"hi">>], 0))), ?assertEqual("<<...>>", lists:flatten(format("~p", [<<"hi">>], 1))), ?assertEqual("<<\"hello\">>", lists:flatten(format("~p", [<<$h, $e, $l, $l, $o>>], 50))), ?assertEqual("<<\"hello\">>", lists:flatten(format("~p", [<<"hello">>], 50))), ?assertEqual("<<104,101,108,108,111>>", lists:flatten(format("~w", [<<"hello">>], 50))), ?assertEqual("<<1,2,3,4>>", lists:flatten(format("~p", [<<1, 2, 3, 4>>], 50))), ?assertEqual([1,2,3,4], lists:flatten(format("~s", [<<1, 2, 3, 4>>], 50))), ?assertEqual("hello", lists:flatten(format("~s", [<<"hello">>], 50))), ?assertEqual("hello\nworld", lists:flatten(format("~s", [<<"hello\nworld">>], 50))), ?assertEqual("<<\"hello\\nworld\">>", lists:flatten(format("~p", [<<"hello\nworld">>], 50))), ?assertEqual("<<\"\\\"hello world\\\"\">>", lists:flatten(format("~p", [<<"\"hello world\"">>], 50))), ?assertEqual("<<\"hello\\\\world\">>", lists:flatten(format("~p", [<<"hello\\world">>], 50))), ?assertEqual("<<\"hello\\\\\world\">>", lists:flatten(format("~p", [<<"hello\\\world">>], 50))), ?assertEqual("<<\"hello\\\\\\\\world\">>", lists:flatten(format("~p", [<<"hello\\\\world">>], 50))), ?assertEqual("<<\"hello\\bworld\">>", lists:flatten(format("~p", [<<"hello\bworld">>], 50))), ?assertEqual("<<\"hello\\tworld\">>", lists:flatten(format("~p", [<<"hello\tworld">>], 50))), ?assertEqual("<<\"hello\\nworld\">>", lists:flatten(format("~p", [<<"hello\nworld">>], 50))), ?assertEqual("<<\"hello\\rworld\">>", lists:flatten(format("~p", [<<"hello\rworld">>], 50))), ?assertEqual("<<\"hello\\eworld\">>", lists:flatten(format("~p", [<<"hello\eworld">>], 50))), ?assertEqual("<<\"hello\\fworld\">>", lists:flatten(format("~p", [<<"hello\fworld">>], 50))), ?assertEqual("<<\"hello\\vworld\">>", lists:flatten(format("~p", [<<"hello\vworld">>], 50))), ?assertEqual(" hello", lists:flatten(format("~10s", [<<"hello">>], 50))), ?assertEqual("[a]", lists:flatten(format("~s", [<<"[a]">>], 50))), ?assertEqual("[a]", lists:flatten(format("~s", [[<<"[a]">>]], 50))), ok. 
bitstring_printing_test() -> ?assertEqual("<<1,2,3,1:7>>", lists:flatten(format("~p", [<<1, 2, 3, 1:7>>], 100))), ?assertEqual("<<1:7>>", lists:flatten(format("~p", [<<1:7>>], 100))), ?assertEqual("<<1,2,3,...>>", lists:flatten(format("~p", [<<1, 2, 3, 1:7>>], 12))), ?assertEqual("<<1,2,3,...>>", lists:flatten(format("~p", [<<1, 2, 3, 1:7>>], 13))), ?assertEqual("<<1,2,3,1:7>>", lists:flatten(format("~p", [<<1, 2, 3, 1:7>>], 14))), ?assertEqual("<<..>>", lists:flatten(format("~p", [<<1:7>>], 0))), ?assertEqual("<<...>>", lists:flatten(format("~p", [<<1:7>>], 1))), ?assertEqual("[<<1>>,<<2>>]", lists:flatten(format("~p", [[<<1>>, <<2>>]], 100))), ?assertEqual("{<<1:7>>}", lists:flatten(format("~p", [{<<1:7>>}], 50))), ok. list_printing_test() -> ?assertEqual("[]", lists:flatten(format("~p", [[]], 50))), ?assertEqual("[]", lists:flatten(format("~w", [[]], 50))), ?assertEqual("", lists:flatten(format("~s", [[]], 50))), ?assertEqual("...", lists:flatten(format("~s", [[]], -1))), ?assertEqual("[[]]", lists:flatten(format("~p", [[[]]], 50))), ?assertEqual("[13,11,10,8,5,4]", lists:flatten(format("~p", [[13,11,10,8,5,4]], 50))), ?assertEqual("\"\\rabc\"", lists:flatten(format("~p", [[13,$a, $b, $c]], 50))), ?assertEqual("[1,2,3|4]", lists:flatten(format("~p", [[1, 2, 3|4]], 50))), ?assertEqual("[...]", lists:flatten(format("~p", [[1, 2, 3,4]], 4))), ?assertEqual("[1,...]", lists:flatten(format("~p", [[1, 2, 3, 4]], 6))), ?assertEqual("[1,...]", lists:flatten(format("~p", [[1, 2, 3, 4]], 7))), ?assertEqual("[1,2,...]", lists:flatten(format("~p", [[1, 2, 3, 4]], 8))), ?assertEqual("[1|4]", lists:flatten(format("~p", [[1|4]], 50))), ?assertEqual("[1]", lists:flatten(format("~p", [[1]], 50))), ?assertError(badarg, lists:flatten(format("~s", [[1|4]], 50))), ?assertEqual("\"hello...\"", lists:flatten(format("~p", ["hello world"], 10))), ?assertEqual("hello w...", lists:flatten(format("~s", ["hello world"], 10))), ?assertEqual("hello world\r\n", lists:flatten(format("~s", ["hello world\r\n"], 50))), ?assertEqual("\rhello world\r\n", lists:flatten(format("~s", ["\rhello world\r\n"], 50))), ?assertEqual("\"\\rhello world\\r\\n\"", lists:flatten(format("~p", ["\rhello world\r\n"], 50))), ?assertEqual("[13,104,101,108,108,111,32,119,111,114,108,100,13,10]", lists:flatten(format("~w", ["\rhello world\r\n"], 60))), ?assertEqual("...", lists:flatten(format("~s", ["\rhello world\r\n"], 3))), ?assertEqual("[22835963083295358096932575511191922182123945984,...]", lists:flatten(format("~p", [ [22835963083295358096932575511191922182123945984, 22835963083295358096932575511191922182123945984]], 9))), ?assertEqual("[22835963083295358096932575511191922182123945984,...]", lists:flatten(format("~p", [ [22835963083295358096932575511191922182123945984, 22835963083295358096932575511191922182123945984]], 53))), %%improper list ?assertEqual("[1,2,3|4]", lists:flatten(format("~P", [[1|[2|[3|4]]], 5], 50))), ?assertEqual("[1|1]", lists:flatten(format("~P", [[1|1], 5], 50))), ?assertEqual("[9|9]", lists:flatten(format("~p", [[9|9]], 50))), ok. 
iolist_printing_test() -> ?assertEqual("iolist: HelloIamaniolist", lists:flatten(format("iolist: ~s", [[$H, $e, $l, $l, $o, "I", ["am", [<<"an">>], [$i, $o, $l, $i, $s, $t]]]], 1000))), ?assertEqual("123...", lists:flatten(format("~s", [[<<"123456789">>, "HellIamaniolist"]], 6))), ?assertEqual("123456...", lists:flatten(format("~s", [[<<"123456789">>, "HellIamaniolist"]], 9))), ?assertEqual("123456789H...", lists:flatten(format("~s", [[<<"123456789">>, "HellIamaniolist"]], 13))), ?assertEqual("123456789HellIamaniolist", lists:flatten(format("~s", [[<<"123456789">>, "HellIamaniolist"]], 30))), ok. tuple_printing_test() -> ?assertEqual("{}", lists:flatten(format("~p", [{}], 50))), ?assertEqual("{}", lists:flatten(format("~w", [{}], 50))), ?assertError(badarg, lists:flatten(format("~s", [{}], 50))), ?assertEqual("{...}", lists:flatten(format("~p", [{foo}], 1))), ?assertEqual("{...}", lists:flatten(format("~p", [{foo}], 2))), ?assertEqual("{...}", lists:flatten(format("~p", [{foo}], 3))), ?assertEqual("{...}", lists:flatten(format("~p", [{foo}], 4))), ?assertEqual("{...}", lists:flatten(format("~p", [{foo}], 5))), ?assertEqual("{foo,...}", lists:flatten(format("~p", [{foo,bar}], 6))), ?assertEqual("{foo,...}", lists:flatten(format("~p", [{foo,bar}], 7))), ?assertEqual("{foo,...}", lists:flatten(format("~p", [{foo,bar}], 9))), ?assertEqual("{foo,bar}", lists:flatten(format("~p", [{foo,bar}], 10))), ?assertEqual("{22835963083295358096932575511191922182123945984,...}", lists:flatten(format("~w", [ {22835963083295358096932575511191922182123945984, 22835963083295358096932575511191922182123945984}], 10))), ?assertEqual("{22835963083295358096932575511191922182123945984,...}", lists:flatten(format("~w", [ {22835963083295358096932575511191922182123945984, bar}], 10))), ?assertEqual("{22835963083295358096932575511191922182123945984,...}", lists:flatten(format("~w", [ {22835963083295358096932575511191922182123945984, 22835963083295358096932575511191922182123945984}], 53))), ok. 
map_printing_test() -> ?assertEqual("#{}", lists:flatten(format("~p", [maps:new()], 50))), ?assertEqual("#{}", lists:flatten(format("~p", [maps:new()], 3))), ?assertEqual("#{}", lists:flatten(format("~w", [maps:new()], 50))), ?assertError(badarg, lists:flatten(format("~s", [maps:new()], 50))), ?assertEqual("#{...}", lists:flatten(format("~p", [maps:from_list([{bar, foo}])], 1))), ?assertEqual("#{...}", lists:flatten(format("~p", [maps:from_list([{bar, foo}])], 6))), ?assertEqual("#{bar => ...}", lists:flatten(format("~p", [maps:from_list([{bar, foo}])], 7))), ?assertEqual("#{bar => ...}", lists:flatten(format("~p", [maps:from_list([{bar, foo}])], 9))), ?assertEqual("#{bar => foo}", lists:flatten(format("~p", [maps:from_list([{bar, foo}])], 10))), ?assertEqual("#{bar => ...,...}", lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 9))), ?assertEqual("#{bar => foo,...}", lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 10))), ?assertEqual("#{bar => foo,...}", lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 17))), ?assertEqual("#{bar => foo,foo => ...}", lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 18))), ?assertEqual("#{bar => foo,foo => ...}", lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 19))), ?assertEqual("#{bar => foo,foo => ...}", lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 20))), ?assertEqual("#{bar => foo,foo => bar}", lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 21))), ?assertEqual("#{22835963083295358096932575511191922182123945984 => ...}", lists:flatten(format("~w", [ maps:from_list([{22835963083295358096932575511191922182123945984, 22835963083295358096932575511191922182123945984}])], 10))), ?assertEqual("#{22835963083295358096932575511191922182123945984 => ...}", lists:flatten(format("~w", [ maps:from_list([{22835963083295358096932575511191922182123945984, bar}])], 10))), ?assertEqual("#{22835963083295358096932575511191922182123945984 => ...}", lists:flatten(format("~w", [ maps:from_list([{22835963083295358096932575511191922182123945984, bar}])], 53))), ?assertEqual("#{22835963083295358096932575511191922182123945984 => bar}", lists:flatten(format("~w", [ maps:from_list([{22835963083295358096932575511191922182123945984, bar}])], 54))), ok. unicode_test() -> ?assertEqual([231,167,129], lists:flatten(format("~s", [<<231,167,129>>], 50))), ?assertEqual([31169], lists:flatten(format("~ts", [<<231,167,129>>], 50))), ok. 
depth_limit_test() -> ?assertEqual("{...}", lists:flatten(format("~P", [{a, [b, [c, [d]]]}, 1], 50))), ?assertEqual("{a,...}", lists:flatten(format("~P", [{a, [b, [c, [d]]]}, 2], 50))), ?assertEqual("{a,[...]}", lists:flatten(format("~P", [{a, [b, [c, [d]]]}, 3], 50))), ?assertEqual("{a,[b|...]}", lists:flatten(format("~P", [{a, [b, [c, [d]]]}, 4], 50))), ?assertEqual("{a,[b,[...]]}", lists:flatten(format("~P", [{a, [b, [c, [d]]]}, 5], 50))), ?assertEqual("{a,[b,[c|...]]}", lists:flatten(format("~P", [{a, [b, [c, [d]]]}, 6], 50))), ?assertEqual("{a,[b,[c,[...]]]}", lists:flatten(format("~P", [{a, [b, [c, [d]]]}, 7], 50))), ?assertEqual("{a,[b,[c,[d]]]}", lists:flatten(format("~P", [{a, [b, [c, [d]]]}, 8], 50))), ?assertEqual("{a,[b,[c,[d]]]}", lists:flatten(format("~P", [{a, [b, [c, [d]]]}, 9], 50))), ?assertEqual("{a,{...}}", lists:flatten(format("~P", [{a, {b, {c, {d}}}}, 3], 50))), ?assertEqual("{a,{b,...}}", lists:flatten(format("~P", [{a, {b, {c, {d}}}}, 4], 50))), ?assertEqual("{a,{b,{...}}}", lists:flatten(format("~P", [{a, {b, {c, {d}}}}, 5], 50))), ?assertEqual("{a,{b,{c,...}}}", lists:flatten(format("~P", [{a, {b, {c, {d}}}}, 6], 50))), ?assertEqual("{a,{b,{c,{...}}}}", lists:flatten(format("~P", [{a, {b, {c, {d}}}}, 7], 50))), ?assertEqual("{a,{b,{c,{d}}}}", lists:flatten(format("~P", [{a, {b, {c, {d}}}}, 8], 50))), ?assertEqual("#{a => #{...}}", lists:flatten(format("~P", [maps:from_list([{a, maps:from_list([{b, maps:from_list([{c, d}])}])}]), 2], 50))), ?assertEqual("#{a => #{b => #{...}}}", lists:flatten(format("~P", [maps:from_list([{a, maps:from_list([{b, maps:from_list([{c, d}])}])}]), 3], 50))), ?assertEqual("#{a => #{b => #{c => d}}}", lists:flatten(format("~P", [maps:from_list([{a, maps:from_list([{b, maps:from_list([{c, d}])}])}]), 4], 50))), ?assertEqual("#{}", lists:flatten(format("~P", [maps:new(), 1], 50))), ?assertEqual("#{...}", lists:flatten(format("~P", [maps:from_list([{1,1}, {2,2}, {3,3}]), 1], 50))), ?assertEqual("#{1 => 1,...}", lists:flatten(format("~P", [maps:from_list([{1,1}, {2,2}, {3,3}]), 2], 50))), ?assertEqual("#{1 => 1,2 => 2,...}", lists:flatten(format("~P", [maps:from_list([{1,1}, {2,2}, {3,3}]), 3], 50))), ?assertEqual("#{1 => 1,2 => 2,3 => 3}", lists:flatten(format("~P", [maps:from_list([{1,1}, {2,2}, {3,3}]), 4], 50))), ?assertEqual("{\"a\",[...]}", lists:flatten(format("~P", [{"a", ["b", ["c", ["d"]]]}, 3], 50))), ?assertEqual("{\"a\",[\"b\",[[...]|...]]}", lists:flatten(format("~P", [{"a", ["b", ["c", ["d"]]]}, 6], 50))), ?assertEqual("{\"a\",[\"b\",[\"c\",[\"d\"]]]}", lists:flatten(format("~P", [{"a", ["b", ["c", ["d"]]]}, 9], 50))), ?assertEqual("[...]", lists:flatten(format("~P", [[1, 2, 3], 1], 50))), ?assertEqual("[1|...]", lists:flatten(format("~P", [[1, 2, 3], 2], 50))), ?assertEqual("[1,2|...]", lists:flatten(format("~P", [[1, 2, 3], 3], 50))), ?assertEqual("[1,2,3]", lists:flatten(format("~P", [[1, 2, 3], 4], 50))), ?assertEqual("{1,...}", lists:flatten(format("~P", [{1, 2, 3}, 2], 50))), ?assertEqual("{1,2,...}", lists:flatten(format("~P", [{1, 2, 3}, 3], 50))), ?assertEqual("{1,2,3}", lists:flatten(format("~P", [{1, 2, 3}, 4], 50))), ?assertEqual("{1,...}", lists:flatten(format("~P", [{1, 2, 3}, 2], 50))), ?assertEqual("[1,2|...]", lists:flatten(format("~P", [[1, 2, <<3>>], 3], 50))), ?assertEqual("[1,2,<<...>>]", lists:flatten(format("~P", [[1, 2, <<3>>], 4], 50))), ?assertEqual("[1,2,<<3>>]", lists:flatten(format("~P", [[1, 2, <<3>>], 5], 50))), ?assertEqual("<<...>>", lists:flatten(format("~P", [<<0, 0, 0, 0>>, 1], 50))), 
?assertEqual("<<0,...>>", lists:flatten(format("~P", [<<0, 0, 0, 0>>, 2], 50))), ?assertEqual("<<0,0,...>>", lists:flatten(format("~P", [<<0, 0, 0, 0>>, 3], 50))), ?assertEqual("<<0,0,0,...>>", lists:flatten(format("~P", [<<0, 0, 0, 0>>, 4], 50))), ?assertEqual("<<0,0,0,0>>", lists:flatten(format("~P", [<<0, 0, 0, 0>>, 5], 50))), %% this is a seriously weird edge case ?assertEqual("<<\" \"...>>", lists:flatten(format("~P", [<<32, 32, 32, 0>>, 2], 50))), ?assertEqual("<<\" \"...>>", lists:flatten(format("~P", [<<32, 32, 32, 0>>, 3], 50))), ?assertEqual("<<\" \"...>>", lists:flatten(format("~P", [<<32, 32, 32, 0>>, 4], 50))), ?assertEqual("<<32,32,32,0>>", lists:flatten(format("~P", [<<32, 32, 32, 0>>, 5], 50))), ?assertEqual("<<32,32,32,0>>", lists:flatten(format("~p", [<<32, 32, 32, 0>>], 50))), %% depth limiting for some reason works in 4 byte chunks on printable binaries? ?assertEqual("<<\"hell\"...>>", lists:flatten(format("~P", [<<"hello world">>, 2], 50))), ?assertEqual("<<\"abcd\"...>>", lists:flatten(format("~P", [<<$a, $b, $c, $d, $e, 0>>, 2], 50))), %% I don't even know... ?assertEqual("<<>>", lists:flatten(format("~P", [<<>>, 1], 50))), ?assertEqual("<<>>", lists:flatten(format("~W", [<<>>, 1], 50))), ?assertEqual("{abc,<<\"abc\\\"\">>}", lists:flatten(format("~P", [{abc,<<"abc\"">>}, 4], 50))), ok. print_terms_without_format_string_test() -> ?assertError(badarg, format({hello, world}, [], 50)), ?assertError(badarg, format([{google, bomb}], [], 50)), ?assertEqual([$h,$e,$l,$l,$o, 3594], format([$h,$e,$l,$l,$o, 3594], [], 50)), ?assertError(badarg, format([$h,$e,$l,$l,$o, 65535], [], 50)), ?assertEqual("helloworld", lists:flatten(format([$h,$e,$l,$l,$o, "world"], [], 50))), ?assertEqual("hello", lists:flatten(format(<<"hello">>, [], 50))), ?assertEqual("hello", lists:flatten(format('hello', [], 50))), ?assertError(badarg, format(<<1, 2, 3, 1:7>>, [], 100)), ?assertError(badarg, format(65535, [], 50)), ok. improper_io_list_test() -> ?assertEqual(">hello", lists:flatten(format('~s', [[$>|<<"hello">>]], 50))), ?assertEqual(">hello", lists:flatten(format('~ts', [[$>|<<"hello">>]], 50))), ?assertEqual("helloworld", lists:flatten(format('~ts', [[<<"hello">>|<<"world">>]], 50))), ok. -endif. lager-3.8.0/src/lager.erl0000644000232200023220000007136313523436621015566 0ustar debalancedebalance%% Copyright (c) 2011-2012 Basho Technologies, Inc. All Rights Reserved. %% %% This file is provided to you under the Apache License, %% Version 2.0 (the "License"); you may not use this file %% except in compliance with the License. You may obtain %% a copy of the License at %% %% http://www.apache.org/licenses/LICENSE-2.0 %% %% Unless required by applicable law or agreed to in writing, %% software distributed under the License is distributed on an %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY %% KIND, either express or implied. See the License for the %% specific language governing permissions and limitations %% under the License. %% @doc The lager logging framework. -module(lager). -include("lager.hrl"). -define(LAGER_MD_KEY, '__lager_metadata'). -define(TRACE_SINK, '__trace_sink'). -define(ROTATE_TIMEOUT, 100000). -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). -endif. 
%% API
-export([start/0, log/3, log/4, log/5, log_unsafe/4, md/0, md/1,
         rotate_handler/1, rotate_handler/2, rotate_sink/1, rotate_all/0,
         trace/2, trace/3, trace_file/2, trace_file/3, trace_file/4,
         trace_console/1, trace_console/2, install_trace/2, install_trace/3,
         remove_trace/1, trace_state/3, trace_func/3, list_all_sinks/0,
         clear_all_traces/0, clear_trace_by_destination/1, stop_trace/1,
         stop_trace/3, status/0, get_loglevel/1, get_loglevel/2,
         set_loglevel/2, set_loglevel/3, set_loglevel/4, get_loglevels/1,
         update_loglevel_config/1, posix_error/1, set_loghwm/2, set_loghwm/3,
         set_loghwm/4, safe_format/3, safe_format_chop/3, unsafe_format/2,
         dispatch_log/5, dispatch_log/7, dispatch_log/9, do_log/9, do_log/10,
         do_log_unsafe/10, pr/2, pr/3, pr_stacktrace/1, pr_stacktrace/2]).

-type log_level() :: none | debug | info | notice | warning | error | critical | alert | emergency.
-type log_level_number() :: 0..7.

-export_type([log_level/0, log_level_number/0]).

-record(trace_func_state_v1, {
        pid :: undefined | pid(),
        level :: log_level(),
        count :: infinity | pos_integer(),
        format_string :: string(),
        timeout :: infinity | pos_integer(),
        started = os:timestamp() :: erlang:timestamp() %% use os:timestamp for compatibility
    }).

%% API

%% @doc Install a lager trace handler into the target process (using sys:install) at the specified level.
-spec install_trace(pid(), log_level()) -> ok.
install_trace(Pid, Level) ->
    install_trace(Pid, Level, []).

-spec install_trace(pid(), log_level(), [{count, infinity | pos_integer()} | {format_string, string()} | {timeout, timeout()}]) -> ok.
install_trace(Pid, Level, Options) ->
    sys:install(Pid, {fun ?MODULE:trace_func/3, trace_state(Pid, Level, Options)}).

%% @doc Remove a previously installed lager trace handler from the target process.
-spec remove_trace(pid()) -> ok.
remove_trace(Pid) ->
    sys:remove(Pid, fun ?MODULE:trace_func/3).

%% @doc Start the application. Mainly useful for using `-s lager' as a command
%% line switch to the VM to make lager start on boot.
start() -> start(lager).

start(App) ->
    start_ok(App, application:start(App, permanent)).

start_ok(_App, ok) -> ok;
start_ok(_App, {error, {already_started, _App}}) -> ok;
start_ok(App, {error, {not_started, Dep}}) ->
    ok = start(Dep),
    start(App);
start_ok(App, {error, Reason}) ->
    erlang:error({app_start_failed, App, Reason}).

%% @doc Get lager metadata for the current process.
-spec md() -> [{atom(), any()}].
md() ->
    case erlang:get(?LAGER_MD_KEY) of
        undefined -> [];
        MD -> MD
    end.

%% @doc Set lager metadata for the current process.
%% Will error with badarg if you don't supply a list of {key, value} tuples keyed by atoms.
-spec md([{atom(), any()},...]) -> ok.
md(NewMD) when is_list(NewMD) ->
    %% make sure it's actually a real proplist
    case lists:all(
            fun({Key, _Value}) when is_atom(Key) -> true;
               (_) -> false
            end, NewMD) of
        true ->
            erlang:put(?LAGER_MD_KEY, NewMD),
            ok;
        false ->
            erlang:error(badarg)
    end;
md(_) ->
    erlang:error(badarg).

-spec dispatch_log(atom(), log_level(), list(), string(), list() | none, pos_integer(), safe | unsafe) ->
    ok | {error, lager_not_running} | {error, {sink_not_configured, atom()}}.
%% this is the same check that the parse transform bakes into the module at compile time %% see lager_transform (lines 173-216) dispatch_log(Sink, Severity, Metadata, Format, Args, Size, Safety) when is_atom(Severity)-> SeverityAsInt=lager_util:level_to_num(Severity), case {whereis(Sink), whereis(?DEFAULT_SINK), lager_config:get({Sink, loglevel}, {?LOG_NONE, []})} of {undefined, undefined, _} -> {error, lager_not_running}; {undefined, _LagerEventPid0, _} -> {error, {sink_not_configured, Sink}}; {SinkPid, _LagerEventPid1, {Level, Traces}} when Safety =:= safe andalso ( (Level band SeverityAsInt) /= 0 orelse Traces /= [] ) -> do_log(Severity, Metadata, Format, Args, Size, SeverityAsInt, Level, Traces, Sink, SinkPid); {SinkPid, _LagerEventPid1, {Level, Traces}} when Safety =:= unsafe andalso ( (Level band SeverityAsInt) /= 0 orelse Traces /= [] ) -> do_log_unsafe(Severity, Metadata, Format, Args, Size, SeverityAsInt, Level, Traces, Sink, SinkPid); _ -> ok end. %% @private Should only be called externally from code generated from the parse transform do_log(Severity, Metadata, Format, Args, Size, SeverityAsInt, LevelThreshold, TraceFilters, Sink, SinkPid) when is_atom(Severity) -> FormatFun = fun() -> safe_format_chop(Format, Args, Size) end, do_log_impl(Severity, Metadata, Format, Args, SeverityAsInt, LevelThreshold, TraceFilters, Sink, SinkPid, FormatFun). do_log_impl(Severity, Metadata, Format, Args, SeverityAsInt, LevelThreshold, TraceFilters, Sink, SinkPid, FormatFun) -> {Destinations, TraceSinkPid} = case TraceFilters of [] -> {[], undefined}; _ -> {lager_util:check_traces(Metadata,SeverityAsInt,TraceFilters,[]), whereis(?TRACE_SINK)} end, case (LevelThreshold band SeverityAsInt) /= 0 orelse Destinations /= [] of true -> Msg = case Args of A when is_list(A) -> FormatFun(); _ -> Format end, LagerMsg = lager_msg:new(Msg, Severity, Metadata, Destinations), case lager_config:get({Sink, async}, false) of true -> gen_event:notify(SinkPid, {log, LagerMsg}); false -> gen_event:sync_notify(SinkPid, {log, LagerMsg}) end, case TraceSinkPid /= undefined of true -> gen_event:notify(TraceSinkPid, {log, LagerMsg}); false -> ok end; false -> ok end. %% @private Should only be called externally from code generated from the parse transform %% Specifically, it would be level ++ `_unsafe' as in `info_unsafe'. do_log_unsafe(Severity, Metadata, Format, Args, _Size, SeverityAsInt, LevelThreshold, TraceFilters, Sink, SinkPid) when is_atom(Severity) -> FormatFun = fun() -> unsafe_format(Format, Args) end, do_log_impl(Severity, Metadata, Format, Args, SeverityAsInt, LevelThreshold, TraceFilters, Sink, SinkPid, FormatFun). %% backwards compatible with beams compiled with lager 1.x dispatch_log(Severity, _Module, _Function, _Line, _Pid, Metadata, Format, Args, Size) -> dispatch_log(Severity, Metadata, Format, Args, Size). %% backwards compatible with beams compiled with lager 2.x dispatch_log(Severity, Metadata, Format, Args, Size) -> dispatch_log(?DEFAULT_SINK, Severity, Metadata, Format, Args, Size, safe). %% backwards compatible with beams compiled with lager 2.x do_log(Severity, Metadata, Format, Args, Size, SeverityAsInt, LevelThreshold, TraceFilters, SinkPid) -> do_log(Severity, Metadata, Format, Args, Size, SeverityAsInt, LevelThreshold, TraceFilters, ?DEFAULT_SINK, SinkPid). %% TODO: %% Consider making log2/4 that takes the Level, Pid and Message params of log/3 %% along with a Sink param?? %% @doc Manually log a message into lager without using the parse transform. 
-spec log(log_level(), pid() | atom() | [tuple(),...], list()) -> ok | {error, lager_not_running}. log(Level, Pid, Message) when is_pid(Pid); is_atom(Pid) -> dispatch_log(Level, [{pid,Pid}], Message, [], ?DEFAULT_TRUNCATION); log(Level, Metadata, Message) when is_list(Metadata) -> dispatch_log(Level, Metadata, Message, [], ?DEFAULT_TRUNCATION). %% @doc Manually log a message into lager without using the parse transform. -spec log(log_level(), pid() | atom() | [tuple(),...], string(), list()) -> ok | {error, lager_not_running}. log(Level, Pid, Format, Args) when is_pid(Pid); is_atom(Pid) -> dispatch_log(Level, [{pid,Pid}], Format, Args, ?DEFAULT_TRUNCATION); log(Level, Metadata, Format, Args) when is_list(Metadata) -> dispatch_log(Level, Metadata, Format, Args, ?DEFAULT_TRUNCATION). log_unsafe(Level, Metadata, Format, Args) when is_list(Metadata) -> dispatch_log(?DEFAULT_SINK, Level, Metadata, Format, Args, ?DEFAULT_TRUNCATION, unsafe). %% @doc Manually log a message into lager without using the parse transform. -spec log(atom(), log_level(), pid() | atom() | [tuple(),...], string(), list()) -> ok | {error, lager_not_running}. log(Sink, Level, Pid, Format, Args) when is_pid(Pid); is_atom(Pid) -> dispatch_log(Sink, Level, [{pid,Pid}], Format, Args, ?DEFAULT_TRUNCATION, safe); log(Sink, Level, Metadata, Format, Args) when is_list(Metadata) -> dispatch_log(Sink, Level, Metadata, Format, Args, ?DEFAULT_TRUNCATION, safe). validate_trace_filters(Filters, Level, Backend) -> Sink = proplists:get_value(sink, Filters, ?DEFAULT_SINK), {Sink, lager_util:validate_trace({ proplists:delete(sink, Filters), Level, Backend }) }. trace_file(File, Filter) -> trace_file(File, Filter, debug, []). trace_file(File, Filter, Level) when is_atom(Level) -> trace_file(File, Filter, Level, []); trace_file(File, Filter, Options) when is_list(Options) -> trace_file(File, Filter, debug, Options). trace_file(File, Filter, Level, Options) -> FileName = lager_util:expand_path(File), case validate_trace_filters(Filter, Level, {lager_file_backend, FileName}) of {Sink, {ok, Trace}} -> Handlers = lager_config:global_get(handlers, []), %% check if this file backend is already installed Res = case lager_util:find_file(FileName, Handlers) of false -> %% install the handler LogFileConfig = lists:keystore(level, 1, lists:keystore(file, 1, Options, {file, FileName}), {level, none}), HandlerInfo = lager_app:start_handler(Sink, {lager_file_backend, FileName}, LogFileConfig), lager_config:global_set(handlers, [HandlerInfo|Handlers]), {ok, installed}; {_Watcher, _Handler, Sink} -> {ok, exists}; {_Watcher, _Handler, _OtherSink} -> {error, file_in_use} end, case Res of {ok, _} -> add_trace_to_loglevel_config(Trace, Sink), {ok, {{lager_file_backend, FileName}, Filter, Level}}; {error, _} = E -> E end; {_Sink, Error} -> Error end. trace_console(Filter) -> trace_console(Filter, debug). trace_console(Filter, Level) -> trace(lager_console_backend, Filter, Level). trace(Backend, Filter) -> trace(Backend, Filter, debug). trace({lager_file_backend, File}, Filter, Level) -> trace_file(File, Filter, Level); trace(Backend, Filter, Level) -> case validate_trace_filters(Filter, Level, Backend) of {Sink, {ok, Trace}} -> add_trace_to_loglevel_config(Trace, Sink), {ok, {Backend, Filter, Level}}; {_Sink, Error} -> Error end. stop_trace(Backend, Filter, Level) -> case validate_trace_filters(Filter, Level, Backend) of {Sink, {ok, Trace}} -> stop_trace_int(Trace, Sink); {_Sink, Error} -> Error end. 
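%% A sketch of how the trace API above is typically driven (the filter
%% proplist, file name and levels are illustrative, not defaults):
%%
%%   {ok, Trace} = lager:trace_file("log/my_module.log",
%%                                  [{module, my_module}], debug),
%%   %% ... and later, to remove it again:
%%   lager:stop_trace(Trace),
%%
%%   %% or send matching messages to the console at notice level:
%%   lager:trace_console([{module, my_module}], notice).
%%
%% trace_file/2,3,4 returns {ok, {{lager_file_backend, File}, Filter, Level}},
%% which is exactly the shape stop_trace/1 below accepts.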
stop_trace({Backend, Filter, Level}) -> stop_trace(Backend, Filter, Level). %% Important: validate_trace_filters orders the arguments of %% trace tuples differently than the way outside callers have %% the trace tuple. %% %% That is to say, outside they are represented as %% `{Backend, Filter, Level}' %% %% and when they come back from validation, they're %% `{Filter, Level, Backend}' stop_trace_int({_Filter, _Level, Backend} = Trace, Sink) -> {Level, Traces} = lager_config:get({Sink, loglevel}), NewTraces = lists:delete(Trace, Traces), _ = lager_util:trace_filter([ element(1, T) || T <- NewTraces ]), %MinLevel = minimum_loglevel(get_loglevels() ++ get_trace_levels(NewTraces)), lager_config:set({Sink, loglevel}, {Level, NewTraces}), case get_loglevel(Sink, Backend) of none -> %% check no other traces point here case lists:keyfind(Backend, 3, NewTraces) of false -> gen_event:delete_handler(Sink, Backend, []), lager_config:global_set(handlers, lists:keydelete(Backend, 1, lager_config:global_get(handlers))); _ -> ok end; _ -> ok end, ok. list_all_sinks() -> sets:to_list( lists:foldl(fun({_Watcher, _Handler, Sink}, Set) -> sets:add_element(Sink, Set) end, sets:new(), lager_config:global_get(handlers, []))). clear_traces_by_sink(Sinks) -> lists:foreach(fun(S) -> {Level, _Traces} = lager_config:get({S, loglevel}), lager_config:set({S, loglevel}, {Level, []}) end, Sinks). clear_trace_by_destination(ID) -> Sinks = lists:sort(list_all_sinks()), Traces = find_traces(Sinks), [ stop_trace_int({Filter, Level, Destination}, Sink) || {Sink, {Filter, Level, Destination}} <- Traces, Destination == ID]. clear_all_traces() -> Handlers = lager_config:global_get(handlers, []), clear_traces_by_sink(list_all_sinks()), _ = lager_util:trace_filter(none), lager_config:global_set(handlers, lists:filter( fun({Handler, _Watcher, Sink}) -> case get_loglevel(Sink, Handler) of none -> gen_event:delete_handler(Sink, Handler, []), false; _ -> true end end, Handlers)). find_traces(Sinks) -> lists:foldl(fun(S, Acc) -> {_Level, Traces} = lager_config:get({S, loglevel}), Acc ++ lists:map(fun(T) -> {S, T} end, Traces) end, [], Sinks). status() -> Handlers = lager_config:global_get(handlers, []), Sinks = lists:sort(list_all_sinks()), Traces = find_traces(Sinks), TraceCount = case length(Traces) of 0 -> 1; N -> N end, Status = ["Lager status:\n", [begin Level = get_loglevel(Sink, Handler), get_sink_handler_status(Sink, Handler, Level) end || {Handler, _Watcher, Sink} <- lists:sort(fun({_, _, S1}, {_, _, S2}) -> S1 =< S2 end, Handlers)], "Active Traces:\n", [begin LevelName = case Level of {mask, Mask} -> case lager_util:mask_to_levels(Mask) of [] -> none; Levels -> hd(Levels) end; Num -> lager_util:num_to_level(Num) end, io_lib:format("Tracing messages matching ~p (sink ~s) at level ~p to ~p\n", [Filter, Sink, LevelName, Destination]) end || {Sink, {Filter, Level, Destination}} <- Traces], [ "Tracing Reductions:\n", case ?DEFAULT_TRACER:info('query') of {null, false} -> ""; Query -> io_lib:format("~p~n", [Query]) end ], [ "Tracing Statistics:\n ", [ begin [" ", atom_to_list(Table), ": ", integer_to_list(?DEFAULT_TRACER:info(Table) div TraceCount), "\n"] end || Table <- [input, output, filter] ] ]], io:put_chars(Status). get_sink_handler_status(Sink, Handler, Level) -> case Handler of {lager_file_backend, File} -> io_lib:format("File ~ts (~s) at level ~p\n", [File, Sink, Level]); lager_console_backend -> io_lib:format("Console (~s) at level ~p\n", [Sink, Level]); _ -> [] end. %% @doc Set the loglevel for a particular backend. 
set_loglevel(Handler, Level) when is_atom(Level) -> set_loglevel(?DEFAULT_SINK, Handler, undefined, Level). %% @doc Set the loglevel for a particular backend that has multiple identifiers %% (eg. the file backend). set_loglevel(Handler, Ident, Level) when is_atom(Level) -> set_loglevel(?DEFAULT_SINK, Handler, Ident, Level). %% @doc Set the loglevel for a particular sink's backend that potentially has %% multiple identifiers. (Use `undefined' if it doesn't have any.) set_loglevel(Sink, Handler, Ident, Level) when is_atom(Level) -> HandlerArg = case Ident of undefined -> Handler; _ -> {Handler, Ident} end, Reply = gen_event:call(Sink, HandlerArg, {set_loglevel, Level}, infinity), update_loglevel_config(Sink), Reply. %% @doc Get the loglevel for a particular backend on the default sink. In the case that the backend %% has multiple identifiers, the lowest is returned. get_loglevel(Handler) -> get_loglevel(?DEFAULT_SINK, Handler). %% @doc Get the loglevel for a particular sink's backend. In the case that the backend %% has multiple identifiers, the lowest is returned. get_loglevel(Sink, Handler) -> case gen_event:call(Sink, Handler, get_loglevel, infinity) of {mask, Mask} -> case lager_util:mask_to_levels(Mask) of [] -> none; Levels -> hd(Levels) end; X when is_integer(X) -> lager_util:num_to_level(X); Y -> Y end. %% @doc Try to convert an atom to a posix error, but fall back on printing the %% term if its not a valid posix error code. posix_error(Error) when is_atom(Error) -> case erl_posix_msg:message(Error) of "unknown POSIX error" -> atom_to_list(Error); Message -> Message end; posix_error(Error) -> safe_format_chop("~p", [Error], ?DEFAULT_TRUNCATION). %% @private get_loglevels(Sink) -> [gen_event:call(Sink, Handler, get_loglevel, infinity) || Handler <- gen_event:which_handlers(Sink)]. %% @doc Set the loghwm for the default sink. set_loghwm(Handler, Hwm) when is_integer(Hwm) -> set_loghwm(?DEFAULT_SINK, Handler, Hwm). %% @doc Set the loghwm for a particular backend. set_loghwm(Sink, Handler, Hwm) when is_integer(Hwm) -> gen_event:call(Sink, Handler, {set_loghwm, Hwm}, infinity). %% @doc Set the loghwm (log high water mark) for file backends with multiple identifiers set_loghwm(Sink, Handler, Ident, Hwm) when is_integer(Hwm) -> gen_event:call(Sink, {Handler, Ident}, {set_loghwm, Hwm}, infinity). %% @private add_trace_to_loglevel_config(Trace, Sink) -> {MinLevel, Traces} = lager_config:get({Sink, loglevel}), case lists:member(Trace, Traces) of false -> NewTraces = [Trace|Traces], _ = lager_util:trace_filter([ element(1, T) || T <- NewTraces]), lager_config:set({Sink, loglevel}, {MinLevel, [Trace|Traces]}); _ -> ok end. %% @doc recalculate min log level update_loglevel_config(error_logger) -> %% Not a sink under our control, part of the Erlang logging %% utility that error_logger_lager_h attaches to true; update_loglevel_config(Sink) -> {_, Traces} = lager_config:get({Sink, loglevel}, {ignore_me, []}), MinLog = minimum_loglevel(get_loglevels(Sink)), lager_config:set({Sink, loglevel}, {MinLog, Traces}). %% @private minimum_loglevel(Levels) -> lists:foldl(fun({mask, Mask}, Acc) -> Mask bor Acc; (Level, Acc) when is_integer(Level) -> {mask, Mask} = lager_util:config_to_mask(lager_util:num_to_level(Level)), Mask bor Acc; (_, Acc) -> Acc end, 0, Levels). %% @doc Print the format string `Fmt' with `Args' safely with a size %% limit of `Limit'. If the format string is invalid, or not enough %% arguments are supplied 'FORMAT ERROR' is printed with the offending %% arguments. 
%% The caller is NOT crashed.
safe_format(Fmt, Args, Limit) ->
    safe_format(Fmt, Args, Limit, []).

safe_format(Fmt, Args, Limit, Options) ->
    try lager_trunc_io:format(Fmt, Args, Limit, Options)
    catch
        _:_ -> lager_trunc_io:format("FORMAT ERROR: ~p ~p", [Fmt, Args], Limit)
    end.

%% @private
safe_format_chop(Fmt, Args, Limit) ->
    safe_format(Fmt, Args, Limit, [{chomp, true}]).

%% @private Print the format string `Fmt' with `Args' without a size limit.
%% This is unsafe because the output of this function is unbounded.
%%
%% Log messages with unbounded size will kill your application dead as
%% OTP mechanisms struggle to cope with them. So this function is
%% intended only for messages which have a reasonably bounded
%% size before they're formatted.
%%
%% If the format string is invalid or not enough arguments are
%% supplied a 'FORMAT ERROR' message is printed instead with the
%% offending arguments. The caller is NOT crashed.
unsafe_format(Fmt, Args) ->
    try io_lib:format(Fmt, Args)
    catch
        _:_ -> io_lib:format("FORMAT ERROR: ~p ~p", [Fmt, Args])
    end.

%% @doc Print a record or a list of records that lager found during the parse transform
pr(Record, Module) when is_tuple(Record), is_atom(element(1, Record)) ->
    pr(Record, Module, []);
pr(List, Module) when is_list(List) ->
    pr(List, Module, []);
pr(Record, _) ->
    Record.

%% @doc Print a record or a list of records that lager found during the parse transform
pr(Record, Module, Options) when is_tuple(Record), is_atom(element(1, Record)), is_list(Options) ->
    try
        case is_record_known(Record, Module) of
            false ->
                Record;
            {RecordName, RecordFields} ->
                {'$lager_record', RecordName,
                 zip(RecordFields, tl(tuple_to_list(Record)), Module, Options, [])}
        end
    catch
        error:undef ->
            Record
    end;
pr([Head|Tail], Module, Options) when is_list(Options) ->
    [pr(Head, Module, Options)|pr(Tail, Module, Options)];
pr(Record, _, _) ->
    Record.

zip([FieldName|RecordFields], [FieldValue|Record], Module, Options, ToReturn) when is_list(FieldValue) ->
    zip(RecordFields, Record, Module, Options, [{FieldName, pr(FieldValue, Module, Options)}|ToReturn]);
zip([FieldName|RecordFields], [FieldValue|Record], Module, Options, ToReturn) ->
    Compress = lists:member(compress, Options),
    case is_tuple(FieldValue) andalso tuple_size(FieldValue) > 0 andalso is_atom(element(1, FieldValue))
         andalso is_record_known(FieldValue, Module) of
        false when Compress andalso FieldValue =:= undefined ->
            zip(RecordFields, Record, Module, Options, ToReturn);
        false ->
            zip(RecordFields, Record, Module, Options, [{FieldName, FieldValue}|ToReturn]);
        _Else ->
            F = {FieldName, pr(FieldValue, Module, Options)},
            zip(RecordFields, Record, Module, Options, [F|ToReturn])
    end;
zip([], [], _Module, _Compress, ToReturn) ->
    lists:reverse(ToReturn).

is_record_known(Record, Module) ->
    Name = element(1, Record),
    Attrs = Module:module_info(attributes),
    case lists:keyfind(lager_records, 1, Attrs) of
        false -> false;
        {lager_records, Records} ->
            case lists:keyfind(Name, 1, Records) of
                false -> false;
                {Name, RecordFields} ->
                    case (tuple_size(Record) - 1) =:= length(RecordFields) of
                        false -> false;
                        true -> {Name, RecordFields}
                    end
            end
    end.

%% @doc Print a stacktrace in human-readable form
pr_stacktrace(Stacktrace) ->
    Stacktrace1 = case application:get_env(lager, reverse_pretty_stacktrace, true) of
        true ->
            lists:reverse(Stacktrace);
        _ ->
            Stacktrace
    end,
    pr_stacktrace_(Stacktrace1).

pr_stacktrace_(Stacktrace) ->
    Indent = "\n    ",
    lists:foldl(
        fun(Entry, Acc) ->
            Acc ++ Indent ++ error_logger_lager_h:format_mfa(Entry)
        end, [], Stacktrace).
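%% A sketch of how pr/2,3 and pr_stacktrace/1,2 are used from application
%% code (the record, function and variable names are illustrative; the
%% try/catch form assumes OTP 21+ stacktrace syntax):
%%
%%   lager:info("current state: ~p", [lager:pr(State, ?MODULE)]),
%%   lager:info("current state: ~p", [lager:pr(State, ?MODULE, [compress])]),
%%
%%   try risky_call()
%%   catch
%%       Class:Reason:Stacktrace ->
%%           lager:error("~s", [lager:pr_stacktrace(Stacktrace, {Class, Reason})])
%%   end.
%%
%% pr/2 only expands records whose definitions the parse transform stored in
%% the calling module's `lager_records' attribute; anything else is returned
%% unchanged, as the clauses above show.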
pr_stacktrace(Stacktrace, {Class, Reason}) -> case application:get_env(lager, reverse_pretty_stacktrace, true) of true -> lists:flatten( pr_stacktrace_(lists:reverse(Stacktrace)) ++ "\n" ++ io_lib:format("~s:~p", [Class, Reason])); _ -> lists:flatten( io_lib:format("~s:~p", [Class, Reason]) ++ pr_stacktrace_(Stacktrace)) end. rotate_sink(Sink) -> Handlers = lager_config:global_get(handlers), RotateHandlers = lists:filtermap( fun({Handler,_,S}) when S == Sink -> {true, {Handler, Sink}}; (_) -> false end, Handlers), rotate_handlers(RotateHandlers). rotate_all() -> rotate_handlers(lists:map(fun({H,_,S}) -> {H, S} end, lager_config:global_get(handlers))). rotate_handlers(Handlers) -> [ rotate_handler(Handler, Sink) || {Handler, Sink} <- Handlers ]. rotate_handler(Handler) -> Handlers = lager_config:global_get(handlers), case lists:keyfind(Handler, 1, Handlers) of {Handler, _, Sink} -> rotate_handler(Handler, Sink); false -> ok end. rotate_handler(Handler, Sink) -> gen_event:call(Sink, Handler, rotate, ?ROTATE_TIMEOUT). %% @private trace_func(#trace_func_state_v1{pid=Pid, level=Level, format_string=Fmt}=FuncState, Event, ProcState) -> _ = lager:log(Level, Pid, Fmt, [Event, ProcState]), check_timeout(decrement_count(FuncState)). %% @private trace_state(Pid, Level, Options) -> #trace_func_state_v1{pid=Pid, level=Level, count=proplists:get_value(count, Options, infinity), timeout=proplists:get_value(timeout, Options, infinity), format_string=proplists:get_value(format_string, Options, "TRACE ~p ~p")}. decrement_count(#trace_func_state_v1{count=infinity} = FuncState) -> FuncState; decrement_count(#trace_func_state_v1{count=1}) -> %% hit the counter limit done; decrement_count(#trace_func_state_v1{count=Count} = FuncState) -> FuncState#trace_func_state_v1{count=Count - 1}. check_timeout(#trace_func_state_v1{timeout=infinity} = FuncState) -> FuncState; check_timeout(#trace_func_state_v1{timeout=Timeout, started=Started} = FuncState) -> case (timer:now_diff(os:timestamp(), Started) / 1000) > Timeout of true -> done; false -> FuncState end. -ifdef(TEST). get_sink_handler_status_ascii_test() -> File = "C:\\ProgramData\\Directory With Spaces\\lager.log", validate_status(File). get_sink_handler_status_latin_test() -> File = "C:\\ProgramData\\Tést Directory\\lager.log", validate_status(File). get_sink_handler_status_unicode_test() -> File = "C:\\ProgramData\\찦차를 타고 온 펲시맨과 쑛다리 똠방각하 (Korean)\\lager.log", validate_status(File). validate_status(File) -> Handler = {lager_file_backend, File}, Status = get_sink_handler_status(?DEFAULT_SINK, Handler, debug), ?assertNotEqual(nomatch, string:find(Status, File)). -endif. lager-3.8.0/src/lager_app.erl0000644000232200023220000004347013523436621016424 0ustar debalancedebalance%% Copyright (c) 2011-2012 Basho Technologies, Inc. All Rights Reserved. %% %% This file is provided to you under the Apache License, %% Version 2.0 (the "License"); you may not use this file %% except in compliance with the License. You may obtain %% a copy of the License at %% %% http://www.apache.org/licenses/LICENSE-2.0 %% %% Unless required by applicable law or agreed to in writing, %% software distributed under the License is distributed on an %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY %% KIND, either express or implied. See the License for the %% specific language governing permissions and limitations %% under the License. %% @doc Lager's application module. Not a lot to see here. %% @private -module(lager_app). -behaviour(application). -include("lager.hrl"). -ifdef(TEST). 
-include_lib("eunit/include/eunit.hrl"). -endif. -export([start/0, start/2, start_handler/3, configure_sink/2, stop/1, boot/1]). %% The `application:get_env/3` compatibility wrapper was useful %% for other modules in r15 and before -export([get_env/3]). -define(FILENAMES, '__lager_file_backend_filenames'). -define(THROTTLE, lager_backend_throttle). -define(DEFAULT_HANDLER_CONF, [{lager_console_backend, [{level, info}]}, {lager_file_backend, [{file, "log/error.log"}, {level, error}, {size, 10485760}, {date, "$D0"}, {count, 5}] }, {lager_file_backend, [{file, "log/console.log"}, {level, info}, {size, 10485760}, {date, "$D0"}, {count, 5}] } ]). start() -> application:start(lager). start_throttle(Sink, Threshold, Window) -> _ = supervisor:start_child(lager_handler_watcher_sup, [Sink, ?THROTTLE, [Threshold, Window]]), ok. determine_async_behavior(_Sink, undefined, _Window) -> ok; determine_async_behavior(_Sink, Threshold, _Window) when not is_integer(Threshold) orelse Threshold < 0 -> error_logger:error_msg("Invalid value for 'async_threshold': ~p~n", [Threshold]), throw({error, bad_config}); determine_async_behavior(Sink, Threshold, undefined) -> start_throttle(Sink, Threshold, erlang:trunc(Threshold * 0.2)); determine_async_behavior(_Sink, Threshold, Window) when not is_integer(Window) orelse Window > Threshold orelse Window < 0 -> error_logger:error_msg( "Invalid value for 'async_threshold_window': ~p~n", [Window]), throw({error, bad_config}); determine_async_behavior(Sink, Threshold, Window) -> start_throttle(Sink, Threshold, Window). start_handlers(_Sink, undefined) -> ok; start_handlers(_Sink, Handlers) when not is_list(Handlers) -> error_logger:error_msg( "Invalid value for 'handlers' (must be list): ~p~n", [Handlers]), throw({error, bad_config}); start_handlers(Sink, Handlers) -> %% handlers failing to start are handled in the handler_watcher lager_config:global_set(handlers, lager_config:global_get(handlers, []) ++ lists:map(fun({Module, Config}) -> check_handler_config(Module, Config), start_handler(Sink, Module, Config); (_) -> throw({error, bad_config}) end, expand_handlers(Handlers))), ok. start_handler(Sink, Module, Config) -> {ok, Watcher} = supervisor:start_child(lager_handler_watcher_sup, [Sink, Module, Config]), {Module, Watcher, Sink}. check_handler_config({lager_file_backend, F}, Config) when is_list(Config); is_tuple(Config) -> Fs = case get(?FILENAMES) of undefined -> ordsets:new(); X -> X end, case ordsets:is_element(F, Fs) of true -> error_logger:error_msg( "Cannot have same file (~p) in multiple file backends~n", [F]), throw({error, bad_config}); false -> put(?FILENAMES, ordsets:add_element(F, Fs)) end, ok; check_handler_config(_Handler, Config) when is_list(Config) orelse is_atom(Config) -> ok; check_handler_config(Handler, _BadConfig) -> throw({error, {bad_config, Handler}}). clean_up_config_checks() -> erase(?FILENAMES). interpret_hwm(undefined) -> undefined; interpret_hwm(HWM) when not is_integer(HWM) orelse HWM < 0 -> _ = lager:log(warning, self(), "Invalid error_logger high water mark: ~p, disabling", [HWM]), undefined; interpret_hwm(HWM) -> HWM. 
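%% For orientation, a minimal sys.config sketch that exercises the handler
%% plumbing above (paths, levels and thresholds are illustrative; only the
%% console/file entries mirror ?DEFAULT_HANDLER_CONF):
%%
%%   {lager, [
%%       {handlers, [
%%           {lager_console_backend, [{level, info}]},
%%           {lager_file_backend, [{file, "log/error.log"}, {level, error},
%%                                 {size, 10485760}, {date, "$D0"}, {count, 5}]}
%%       ]},
%%       {async_threshold, 20},
%%       {async_threshold_window, 5}
%%   ]}
%%
%% determine_async_behavior/3 insists on 0 =< Window =< Threshold, and
%% check_handler_config/2 rejects two file backends pointing at the same file.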
maybe_install_sink_killer(_Sink, undefined, _ReinstallTimer) -> ok; maybe_install_sink_killer(Sink, HWM, undefined) -> maybe_install_sink_killer(Sink, HWM, 5000); maybe_install_sink_killer(Sink, HWM, ReinstallTimer) when is_integer(HWM) andalso is_integer(ReinstallTimer) andalso HWM >= 0 andalso ReinstallTimer >= 0 -> _ = supervisor:start_child(lager_handler_watcher_sup, [Sink, lager_manager_killer, [HWM, ReinstallTimer]]); maybe_install_sink_killer(_Sink, HWM, ReinstallTimer) -> error_logger:error_msg("Invalid value for 'killer_hwm': ~p or 'killer_reinstall_after': ~p", [HWM, ReinstallTimer]), throw({error, bad_config}). -spec start_error_logger_handler(boolean(), pos_integer(), list()) -> list(). start_error_logger_handler(false, _HWM, _Whitelist) -> []; start_error_logger_handler(true, HWM, WhiteList) -> GlStrategy = case application:get_env(lager, error_logger_groupleader_strategy) of undefined -> handle; {ok, GlStrategy0} when GlStrategy0 =:= handle; GlStrategy0 =:= ignore; GlStrategy0 =:= mirror -> GlStrategy0; {ok, BadGlStrategy} -> error_logger:error_msg( "Invalid value for 'error_logger_groupleader_strategy': ~p~n", [BadGlStrategy]), throw({error, bad_config}) end, case whereis(error_logger) of undefined -> %% On OTP 21 and above, error_logger is deprecated in favor of 'logger' %% As a band-aid, boot up error_logger anyway and install it as a logger handler %% we can't use error_logger:add_report_handler because we want supervision of the handler %% so we have to manually add the logger handler %% %% Longer term we should be installing a logger handler instead, but this will bridge the gap %% for now. _ = error_logger:start(), _ = logger:add_handler(error_logger,error_logger,#{level=>info,filter_default=>log}), ok = maybe_remove_logger_handler(); _ -> ok end, %% capture which handlers we removed from error_logger so we can restore them when lager stops OldHandlers = case supervisor:start_child(lager_handler_watcher_sup, [error_logger, error_logger_lager_h, [HWM, GlStrategy]]) of {ok, _} -> [begin error_logger:delete_report_handler(X), X end || X <- gen_event:which_handlers(error_logger) -- [error_logger_lager_h | WhiteList]]; {error, _} -> [] end, OldHandlers. %% On OTP 21.1 and higher we need to remove the `default' handler. %% But it might not exist, so we will wrap this in a try-catch %% block maybe_remove_logger_handler() -> try ok = logger:remove_handler(default) catch error:undef -> ok; Err:Reason -> error_logger:error_msg("calling logger:remove_handler(default) failed: ~p ~p", [Err, Reason]) end. configure_sink(Sink, SinkDef) -> lager_config:new_sink(Sink), ChildId = lager_util:make_internal_sink_name(Sink), _ = supervisor:start_child(lager_sup, {ChildId, {gen_event, start_link, [{local, Sink}]}, permanent, 5000, worker, dynamic}), determine_async_behavior(Sink, proplists:get_value(async_threshold, SinkDef), proplists:get_value(async_threshold_window, SinkDef) ), _ = maybe_install_sink_killer(Sink, proplists:get_value(killer_hwm, SinkDef), proplists:get_value(killer_reinstall_after, SinkDef)), start_handlers(Sink, proplists:get_value(handlers, SinkDef, [])), lager:update_loglevel_config(Sink). configure_extra_sinks(Sinks) -> lists:foreach(fun({Sink, Proplist}) -> configure_sink(Sink, Proplist) end, Sinks). -spec get_env(atom(), atom(), term()) -> term(). get_env(Application, Key, Default) -> application:get_env(Application, Key, Default). 
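%% configure_sink/2 above is also what boots the sinks declared under the
%% `extra_sinks' application env. A sketch of such an entry (sink name, file
%% and threshold are illustrative; by lager convention extra sink names end
%% in `_lager_event'):
%%
%%   {lager, [
%%       {extra_sinks, [
%%           {audit_lager_event, [
%%               {handlers, [{lager_file_backend,
%%                            [{file, "log/audit.log"}, {level, info}]}]},
%%               {async_threshold, 500}
%%           ]}
%%       ]}
%%   ]}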
start(_StartType, _StartArgs) -> {ok, Pid} = lager_sup:start_link(), SavedHandlers = boot(), _ = boot('__all_extra'), _ = boot('__traces'), clean_up_config_checks(), {ok, Pid, SavedHandlers}. boot() -> %% Handle the default sink. determine_async_behavior(?DEFAULT_SINK, application:get_env(lager, async_threshold, undefined), application:get_env(lager, async_threshold_window, undefined)), _ = maybe_install_sink_killer(?DEFAULT_SINK, application:get_env(lager, killer_hwm, undefined), application:get_env(lager, killer_reinstall_after, undefined)), start_handlers(?DEFAULT_SINK, application:get_env(lager, handlers, ?DEFAULT_HANDLER_CONF)), lager:update_loglevel_config(?DEFAULT_SINK), SavedHandlers = start_error_logger_handler( application:get_env(lager, error_logger_redirect, true), interpret_hwm(application:get_env(lager, error_logger_hwm, 0)), application:get_env(lager, error_logger_whitelist, []) ), SavedHandlers. boot('__traces') -> _ = lager_util:trace_filter(none), ok = add_configured_traces(); boot('__all_extra') -> configure_extra_sinks(application:get_env(lager, extra_sinks, [])); boot(?DEFAULT_SINK) -> boot(); boot(Sink) -> AllSinksDef = application:get_env(lager, extra_sinks, []), boot_sink(Sink, lists:keyfind(Sink, 1, AllSinksDef)). boot_sink(Sink, {Sink, Def}) -> configure_sink(Sink, Def); boot_sink(Sink, false) -> configure_sink(Sink, []). stop(Handlers) -> lists:foreach(fun(Handler) -> error_logger:add_report_handler(Handler) end, Handlers). expand_handlers([]) -> []; expand_handlers([{lager_file_backend, [{Key, _Value}|_]=Config}|T]) when is_atom(Key) -> %% this is definitely a new-style config, no expansion needed [maybe_make_handler_id(lager_file_backend, Config) | expand_handlers(T)]; expand_handlers([{lager_file_backend, Configs}|T]) -> ?INT_LOG(notice, "Deprecated lager_file_backend config detected, please consider updating it", []), [ {lager_file_backend:config_to_id(Config), Config} || Config <- Configs] ++ expand_handlers(T); expand_handlers([{Mod, Config}|T]) when is_atom(Mod) -> [maybe_make_handler_id(Mod, Config) | expand_handlers(T)]; expand_handlers([H|T]) -> [H | expand_handlers(T)]. add_configured_traces() -> Traces = case application:get_env(lager, traces) of undefined -> []; {ok, TraceVal} -> TraceVal end, lists:foreach(fun start_configured_trace/1, Traces), ok. start_configured_trace({Handler, Filter}) -> {ok, _} = lager:trace(Handler, Filter); start_configured_trace({Handler, Filter, Level}) when is_atom(Level) -> {ok, _} = lager:trace(Handler, Filter, Level). maybe_make_handler_id(Mod, Config) -> %% Allow the backend to generate a gen_event handler id, if it wants to. %% We don't use erlang:function_exported here because that requires the module %% already be loaded, which is unlikely at this phase of startup. Using code:load %% caused undesirable side-effects with generating code-coverage reports. try Mod:config_to_id(Config) of Id -> {Id, Config} catch error:undef -> {Mod, Config} end. -ifdef(TEST). 
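%% maybe_make_handler_id/2 above lets a backend choose its own gen_event
%% handler id by exporting config_to_id/1 (lager_file_backend does this so
%% each file gets its own handler id). A hypothetical custom backend could do
%% the same; the module name and config key below are made up for illustration:
%%
%%   -module(my_custom_backend).
%%   -export([config_to_id/1]).
%%
%%   %% key each configured instance on its `name' option
%%   config_to_id(Config) ->
%%       {?MODULE, proplists:get_value(name, Config)}.
%%
%% Backends without config_to_id/1 simply end up as {Module, Config} pairs,
%% via the error:undef clause above.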
application_config_mangling_test_() -> [ {"Explode the file backend handlers", ?_assertMatch( [{lager_console_backend, [{level, info}]}, {{lager_file_backend,"error.log"},{"error.log",error,10485760, "$D0",5}}, {{lager_file_backend,"console.log"},{"console.log",info,10485760, "$D0",5}} ], expand_handlers([{lager_console_backend, [{level, info}]}, {lager_file_backend, [ {"error.log", error, 10485760, "$D0", 5}, {"console.log", info, 10485760, "$D0", 5} ]}] )) }, {"Explode the short form of backend file handlers", ?_assertMatch( [{lager_console_backend, [{level, info}]}, {{lager_file_backend,"error.log"},{"error.log",error}}, {{lager_file_backend,"console.log"},{"console.log",info}} ], expand_handlers([{lager_console_backend, [{level, info}]}, {lager_file_backend, [ {"error.log", error}, {"console.log", info} ]}] )) }, {"Explode with formatter info", ?_assertMatch( [{{lager_file_backend,"test.log"}, [{"test.log", debug, 10485760, "$D0", 5},{lager_default_formatter,["[",severity,"] ", message, "\n"]}]}, {{lager_file_backend,"test2.log"}, [{"test2.log",debug, 10485760, "$D0", 5},{lager_default_formatter,["2>[",severity,"] ", message, "\n"]}]}], expand_handlers([{lager_file_backend, [ [{"test.log", debug, 10485760, "$D0", 5},{lager_default_formatter,["[",severity,"] ", message, "\n"]}], [{"test2.log",debug, 10485760, "$D0", 5},{lager_default_formatter,["2>[",severity,"] ",message, "\n"]}] ] }]) ) }, {"Explode short form with short formatter info", ?_assertMatch( [{{lager_file_backend,"test.log"}, [{"test.log", debug},{lager_default_formatter,["[",severity,"] ", message, "\n"]}]}, {{lager_file_backend,"test2.log"}, [{"test2.log",debug},{lager_default_formatter}]}], expand_handlers([{lager_file_backend, [ [{"test.log", debug},{lager_default_formatter,["[",severity,"] ", message, "\n"]}], [{"test2.log",debug},{lager_default_formatter}] ] }]) ) }, {"New form needs no expansion", ?_assertMatch([ {{lager_file_backend,"test.log"}, [{file, "test.log"}]}, {{lager_file_backend,"test2.log"}, [{file, "test2.log"}, {level, info}, {sync_on, none}]}, {{lager_file_backend,"test3.log"}, [{formatter, lager_default_formatter}, {file, "test3.log"}]} ], expand_handlers([ {lager_file_backend, [{file, "test.log"}]}, {lager_file_backend, [{file, "test2.log"}, {level, info}, {sync_on, none}]}, {lager_file_backend, [{formatter, lager_default_formatter},{file, "test3.log"}]} ]) ) } ]. 
check_handler_config_test_() -> Good = expand_handlers(?DEFAULT_HANDLER_CONF), Bad = expand_handlers([{lager_console_backend, [{level, info}]}, {lager_file_backend, [{file, "same_file.log"}]}, {lager_file_backend, [{file, "same_file.log"}, {level, info}]}]), AlsoBad = [{lager_logstash_backend, {level, info}, {output, {udp, "localhost", 5000}}, {format, json}, {json_encoder, jiffy}}], BadToo = [{fail, {fail}}], OldSchoolLagerGood = expand_handlers([{lager_console_backend, [{level, info}]}, {lager_file_backend, [ {"./log/error.log",error,10485760,"$D0",5}, {"./log/console.log",info,10485760,"$D0",5}, {"./log/debug.log",debug,10485760,"$D0",5} ]}]), NewConfigMissingList = expand_handlers([{foo_backend, {file, "same_file.log"}}]), [ {"lager_file_backend_good", ?_assertEqual([ok, ok, ok], [ check_handler_config(M,C) || {M,C} <- Good ]) }, {"lager_file_backend_bad", ?_assertThrow({error, bad_config}, [ check_handler_config(M,C) || {M,C} <- Bad ]) }, {"Invalid config dies", ?_assertThrow({error, bad_config}, start_handlers(foo, AlsoBad)) }, {"Invalid config dies", ?_assertThrow({error, {bad_config, _}}, start_handlers(foo, BadToo)) }, {"Old Lager config works", ?_assertEqual([ok, ok, ok, ok], [ check_handler_config(M, C) || {M, C} <- OldSchoolLagerGood]) }, {"New Config missing its list should fail", ?_assertThrow({error, {bad_config, foo_backend}}, [ check_handler_config(M, C) || {M, C} <- NewConfigMissingList]) } ]. -endif. lager-3.8.0/src/lager_config.erl0000644000232200023220000000475113523436621017110 0ustar debalancedebalance%% Copyright (c) 2011-2012 Basho Technologies, Inc. All Rights Reserved. %% %% This file is provided to you under the Apache License, %% Version 2.0 (the "License"); you may not use this file %% except in compliance with the License. You may obtain %% a copy of the License at %% %% http://www.apache.org/licenses/LICENSE-2.0 %% %% Unless required by applicable law or agreed to in writing, %% software distributed under the License is distributed on an %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY %% KIND, either express or implied. See the License for the %% specific language governing permissions and limitations %% under the License. %% @doc Helper functions for working with lager's runtime config -module(lager_config). -include("lager.hrl"). -export([new/0, new_sink/1, get/1, get/2, set/2, global_get/1, global_get/2, global_set/2]). -define(TBL, lager_config). -define(GLOBAL, '_global'). %% For multiple sinks, the key is now the registered event name and the old key %% as a tuple. %% %% {{lager_event, loglevel}, Value} instead of {loglevel, Value} new() -> %% set up the ETS configuration table _ = try ets:new(?TBL, [named_table, public, set, {keypos, 1}, {read_concurrency, true}]) of _Result -> ok catch error:badarg -> ?INT_LOG(warning, "Table ~p already exists", [?TBL]) end, new_sink(?DEFAULT_SINK), %% Need to be able to find the `lager_handler_watcher' for all handlers ets:insert_new(?TBL, {{?GLOBAL, handlers}, []}), ok. new_sink(Sink) -> %% use insert_new here so that if we're in an appup we don't mess anything up %% %% until lager is completely started, allow all messages to go through ets:insert_new(?TBL, {{Sink, loglevel}, {element(2, lager_util:config_to_mask(debug)), []}}). global_get(Key) -> global_get(Key, undefined). global_get(Key, Default) -> get({?GLOBAL, Key}, Default). global_set(Key, Value) -> set({?GLOBAL, Key}, Value). get({_Sink, _Key}=FullKey) -> get(FullKey, undefined); get(Key) -> get({?DEFAULT_SINK, Key}, undefined). 
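%% Illustration of the key layout used above (values are examples only):
%% every entry is keyed on {Sink, Key}, with '_global' reserved for state
%% shared across sinks, e.g.
%%
%%   {{lager_event, loglevel}, {Mask, Traces}}
%%   {{'_global', handlers},   [{Handler, Watcher, Sink}, ...]}
%%
%% so a read such as lager_config:get({some_sink_lager_event, loglevel}, {0, []})
%% simply returns the supplied default if that sink was never configured.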
get({Sink, Key}, Default) -> try case ets:lookup(?TBL, {Sink, Key}) of [] -> Default; [{{Sink, Key}, Res}] -> Res end catch _:_ -> Default end; get(Key, Default) -> get({?DEFAULT_SINK, Key}, Default). set({Sink, Key}, Value) -> ets:insert(?TBL, {{Sink, Key}, Value}); set(Key, Value) -> set({?DEFAULT_SINK, Key}, Value). lager-3.8.0/src/error_logger_lager_h.erl0000644000232200023220000010573713523436621020650 0ustar debalancedebalance%% Copyright (c) 2011-2015 Basho Technologies, Inc. All Rights Reserved. %% %% This file is provided to you under the Apache License, %% Version 2.0 (the "License"); you may not use this file %% except in compliance with the License. You may obtain %% a copy of the License at %% %% http://www.apache.org/licenses/LICENSE-2.0 %% %% Unless required by applicable law or agreed to in writing, %% software distributed under the License is distributed on an %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY %% KIND, either express or implied. See the License for the %% specific language governing permissions and limitations %% under the License. %% @doc A error_logger backend for redirecting events into lager. %% Error messages and crash logs are also optionally written to a crash log. %% @see lager_crash_log %% @private -module(error_logger_lager_h). -include("lager.hrl"). -behaviour(gen_event). -export([set_high_water/1]). -export([init/1, handle_call/2, handle_event/2, handle_info/2, terminate/2, code_change/3]). -export([format_reason/1, format_mfa/1, format_args/3]). -record(state, { sink :: atom(), shaper :: lager_shaper(), %% group leader strategy groupleader_strategy :: handle | ignore | mirror, raw :: boolean() }). -define(LOGMSG(Sink, Level, Pid, Msg), case ?SHOULD_LOG(Sink, Level) of true -> _ =lager:log(Sink, Level, Pid, Msg, []), logged; _ -> no_log end). -define(LOGFMT(Sink, Level, Pid, Fmt, Args), case ?SHOULD_LOG(Sink, Level) of true -> _ = lager:log(Sink, Level, Pid, Fmt, Args), logged; _ -> no_log end). -ifdef(TEST). %% Make CRASH synchronous when testing, to avoid timing headaches -define(CRASH_LOG(Event), catch(gen_server:call(lager_crash_log, {log, Event}))). -else. -define(CRASH_LOG(Event), gen_server:cast(lager_crash_log, {log, Event})). -endif. set_high_water(N) -> gen_event:call(error_logger, ?MODULE, {set_high_water, N}, infinity). -spec init(any()) -> {ok, #state{}}. init([HighWaterMark, GlStrategy]) -> Flush = application:get_env(lager, error_logger_flush_queue, true), FlushThr = application:get_env(lager, error_logger_flush_threshold, 0), Shaper = #lager_shaper{hwm=HighWaterMark, flush_queue = Flush, flush_threshold = FlushThr, filter=shaper_fun(), id=?MODULE}, Raw = application:get_env(lager, error_logger_format_raw, false), Sink = configured_sink(), {ok, #state{sink=Sink, shaper=Shaper, groupleader_strategy=GlStrategy, raw=Raw}}. handle_call({set_high_water, N}, #state{shaper=Shaper} = State) -> NewShaper = Shaper#lager_shaper{hwm=N}, {ok, ok, State#state{shaper = NewShaper}}; handle_call(_Request, State) -> {ok, unknown_call, State}. shaper_fun() -> case {application:get_env(lager, suppress_supervisor_start_stop, false), application:get_env(lager, suppress_application_start_stop, false)} of {false, false} -> fun(_) -> false end; {true, true} -> fun suppress_supervisor_start_and_application_start/1; {false, true} -> fun suppress_application_start/1; {true, false} -> fun suppress_supervisor_start/1 end. suppress_supervisor_start_and_application_start(E) -> suppress_supervisor_start(E) orelse suppress_application_start(E). 
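%% The settings consulted by init/1 and shaper_fun/0 above, together with the
%% error_logger_redirect / error_logger_hwm values that lager_app reads before
%% installing this handler, are all plain lager application env keys. A sketch
%% with illustrative values:
%%
%%   {lager, [
%%       {error_logger_redirect, true},
%%       {error_logger_hwm, 50},                %% events/sec before dropping
%%       {error_logger_flush_queue, true},
%%       {error_logger_flush_threshold, 1000},
%%       {error_logger_format_raw, false},
%%       {suppress_application_start_stop, true},
%%       {suppress_supervisor_start_stop, true}
%%   ]}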
suppress_application_start({info_report, _GL, {_Pid, std_info, D}}) when is_list(D) -> lists:member({exited, stopped}, D); suppress_application_start({info_report, _GL, {_P, progress, D}}) -> lists:keymember(application, 1, D) andalso lists:keymember(started_at, 1, D); suppress_application_start(_) -> false. suppress_supervisor_start({info_report, _GL, {_P, progress, D}}) -> lists:keymember(started, 1, D) andalso lists:keymember(supervisor, 1, D); suppress_supervisor_start(_) -> false. handle_event(Event, #state{sink=Sink, shaper=Shaper} = State) -> case lager_util:check_hwm(Shaper, Event) of {true, 0, NewShaper} -> eval_gl(Event, State#state{shaper=NewShaper}); {true, Drop, #lager_shaper{hwm=Hwm} = NewShaper} when Drop > 0 -> ?LOGFMT(Sink, warning, self(), "lager_error_logger_h dropped ~p messages in the last second that exceeded the limit of ~p messages/sec", [Drop, Hwm]), eval_gl(Event, State#state{shaper=NewShaper}); {false, _, #lager_shaper{dropped=D} = NewShaper} -> {ok, State#state{shaper=NewShaper#lager_shaper{dropped=D+1}}} end. handle_info({shaper_expired, ?MODULE}, #state{sink=Sink, shaper=Shaper} = State) -> case Shaper#lager_shaper.dropped of 0 -> ok; Dropped -> ?LOGFMT(Sink, warning, self(), "lager_error_logger_h dropped ~p messages in the last second that exceeded the limit of ~p messages/sec", [Dropped, Shaper#lager_shaper.hwm]) end, {ok, State#state{shaper=Shaper#lager_shaper{dropped=0, mps=0, lasttime=os:timestamp()}}}; handle_info(_Info, State) -> {ok, State}. terminate(_Reason, _State) -> ok. code_change(_OldVsn, {state, Shaper, GLStrategy}, _Extra) -> Raw = application:get_env(lager, error_logger_format_raw, false), {ok, #state{ sink=configured_sink(), shaper=Shaper, groupleader_strategy=GLStrategy, raw=Raw }}; code_change(_OldVsn, {state, Sink, Shaper, GLS}, _Extra) -> Raw = application:get_env(lager, error_logger_format_raw, false), {ok, #state{sink=Sink, shaper=Shaper, groupleader_strategy=GLS, raw=Raw}}; code_change(_OldVsn, State, _Extra) -> {ok, State}. %% internal functions configured_sink() -> case proplists:get_value(?ERROR_LOGGER_SINK, application:get_env(lager, extra_sinks, [])) of undefined -> ?DEFAULT_SINK; _ -> ?ERROR_LOGGER_SINK end. eval_gl(Event, #state{groupleader_strategy=GlStrategy0}=State) when is_pid(element(2, Event)) -> case element(2, Event) of GL when node(GL) =/= node(), GlStrategy0 =:= ignore -> gen_event:notify({error_logger, node(GL)}, Event), {ok, State}; GL when node(GL) =/= node(), GlStrategy0 =:= mirror -> gen_event:notify({error_logger, node(GL)}, Event), log_event(Event, State); _ -> log_event(Event, State) end; eval_gl(Event, State) -> log_event(Event, State). 
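%% The high water mark installed above can also be adjusted at runtime via the
%% exported helper (the value is illustrative):
%%
%%   error_logger_lager_h:set_high_water(200).
%%
%% eval_gl/2 applies error_logger_groupleader_strategy: `handle' (the default)
%% logs events locally even when their group leader is remote, `ignore'
%% forwards such events to the remote node's error_logger without logging them
%% here, and `mirror' both forwards and logs locally.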
log_event(Event, #state{sink=Sink} = State) -> DidLog = case Event of {error, _GL, {Pid, Fmt, Args}} -> FormatRaw = State#state.raw, case {FormatRaw, Fmt} of {false, "** Generic server "++_} -> %% gen_server terminate {Reason, Name} = case Args of [N, _Msg, _State, R] -> {R, N}; [N, _Msg, _State, R, _Client] -> %% OTP 20 crash reports where the client pid is dead don't include the stacktrace {R, N}; [N, _Msg, _State, R, _Client, _Stacktrace] -> %% OTP 20 crash reports contain the pid of the client and stacktrace %% TODO do something with them {R, N} end, ?CRASH_LOG(Event), {Md, Formatted} = format_reason_md(Reason), ?LOGFMT(Sink, error, [{pid, Pid}, {name, Name} | Md], "gen_server ~w terminated with reason: ~s", [Name, Formatted]); {false, "** State machine "++_} -> %% Check if the terminated process is gen_fsm or gen_statem %% since they generate the same exit message {Type, Name, StateName, Reason} = case Args of [TName, _Msg, TStateName, _StateData, TReason] -> {gen_fsm, TName, TStateName, TReason}; %% Handle changed logging in gen_fsm stdlib-3.9 (TPid, ClientArgs) [TName, _Msg, TPid, TStateName, _StateData, TReason | _ClientArgs] when is_pid(TPid), is_atom(TStateName) -> {gen_fsm, TName, TStateName, TReason}; %% Handle changed logging in gen_statem stdlib-3.9 (ClientArgs) [TName, _Msg, {TStateName, _StateData}, _ExitType, TReason, _CallbackMode, Stacktrace | _ClientArgs] -> {gen_statem, TName, TStateName, {TReason, Stacktrace}}; [TName, _Msg, [{TStateName, _StateData}], _ExitType, TReason, _CallbackMode, Stacktrace | _ClientArgs] -> %% sometimes gen_statem wraps its statename/data in a list for some reason??? {gen_statem, TName, TStateName, {TReason, Stacktrace}} end, {Md, Formatted} = format_reason_md(Reason), ?CRASH_LOG(Event), ?LOGFMT(Sink, error, [{pid, Pid}, {name, Name} | Md], "~s ~w in state ~w terminated with reason: ~s", [Type, Name, StateName, Formatted]); {false, "** gen_event handler"++_} -> %% gen_event handler terminate [ID, Name, _Msg, _State, Reason] = Args, {Md, Formatted} = format_reason_md(Reason), ?CRASH_LOG(Event), ?LOGFMT(Sink, error, [{pid, Pid}, {name, Name} | Md], "gen_event ~w installed in ~w terminated with reason: ~s", [ID, Name, Formatted]); {false, "** Cowboy handler"++_} -> %% Cowboy HTTP server error ?CRASH_LOG(Event), case Args of [Module, Function, Arity, _Request, _State] -> %% we only get the 5-element list when its a non-exported function ?LOGFMT(Sink, error, Pid, "Cowboy handler ~p terminated with reason: call to undefined function ~p:~p/~p", [Module, Module, Function, Arity]); [Module, Function, Arity, _Class, Reason | Tail] -> %% any other cowboy error_format list *always* ends with the stacktrace StackTrace = lists:last(Tail), {Md, Formatted} = format_reason_md({Reason, StackTrace}), ?LOGFMT(Sink, error, [{pid, Pid} | Md], "Cowboy handler ~p terminated in ~p:~p/~p with reason: ~s", [Module, Module, Function, Arity, Formatted]) end; {false, "Ranch listener "++_} -> %% Ranch errors ?CRASH_LOG(Event), case Args of %% Error logged by cowboy, which starts as ranch error [Ref, ConnectionPid, StreamID, RequestPid, Reason, StackTrace] -> {Md, Formatted} = format_reason_md({Reason, StackTrace}), ?LOGFMT(Sink, error, [{pid, RequestPid} | Md], "Cowboy stream ~p with ranch listener ~p and connection process ~p " "had its request process exit with reason: ~s", [StreamID, Ref, ConnectionPid, Formatted]); [Ref, _Protocol, Worker, {[{reason, Reason}, {mfa, {Module, Function, Arity}}, {stacktrace, StackTrace} | _], _}] -> {Md, Formatted} = 
format_reason_md({Reason, StackTrace}), ?LOGFMT(Sink, error, [{pid, Worker} | Md], "Ranch listener ~p terminated in ~p:~p/~p with reason: ~s", [Ref, Module, Function, Arity, Formatted]); [Ref, _Protocol, Worker, Reason] -> {Md, Formatted} = format_reason_md(Reason), ?LOGFMT(Sink, error, [{pid, Worker} | Md], "Ranch listener ~p terminated with reason: ~s", [Ref, Formatted]); [Ref, Protocol, Ret] -> %% ranch_conns_sup.erl module line 119-123 has three parameters error msg, log it. {Md, Formatted} = format_reason_md(Ret), ?LOGFMT(Sink, error, [{pid, Protocol} | Md], "Ranch listener ~p terminated with result:~s", [Ref, Formatted]) end; {false, "webmachine error"++_} -> %% Webmachine HTTP server error ?CRASH_LOG(Event), [Path, Error] = Args, %% webmachine likes to mangle the stack, for some reason StackTrace = case Error of {error, {error, Reason, Stack}} -> {Reason, Stack}; _ -> Error end, {Md, Formatted} = format_reason_md(StackTrace), ?LOGFMT(Sink, error, [{pid, Pid} | Md], "Webmachine error at path ~p : ~s", [Path, Formatted]); _ -> ?CRASH_LOG(Event), ?LOGFMT(Sink, error, Pid, Fmt, Args) end; {error_report, _GL, {Pid, std_error, D}} -> ?CRASH_LOG(Event), ?LOGMSG(Sink, error, Pid, print_silly_list(D)); {error_report, _GL, {Pid, supervisor_report, D}} -> ?CRASH_LOG(Event), case lists:sort(D) of [{errorContext, Ctx}, {offender, Off}, {reason, Reason}, {supervisor, Name}] -> Offender = format_offender(Off), {Md, Formatted} = format_reason_md(Reason), ?LOGFMT(Sink, error, [{pid, Pid} | Md], "Supervisor ~w had child ~s exit with reason ~s in context ~w", [supervisor_name(Name), Offender, Formatted, Ctx]); _ -> ?LOGMSG(Sink, error, Pid, "SUPERVISOR REPORT " ++ print_silly_list(D)) end; {error_report, _GL, {Pid, crash_report, [Self, Neighbours]}} -> ?CRASH_LOG(Event), {Md, Formatted} = format_crash_report(Self, Neighbours), ?LOGMSG(Sink, error, [{pid, Pid} | Md], "CRASH REPORT " ++ Formatted); {warning_msg, _GL, {Pid, Fmt, Args}} -> ?LOGFMT(Sink, warning, Pid, Fmt, Args); {warning_report, _GL, {Pid, std_warning, Report}} -> ?LOGMSG(Sink, warning, Pid, print_silly_list(Report)); {info_msg, _GL, {Pid, Fmt, Args}} -> ?LOGFMT(Sink, info, Pid, Fmt, Args); {info_report, _GL, {Pid, std_info, D}} when is_list(D) -> Details = lists:sort(D), case Details of [{application, App}, {exited, Reason}, {type, _Type}] -> case application:get_env(lager, suppress_application_start_stop, false) of true when Reason == stopped -> no_log; _ -> {Md, Formatted} = format_reason_md(Reason), ?LOGFMT(Sink, info, [{pid, Pid} | Md], "Application ~w exited with reason: ~s", [App, Formatted]) end; _ -> ?LOGMSG(Sink, info, Pid, print_silly_list(D)) end; {info_report, _GL, {Pid, std_info, D}} -> ?LOGFMT(Sink, info, Pid, "~w", [D]); {info_report, _GL, {P, progress, D}} -> Details = lists:sort(D), case Details of [{application, App}, {started_at, Node}] -> case application:get_env(lager, suppress_application_start_stop, false) of true -> no_log; _ -> ?LOGFMT(Sink, info, P, "Application ~w started on node ~w", [App, Node]) end; [{started, Started}, {supervisor, Name}] -> case application:get_env(lager, suppress_supervisor_start_stop, false) of true -> no_log; _ -> MFA = format_mfa(get_value(mfargs, Started)), Pid = get_value(pid, Started), ?LOGFMT(Sink, debug, P, "Supervisor ~w started ~s at pid ~w", [supervisor_name(Name), MFA, Pid]) end; _ -> ?LOGMSG(Sink, info, P, "PROGRESS REPORT " ++ print_silly_list(D)) end; _ -> ?LOGFMT(Sink, warning, self(), "Unexpected error_logger event ~w", [Event]) end, case DidLog of logged -> {ok, State}; 
no_log -> Shaper = State#state.shaper, {ok, State#state{ shaper = Shaper#lager_shaper{ mps = Shaper#lager_shaper.mps - 1 } } } end. format_crash_report(Report, Neighbours) -> Name = case get_value(registered_name, Report, []) of [] -> %% process_info(Pid, registered_name) returns [] for unregistered processes get_value(pid, Report); Atom -> Atom end, Md0 = case get_value(dictionary, Report, []) of [] -> %% no process dictionary, so there is no lager metadata to pull out []; Dict -> %% pull the lager metadata out of the process dictionary, if we can get_value('_lager_metadata', Dict, []) end, {Class, Reason, Trace} = get_value(error_info, Report), {Md, ReasonStr} = format_reason_md({Reason, Trace}), Type = case Class of exit -> "exited"; _ -> "crashed" end, {Md0 ++ Md, io_lib:format("Process ~w with ~w neighbours ~s with reason: ~s", [Name, length(Neighbours), Type, ReasonStr])}. format_offender(Off) -> case get_value(mfargs, Off) of undefined -> %% supervisor_bridge io_lib:format("at module ~w at ~w", [get_value(mod, Off), get_value(pid, Off)]); MFArgs -> %% regular supervisor {_, MFA} = format_mfa_md(MFArgs), %% In 2014 the error report changed from `name' to %% `id', so try that first. Name = case get_value(id, Off) of undefined -> get_value(name, Off); Id -> Id end, io_lib:format("~p started with ~s at ~w", [Name, MFA, get_value(pid, Off)]) end. %% backwards compatibility shim format_reason(Reason) -> element(2, format_reason_md(Reason)). -spec format_reason_md(Stacktrace:: any()) -> {Metadata:: [{atom(), any()}], String :: list()}. format_reason_md({'function not exported', [{M, F, A},MFA|_]}) -> {Md, Formatted} = format_mfa_md(MFA), {_, Formatted2} = format_mfa_md({M, F, length(A)}), {[{reason, 'function not exported'} | Md], ["call to undefined function ", Formatted2, " from ", Formatted]}; format_reason_md({'function not exported', [{M, F, A, _Props},MFA|_]}) -> %% R15 line numbers {Md, Formatted} = format_mfa_md(MFA), {_, Formatted2} = format_mfa_md({M, F, length(A)}), {[{reason, 'function not exported'} | Md], ["call to undefined function ", Formatted2, " from ", Formatted]}; format_reason_md({undef, [MFA|_]}) -> {Md, Formatted} = format_mfa_md(MFA), {[{reason, undef} | Md], ["call to undefined function ", Formatted]}; format_reason_md({bad_return, {_MFA, {'EXIT', Reason}}}) -> format_reason_md(Reason); format_reason_md({bad_return, {MFA, Val}}) -> {Md, Formatted} = format_mfa_md(MFA), {[{reason, bad_return} | Md], ["bad return value ", print_val(Val), " from ", Formatted]}; format_reason_md({bad_return_value, Val}) -> {[{reason, bad_return}], ["bad return value: ", print_val(Val)]}; format_reason_md({{bad_return_value, Val}, MFA}) -> {Md, Formatted} = format_mfa_md(MFA), {[{reason, bad_return_value} | Md], ["bad return value: ", print_val(Val), " in ", Formatted]}; format_reason_md({{badrecord, Record}, [MFA|_]}) -> {Md, Formatted} = format_mfa_md(MFA), {[{reason, badrecord} | Md], ["bad record ", print_val(Record), " in ", Formatted]}; format_reason_md({{case_clause, Val}, [MFA|_]}) -> {Md, Formatted} = format_mfa_md(MFA), {[{reason, case_clause} | Md], ["no case clause matching ", print_val(Val), " in ", Formatted]}; format_reason_md({function_clause, [MFA|_]}) -> {Md, Formatted} = format_mfa_md(MFA), {[{reason, function_clause} | Md], ["no function clause matching ", Formatted]}; format_reason_md({if_clause, [MFA|_]}) -> {Md, Formatted} = format_mfa_md(MFA), {[{reason, if_clause} | Md], ["no true branch found while evaluating if expression in ", Formatted]};
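%% Illustrative sketch, not part of the upstream module: format_reason_md/1 always returns
%% {Metadata, IoData}. Assuming a hypothetical stacktrace entry {foo, bar, [1, 2], []}, a reason of
%% {undef, [{foo, bar, [1, 2], []}]} comes back roughly as
%%   {[{reason, undef}, {module, foo}, {function, bar}],
%%    ["call to undefined function ", "foo:bar(1, 2)"]}
%% where the iodata flattens to "call to undefined function foo:bar(1, 2)".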
format_reason_md({{try_clause, Val}, [MFA|_]}) -> {Md, Formatted} = format_mfa_md(MFA), {[{reason, try_clause} | Md], ["no try clause matching ", print_val(Val), " in ", Formatted]}; format_reason_md({badarith, [MFA|_]}) -> {Md, Formatted} = format_mfa_md(MFA), {[{reason, badarith} | Md], ["bad arithmetic expression in ", Formatted]}; format_reason_md({{badmatch, Val}, [MFA|_]}) -> {Md, Formatted} = format_mfa_md(MFA), {[{reason, badmatch} | Md], ["no match of right hand value ", print_val(Val), " in ", Formatted]}; format_reason_md({emfile, _Trace}) -> {[{reason, emfile}], "maximum number of file descriptors exhausted, check ulimit -n"}; format_reason_md({system_limit, [{M, F, _}|_] = Trace}) -> Limit = case {M, F} of {erlang, open_port} -> "maximum number of ports exceeded"; {erlang, spawn} -> "maximum number of processes exceeded"; {erlang, spawn_opt} -> "maximum number of processes exceeded"; {erlang, list_to_atom} -> "tried to create an atom larger than 255, or maximum atom count exceeded"; {ets, new} -> "maximum number of ETS tables exceeded"; _ -> {Str, _} = lager_trunc_io:print(Trace, 500), Str end, {[{reason, system_limit}], ["system limit: ", Limit]}; format_reason_md({badarg, [MFA,MFA2|_]}) -> case MFA of {_M, _F, A, _Props} when is_list(A) -> %% R15 line numbers {Md, Formatted} = format_mfa_md(MFA2), {_, Formatted2} = format_mfa_md(MFA), {[{reason, badarg} | Md], ["bad argument in call to ", Formatted2, " in ", Formatted]}; {_M, _F, A} when is_list(A) -> {Md, Formatted} = format_mfa_md(MFA2), {_, Formatted2} = format_mfa_md(MFA), {[{reason, badarg} | Md], ["bad argument in call to ", Formatted2, " in ", Formatted]}; _ -> %% seems to be generated by a bad call to a BIF {Md, Formatted} = format_mfa_md(MFA), {[{reason, badarg} | Md], ["bad argument in ", Formatted]} end; format_reason_md({{badarg, Stack}, _}) -> format_reason_md({badarg, Stack}); format_reason_md({{badarity, {Fun, Args}}, [MFA|_]}) -> {arity, Arity} = lists:keyfind(arity, 1, erlang:fun_info(Fun)), {Md, Formatted} = format_mfa_md(MFA), {[{reason, badarity} | Md], [io_lib:format("fun called with wrong arity of ~w instead of ~w in ", [length(Args), Arity]), Formatted]}; format_reason_md({noproc, MFA}) -> {Md, Formatted} = format_mfa_md(MFA), {[{reason, noproc} | Md], ["no such process or port in call to ", Formatted]}; format_reason_md({{badfun, Term}, [MFA|_]}) -> {Md, Formatted} = format_mfa_md(MFA), {[{reason, badfun} | Md], ["bad function ", print_val(Term), " in ", Formatted]}; format_reason_md({Reason, [{M, F, A}|_]}) when is_atom(M), is_atom(F), is_integer(A) -> {Md, Formatted} = format_reason_md(Reason), {_, Formatted2} = format_mfa_md({M, F, A}), {Md, [Formatted, " in ", Formatted2]}; format_reason_md({Reason, [{M, F, A, Props}|_]}) when is_atom(M), is_atom(F), is_integer(A), is_list(Props) -> %% line numbers {Md, Formatted} = format_reason_md(Reason), {_, Formatted2} = format_mfa_md({M, F, A, Props}), {Md, [Formatted, " in ", Formatted2]}; format_reason_md(Reason) -> {Str, _} = lager_trunc_io:print(Reason, 500), {[], Str}. %% backwards compatibility shim format_mfa(MFA) -> element(2, format_mfa_md(MFA)). -spec format_mfa_md(any()) -> {[{atom(), any()}], list()}.
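%% Illustrative sketch with assumed inputs (not from the upstream source): format_mfa_md/1 accepts
%% either an arity or an argument list, plus optional location props, e.g.
%%   format_mfa_md({lists, keyfind, 3})
%%     %% -> roughly {[{module, lists}, {function, keyfind}], "lists:keyfind/3"}
%%   format_mfa_md({lists, keyfind, [id, 1, []], [{line, 42}]})
%%     %% -> same metadata plus {line, 42}, with " line 42" appended to the formatted call
%% The iodata half ends up in the log line; the proplist half feeds the lager metadata.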
format_mfa_md({M, F, A}) when is_list(A) -> {FmtStr, Args} = format_args(A, [], []), {[{module, M}, {function, F}], io_lib:format("~w:~w("++FmtStr++")", [M, F | Args])}; format_mfa_md({M, F, A}) when is_integer(A) -> {[{module, M}, {function, F}], io_lib:format("~w:~w/~w", [M, F, A])}; format_mfa_md({M, F, A, Props}) when is_list(Props) -> case get_value(line, Props) of undefined -> format_mfa_md({M, F, A}); Line -> {Md, Formatted} = format_mfa_md({M, F, A}), {[{line, Line} | Md], [Formatted, io_lib:format(" line ~w", [Line])]} end; format_mfa_md([{M, F, A}| _]) -> %% this kind of weird stacktrace can be generated by an uncaught throw in a gen_server format_mfa_md({M, F, A}); format_mfa_md([{M, F, A, Props}| _]) when is_list(Props) -> %% this kind of weird stacktrace can be generated by an uncaught throw in a gen_server %% TODO we might not always want to print the first MFA we see here, often it is more helpful %% to print a lower one, but it is hard to programmatically decide. format_mfa_md({M, F, A, Props}); format_mfa_md(Other) -> {[], io_lib:format("~w", [Other])}. format_args([], FmtAcc, ArgsAcc) -> {string:join(lists:reverse(FmtAcc), ", "), lists:reverse(ArgsAcc)}; format_args([H|T], FmtAcc, ArgsAcc) -> {Str, _} = lager_trunc_io:print(H, 100), format_args(T, ["~s"|FmtAcc], [Str|ArgsAcc]). print_silly_list(L) when is_list(L) -> case lager_stdlib:string_p(L) of true -> lager_trunc_io:format("~s", [L], ?DEFAULT_TRUNCATION); _ -> print_silly_list(L, [], []) end; print_silly_list(L) -> {Str, _} = lager_trunc_io:print(L, ?DEFAULT_TRUNCATION), Str. print_silly_list([], Fmt, Acc) -> lager_trunc_io:format(string:join(lists:reverse(Fmt), ", "), lists:reverse(Acc), ?DEFAULT_TRUNCATION); print_silly_list([{K,V}|T], Fmt, Acc) -> print_silly_list(T, ["~p: ~p" | Fmt], [V, K | Acc]); print_silly_list([H|T], Fmt, Acc) -> print_silly_list(T, ["~p" | Fmt], [H | Acc]). print_val(Val) -> {Str, _} = lager_trunc_io:print(Val, 500), Str. %% @doc Faster than proplists, but with the same API as long as you don't need to %% handle bare atom keys get_value(Key, Value) -> get_value(Key, Value, undefined). get_value(Key, List, Default) -> case lists:keyfind(Key, 1, List) of false -> Default; {Key, Value} -> Value end. supervisor_name({local, Name}) -> Name; supervisor_name(Name) -> Name. -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). no_silent_hwm_drops_test_() -> {timeout, 10000, [ fun() -> error_logger:tty(false), application:load(lager), application:set_env(lager, handlers, [{lager_test_backend, warning}]), application:set_env(lager, error_logger_redirect, true), application:set_env(lager, error_logger_hwm, 5), application:set_env(lager, error_logger_flush_queue, false), application:set_env(lager, suppress_supervisor_start_stop, true), application:set_env(lager, suppress_application_start_stop, true), application:unset_env(lager, crash_log), lager:start(), try {_, _, MS} = os:timestamp(), timer:sleep((1000000 - MS) div 1000 + 1), % start close to the beginning of a new second [error_logger:error_msg("Foo ~p~n", [K]) || K <- lists:seq(1, 15)], wait_for_message("lager_error_logger_h dropped 10 messages in the last second that exceeded the limit of 5 messages/sec", 100, 50), % and once again [error_logger:error_msg("Foo1 ~p~n", [K]) || K <- lists:seq(1, 20)], wait_for_message("lager_error_logger_h dropped 15 messages in the last second that exceeded the limit of 5 messages/sec", 100, 50) after application:stop(lager), application:stop(goldrush), error_logger:tty(true) end end ] }.
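%% Hedged configuration sketch for the high-water-mark behaviour exercised by the test above
%% (the key names are the ones set via application:set_env/3 in this test; the values are
%% illustrative assumptions, not recommendations):
%%   {lager, [{error_logger_redirect, true},
%%            {error_logger_hwm, 5},              %% max redirected error_logger messages per second
%%            {error_logger_flush_queue, false}]} %% do not flush the whole mailbox when the limit is hit
%% Once the limit is exceeded, the handler emits a notice of the form
%% "lager_error_logger_h dropped N messages in the last second that exceeded the limit of 5 messages/sec".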
shaper_does_not_forward_sup_progress_messages_to_info_level_backend_test_() -> {timeout, 10000, [fun() -> error_logger:tty(false), application:load(lager), application:set_env(lager, handlers, [{lager_test_backend, info}]), application:set_env(lager, error_logger_redirect, true), application:set_env(lager, error_logger_hwm, 5), application:set_env(lager, suppress_supervisor_start_stop, false), application:set_env(lager, suppress_application_start_stop, false), application:unset_env(lager, crash_log), lager:start(), try PidPlaceholder = self(), SupervisorMsg = [{supervisor, {PidPlaceholder,rabbit_connection_sup}}, {started, [{pid, PidPlaceholder}, {name,helper_sup}, {mfargs, {rabbit_connection_helper_sup,start_link,[]}}, {restart_type,intrinsic}, {shutdown,infinity}, {child_type,supervisor}]}], ApplicationExit = [{application, error_logger_lager_h_test}, {exited, stopped}, {type, permanent}], error_logger:info_report("This is not a progress message"), error_logger:info_report(ApplicationExit), [error_logger:info_report(progress, SupervisorMsg) || _K <- lists:seq(0, 100)], error_logger:info_report("This is not a progress message 2"), % Note: this gets logged in slow environments: % Application lager started on node nonode@nohost wait_for_count(fun lager_test_backend:count/0, [3, 4], 100, 50), % Note: this debug msg gets ignored in slow environments: % Lager installed handler lager_test_backend into lager_event wait_for_count(fun lager_test_backend:count_ignored/0, [0, 1], 100, 50) after application:stop(lager), application:stop(goldrush), error_logger:tty(true) end end ] }. supressed_messages_are_not_counted_for_hwm_test_() -> {timeout, 10000, [fun() -> error_logger:tty(false), application:load(lager), application:set_env(lager, handlers, [{lager_test_backend, debug}]), application:set_env(lager, error_logger_redirect, true), application:set_env(lager, error_logger_hwm, 5), application:set_env(lager, suppress_supervisor_start_stop, true), application:set_env(lager, suppress_application_start_stop, true), application:unset_env(lager, crash_log), lager:start(), try PidPlaceholder = self(), SupervisorMsg = [{supervisor, {PidPlaceholder,rabbit_connection_sup}}, {started, [{pid, PidPlaceholder}, {name,helper_sup}, {mfargs, {rabbit_connection_helper_sup,start_link,[]}}, {restart_type,intrinsic}, {shutdown,infinity}, {child_type,supervisor}]}], ApplicationExit = [{application, error_logger_lager_h_test}, {exited, stopped}, {type, permanent}], lager_test_backend:flush(), error_logger:info_report("This is not a progress message"), [error_logger:info_report(ApplicationExit) || _K <- lists:seq(0, 100)], [error_logger:info_report(progress, SupervisorMsg) || _K <- lists:seq(0, 100)], error_logger:info_report("This is not a progress message 2"), wait_for_count(fun lager_test_backend:count/0, 2, 100, 50), wait_for_count(fun lager_test_backend:count_ignored/0, 0, 100, 50) after application:stop(lager), application:stop(goldrush), error_logger:tty(true) end end ] }. wait_for_message(Expected, Tries, Sleep) -> maybe_find_expected_message(lager_test_backend:get_buffer(), Expected, Tries, Sleep). 
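%% Hedged sketch of the suppression switches toggled by the two tests above (the key names come
%% from the application:get_env/3 calls in this handler; the values are illustrative):
%%   application:set_env(lager, suppress_application_start_stop, true),
%%   application:set_env(lager, suppress_supervisor_start_stop, true)
%% When enabled, application/supervisor progress and normal-stop reports are answered with no_log,
%% so they neither reach the backends nor count against the error_logger high-water mark.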
maybe_find_expected_message(_Buffer, Expected, 0, _Sleep) -> throw({not_found, Expected}); maybe_find_expected_message([], Expected, Tries, Sleep) -> timer:sleep(Sleep), maybe_find_expected_message(lager_test_backend:get_buffer(), Expected, Tries - 1, Sleep); maybe_find_expected_message([{_Severity, _Date, Msg, _Metadata}|T], Expected, Tries, Sleep) -> case lists:flatten(Msg) of Expected -> ok; _ -> maybe_find_expected_message(T, Expected, Tries, Sleep) end. wait_for_count(Fun, _Expected, 0, _Sleep) -> Actual = Fun(), Msg = io_lib:format("wait_for_count: fun ~p final value: ~p~n", [Fun, Actual]), throw({failed, Msg}); wait_for_count(Fun, Expected, Tries, Sleep) when is_list(Expected) -> Actual = Fun(), case lists:member(Actual, Expected) of true -> ok; false -> timer:sleep(Sleep), wait_for_count(Fun, Expected, Tries - 1, Sleep) end; wait_for_count(Fun, Expected, Tries, Sleep) -> case Fun() of Expected -> ok; _ -> timer:sleep(Sleep), wait_for_count(Fun, Expected, Tries - 1, Sleep) end. -endif. lager-3.8.0/src/lager_manager_killer.erl0000644000232200023220000000335313523436621020614 0ustar debalancedebalance-module(lager_manager_killer). -author("Sungjin Park "). -behavior(gen_event). -export([init/1, handle_event/2, handle_call/2, handle_info/2, terminate/2, code_change/3]). -export([kill_me/0]). -include("lager.hrl"). -record(state, { killer_hwm :: non_neg_integer(), killer_reinstall_after :: non_neg_integer() }). kill_me() -> gen_event:call(lager_event, ?MODULE, kill_self). init([KillerHWM, KillerReinstallAfter]) -> {ok, #state{killer_hwm=KillerHWM, killer_reinstall_after=KillerReinstallAfter}}. handle_call(get_loglevel, State) -> {ok, {mask, ?LOG_NONE}, State}; handle_call({set_loglevel, _Level}, State) -> {ok, ok, State}; handle_call(get_settings, State = #state{killer_hwm=KillerHWM, killer_reinstall_after=KillerReinstallAfter}) -> {ok, [KillerHWM, KillerReinstallAfter], State}; handle_call(kill_self, #state{killer_hwm=KillerHWM, killer_reinstall_after=KillerReinstallAfter}) -> exit({kill_me, [KillerHWM, KillerReinstallAfter]}); handle_call(_Request, State) -> {ok, ok, State}. %% It's not the best idea in the world to check the queue length for every %% log message. We can make this operation work on a poll timer in the %% future. handle_event({log, _Message}, State = #state{killer_hwm=KillerHWM, killer_reinstall_after=KillerReinstallAfter}) -> {message_queue_len, Len} = process_info(self(), message_queue_len), case Len > KillerHWM of true -> exit({kill_me, [KillerHWM, KillerReinstallAfter]}); _ -> {ok, State} end; handle_event(_Event, State) -> {ok, State}. handle_info(_Info, State) -> {ok, State}. terminate(_Reason, _State) -> ok. code_change(_OldVsn, State, _Extra) -> {ok, State}.
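%% Hedged usage sketch, not part of the module: lager normally installs this handler itself, but
%% added by hand to the lager_event manager it would look roughly like
%%   KillerHWM = 1000, KillerReinstallAfter = 5000,  %% illustrative values
%%   gen_event:add_handler(lager_event, lager_manager_killer, [KillerHWM, KillerReinstallAfter]),
%%   lager_manager_killer:kill_me().
%% Once the manager's message queue exceeds KillerHWM (or kill_me/0 is called), the handler exits
%% with reason {kill_me, [KillerHWM, KillerReinstallAfter]}; the reinstall delay is carried in that
%% exit reason for whichever process supervises the handler to act on.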