# Pleroma: A lightweight social networking server
# Copyright © 2017-2022 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only
defmodule Pleroma.Application do
use Application
import Cachex.Spec
alias Pleroma.Config
require Logger
# Project metadata captured at compile time from mix.exs.
@name Mix.Project.config()[:name]
@version Mix.Project.config()[:version]
@repository Mix.Project.config()[:source_url]
@mix_env Mix.env()

# Compile-time project metadata accessors.
def name, do: @name
def version, do: @version
def named_version, do: @name <> " " <> @version
def repository, do: @repository
# Returns the User-Agent string used for outgoing HTTP requests.
#
# Honors a custom `[:http, :user_agent]` config value; otherwise derives
# one from the endpoint URL and the instance contact email. Before the
# endpoint process is registered, a static fallback identifier is used.
def user_agent do
  case Process.whereis(Pleroma.Web.Endpoint) do
    nil ->
      # fallback, if endpoint is not started yet
      "Pleroma Data Loader"

    _pid ->
      case Config.get([:http, :user_agent], :default) do
        :default ->
          contact = Config.get([:instance, :email], "")
          "#{named_version()}; #{Pleroma.Web.Endpoint.url()} <#{contact}>"

        custom ->
          custom
      end
  end
end
# See http://elixir-lang.org/docs/stable/elixir/Application.html
# for more information on OTP Applications
def start(_type, _args) do
  # Scrubbers are compiled at runtime and therefore will cause a conflict
  # every time the application is restarted, so we disable module
  # conflicts at runtime
  Code.compiler_options(ignore_module_conflict: true)
  # Disable warnings_as_errors at runtime, it breaks Phoenix live reload
  # due to protocol consolidation warnings
  Code.compiler_options(warnings_as_errors: false)
  Pleroma.Telemetry.Logger.attach()
  Config.Holder.save_default()
  Pleroma.HTML.compile_scrubbers()
  Pleroma.Config.Oban.warn()
  Config.DeprecationWarnings.warn()
  Pleroma.Web.Plugs.HTTPSecurityPlug.warn_if_disabled()
  Pleroma.ApplicationRequirements.verify!()
  setup_instrumenters()
  load_custom_modules()
  Pleroma.Docs.JSON.compile()
  limiters_setup()

  adapter = Application.get_env(:tesla, :adapter)

  if match?({Tesla.Adapter.Finch, _}, adapter) do
    Logger.info("Starting Finch")
    Finch.start_link(name: MyFinch)
  end

  # Gun needs OTP >= 22.2 for correct handling of unordered certificate
  # chains; refuse to boot on older releases.
  if adapter == Tesla.Adapter.Gun do
    check_gun_otp_version!()
  end

  # Define workers and child supervisors to be supervised
  children =
    [
      Pleroma.Repo,
      Config.TransferTask,
      Pleroma.Emoji,
      Pleroma.Web.Plugs.RateLimiter.Supervisor,
      {Task.Supervisor, name: Pleroma.TaskSupervisor}
    ] ++
      cachex_children() ++
      http_children(adapter, @mix_env) ++
      [
        Pleroma.Stats,
        Pleroma.JobQueueMonitor,
        {Majic.Pool, [name: Pleroma.MajicPool, pool_size: Config.get([:majic_pool, :size], 2)]},
        {Oban, Config.get(Oban)},
        Pleroma.Web.Endpoint
      ] ++
      task_children(@mix_env) ++
      dont_run_in_test(@mix_env) ++
      shout_child(shout_enabled?()) ++
      [Pleroma.Gopher.Server]

  # See http://elixir-lang.org/docs/stable/elixir/Supervisor.html
  # for other strategies and supported options
  #
  # If we have a lot of caches, default max_restarts can cause test
  # resets to fail.
  # Go for the default 3 unless we're in test
  max_restarts =
    if @mix_env == :test do
      100
    else
      3
    end

  opts = [strategy: :one_for_one, name: Pleroma.Supervisor, max_restarts: max_restarts]
  result = Supervisor.start_link(children, opts)
  set_postgres_server_version()
  result
end

# Raises unless the running OTP release is new enough (>= 22.2) for the
# Gun adapter to correctly handle unordered certificate chains. Also
# raises when the OTP version cannot be determined at all.
defp check_gun_otp_version! do
  if version = Pleroma.OTPVersion.version() do
    [major, minor] =
      version
      |> String.split(".")
      |> Enum.map(&String.to_integer/1)
      |> Enum.take(2)

    if (major == 22 and minor < 2) or major < 22 do
      raise "
!!!OTP VERSION WARNING!!!
You are using gun adapter with OTP version #{version}, which doesn't support correct handling of unordered certificates chains. Please update your Erlang/OTP to at least 22.2.
"
    end
  else
    raise "
!!!OTP VERSION WARNING!!!
To support correct handling of unordered certificates chains - OTP version must be > 22.2.
"
  end
end
# Queries the running PostgreSQL server for its version and stores it in
# :persistent_term under {Pleroma.Repo, :postgres_version} so callers can
# branch on server capabilities without re-querying.
#
# Falls back to 9.6 (and logs a warning) when the version cannot be
# determined.
defp set_postgres_server_version do
  version =
    with %{rows: [[version]]} <- Ecto.Adapters.SQL.query!(Pleroma.Repo, "show server_version"),
         {num, _} <- Float.parse(version) do
      num
    else
      e ->
        # Logger.warn/1 is deprecated; Logger.warning/1 is the supported API.
        Logger.warning(
          "Could not get the postgres version: #{inspect(e)}.\nSetting the default value of 9.6"
        )

        9.6
    end

  :persistent_term.put({Pleroma.Repo, :postgres_version}, version)
end
# Compiles and loads custom BEAM modules from the configured runtime
# directory ([:modules, :runtime_dir]), when one is set and exists.
# Raises when compilation of the directory fails.
def load_custom_modules do
  dir = Config.get([:modules, :runtime_dir])

  if dir && File.exists?(dir) do
    case Pleroma.Utils.compile_dir(dir) do
      {:error, _errors, _warnings} ->
        raise "Invalid custom modules"

      {:ok, modules, _warnings} ->
        # Logging every module is noisy, so skip it in the test env.
        if @mix_env != :test do
          for mod <- modules do
            Logger.info("Custom module loaded: #{inspect(mod)}")
          end
        end

        :ok
    end
  end
end
# Wires up Prometheus metric exporters. The Ecto instrumenter is only
# attached when it is configured under the :prometheus app env.
defp setup_instrumenters do
  require Prometheus.Registry

  if Application.get_env(:prometheus, Pleroma.Repo.Instrumenter) do
    attach_ecto_instrumenter()
  end

  Pleroma.Web.Endpoint.MetricsExporter.setup()
  Pleroma.Web.Endpoint.PipelineInstrumenter.setup()
  # Note: disabled until prometheus-phx is integrated into prometheus-phoenix:
  # Pleroma.Web.Endpoint.Instrumenter.setup()
  PrometheusPhx.setup()
end

# Hooks the Prometheus Ecto instrumenter into repo query telemetry.
defp attach_ecto_instrumenter do
  :ok =
    :telemetry.attach(
      "prometheus-ecto",
      [:pleroma, :repo, :query],
      &Pleroma.Repo.Instrumenter.handle_event/4,
      %{}
    )

  Pleroma.Repo.Instrumenter.setup()
end
# Child specs for every Cachex cache the application runs.
# List order is preserved; these start under the main supervisor.
defp cachex_children do
  specs = [
    {"used_captcha", [ttl_interval: seconds_valid_interval()]},
    {"user", [default_ttl: 25_000, ttl_interval: 1000, limit: 2500]},
    {"object", [default_ttl: 25_000, ttl_interval: 1000, limit: 2500]},
    {"rich_media", [default_ttl: :timer.minutes(120), limit: 5000]},
    {"scrubber", [limit: 2500]},
    {"scrubber_management", [limit: 2500]},
    {"idempotency", [expiration: idempotency_expiration(), limit: 2500]},
    {"web_resp", [limit: 2500]},
    {"emoji_packs", [expiration: emoji_packs_expiration(), limit: 10]},
    {"failed_proxy_url", [limit: 2500]},
    {"banned_urls", [default_ttl: :timer.hours(24 * 30), limit: 5_000]},
    {"chat_message_id_idempotency_key",
     [expiration: chat_message_id_idempotency_key_expiration(), limit: 500_000]},
    {"rel_me", [limit: 2500]},
    {"host_meta", [default_ttl: :timer.minutes(120), limit: 5000]}
  ]

  for {name, opts} <- specs, do: build_cachex(name, opts)
end
# Cachex expiration policy for emoji packs: entries live five minutes,
# swept every 60 seconds.
defp emoji_packs_expiration do
  expiration(default: :timer.seconds(5 * 60), interval: :timer.seconds(60))
end

# Idempotency-cache entries live six hours, swept every 60 seconds.
defp idempotency_expiration do
  expiration(default: :timer.seconds(6 * 60 * 60), interval: :timer.seconds(60))
end

# Chat-message idempotency keys live two minutes, swept every 60 seconds.
defp chat_message_id_idempotency_key_expiration do
  expiration(default: :timer.minutes(2), interval: :timer.seconds(60))
end

# TTL sweep interval for the captcha cache, driven by the configured
# captcha validity window (required config; raises when absent).
defp seconds_valid_interval do
  :timer.seconds(Config.get!([Pleroma.Captcha, :seconds_valid]))
end
# Builds a supervisor child spec for a Cachex cache.
#
# `type` names the cache: the worker id becomes :cachex_<type> and the
# cache itself is registered as :<type>_cache. `opts` are passed through
# to Cachex.start_link/2.
@spec build_cachex(String.t(), keyword()) :: map()
def build_cachex(type, opts) do
  worker_id = String.to_atom("cachex_" <> type)
  cache_name = String.to_atom(type <> "_cache")

  %{
    id: worker_id,
    start: {Cachex, :start_link, [cache_name, opts]},
    type: :worker
  }
end
# Whether the shout (local chat) feature is enabled in config.
defp shout_enabled?, do: Config.get([:shout, :enabled])

# Children that are skipped in the test and benchmark environments:
# the streamer registry plus the background migrators.
defp dont_run_in_test(env) when env in [:test, :benchmark], do: []

defp dont_run_in_test(_) do
  streamer_registry =
    {Registry,
     [
       name: Pleroma.Web.Streamer.registry(),
       keys: :duplicate,
       partitions: System.schedulers_online()
     ]}

  [streamer_registry | background_migrators()]
end
# One-shot migrator processes that backfill database tables in the
# background after startup.
defp background_migrators do
  [
    Pleroma.Migrators.HashtagsTableMigrator,
    Pleroma.Migrators.ContextObjectsDeletionMigrator
  ]
end
# Children for the shout feature: the shout state process and the
# Phoenix PubSub it uses. Empty when the feature is disabled.
defp shout_child(true) do
  [
    Pleroma.Web.ShoutChannel.ShoutChannelState,
    {Phoenix.PubSub, [name: Pleroma.PubSub, adapter: Phoenix.PubSub.PG2]}
  ]
end

defp shout_child(_), do: []
# One-off startup tasks, run as temporary Task children.
# Only :web_push_init runs in the test environment.
defp task_children(:test) do
  [task_spec(:web_push_init, &Pleroma.Web.Push.init/0)]
end

defp task_children(_) do
  [
    task_spec(:web_push_init, &Pleroma.Web.Push.init/0),
    task_spec(:internal_fetch_init, &Pleroma.Web.ActivityPub.InternalFetchActor.init/0)
  ]
end

# Child spec for a temporary Task running a zero-arity startup function.
defp task_spec(id, fun) do
  %{
    id: id,
    start: {Task, :start_link, [fun]},
    restart: :temporary
  }
end
# start hackney and gun pools in tests so either adapter can be used;
# otherwise only the configured adapter's children are started.
defp http_children(_, :test) do
  http_children(Tesla.Adapter.Hackney, nil) ++ http_children(Tesla.Adapter.Gun, nil)
end

defp http_children(Tesla.Adapter.Hackney, _) do
  base_pools = [:federation, :media]

  # The :upload pool is only needed when remote uploads are proxied.
  pools =
    if Config.get([Pleroma.Upload, :proxy_remote]) do
      [:upload | base_pools]
    else
      base_pools
    end

  Enum.map(pools, fn pool ->
    :hackney_pool.child_spec(pool, Config.get([:hackney_pools, pool]))
  end)
end

defp http_children(Tesla.Adapter.Gun, _) do
  Pleroma.Gun.ConnectionPool.children() ++
    [{Task, &Pleroma.HTTP.AdapterHelper.Gun.limiter_setup/0}]
end

defp http_children(_, _), do: []
# Initializes ConcurrentLimiter instances for modules that throttle
# their own outbound work, reading per-module :max_running /
# :max_waiting settings from config (both default to 5).
@spec limiters_setup() :: :ok
def limiters_setup do
  limiter_config = Config.get(ConcurrentLimiter, [])

  for module <- [
        Pleroma.Web.RichMedia.Helpers,
        Pleroma.Web.ActivityPub.MRF.MediaProxyWarmingPolicy
      ] do
    opts = Keyword.get(limiter_config, module, [])

    ConcurrentLimiter.new(
      module,
      Keyword.get(opts, :max_running, 5),
      Keyword.get(opts, :max_waiting, 5)
    )
  end

  :ok
end
end