# mirror of https://github.com/discourse/discourse.git
# synced 2026-03-03 23:54:20 +08:00
# frozen_string_literal: true

# Absolute path to the Discourse checkout (this config lives one level down).
discourse_path = File.expand_path("../..", __FILE__)

# Opt-in switch for shipping server logs to Logstash instead of plain text.
enable_logstash_logger = ENV["ENABLE_LOGSTASH_LOGGER"] == "1"

# Where the server's stderr stream is written.
unicorn_stderr_path = "#{discourse_path}/log/unicorn.stderr.log"
if enable_logstash_logger
  require_relative "../lib/discourse_logstash_logger"
  require_relative "../lib/pitchfork_logstash_patch"
  # The logger appends to this file; create it up front so the first open
  # doesn't fail on a missing path.
  FileUtils.touch(unicorn_stderr_path) if !File.exist?(unicorn_stderr_path)
  # Emit structured Logstash events tagged as coming from the web server.
  logger DiscourseLogstashLogger.logger(
    logdev: unicorn_stderr_path,
    type: :unicorn,
    # Stamp each event with its emission time in UTC.
    customize_event: lambda { |event| event["@timestamp"] = ::Time.now.utc },
  )
else
  # Default: plain logging to stdout.
  logger Logger.new(STDOUT)
end
# Number of web workers; overridable via UNICORN_WORKERS (defaults to 3).
worker_processes (ENV["UNICORN_WORKERS"] || 3).to_i

# Listen on an explicit listener string (UNICORN_LISTENER) when given,
# otherwise on UNICORN_PORT (default 3000) — bound to localhost unless
# UNICORN_BIND_ALL is set. reuseport lets several processes share the address.
# stree-ignore
listen ENV["UNICORN_LISTENER"] || "#{(ENV["UNICORN_BIND_ALL"] ? "" : "127.0.0.1:")}#{(ENV["UNICORN_PORT"] || 3000).to_i}", reuseport: true
if ENV["RAILS_ENV"] == "production"
  # nuke workers after 30 seconds instead of 20 seconds (the default)
  timeout 30
else
  # we want a longer timeout in dev cause first request can be really slow.
  # Parse with Integer(..., exception: false) so a malformed UNICORN_TIMEOUT
  # falls back to 60 instead of String#to_i silently producing 0 — a zero
  # timeout would kill every worker immediately. This also matches how
  # APP_SERVER_SPAWN_TIMEOUT is parsed further down.
  timeout(Integer(ENV["UNICORN_TIMEOUT"], exception: false) || 60)
end
# On some really constrained environments, the mold process can take a long
# time to spawn workers. This is a safety valve to prevent the mold process
# from giving up too early.
spawn_timeout(Integer(ENV["APP_SERVER_SPAWN_TIMEOUT"], exception: false) || 60)

# Don't probe the client socket before dispatching the request to the app.
check_client_connection false
# In non-production, keep workers in the parent's process group so terminal
# debugging keeps working.
if ENV["RAILS_ENV"] != "production"
  # Pitchfork defaults to setpgid true, which moves workers into their own process group.
  # This prevents interactive debuggers (binding.pry, etc.) from reading STDIN.
  setpgid false
end
# Runs right before each worker fork: drop the shared redis connection (a
# forked child must not reuse the parent's socket) and pace successive forks.
before_fork do |server|
  Discourse.redis.close

  # Spread forks out so a burst of workers doesn't overload a constrained host.
  fork_delay = Float(ENV["APP_SERVER_FORK_THROTTLE"], exception: false) || 1
  sleep(fork_delay) unless Rails.env.development?
end
# Runs in the mold after (re)forking. On the very first mold generation we
# preload the Rails app and start a supervisor watchdog; on every generation
# we reset state that must not leak across forks.
after_mold_fork do |server, mold|
  if mold.generation.zero?
    # Load the full Rails app once in the mold so workers forked from it can
    # share the preloaded memory.
    Discourse.preload_rails!

    supervisor = ENV["UNICORN_SUPERVISOR_PID"].to_i

    if supervisor > 0
      # Watchdog: if the supervising process disappears, TERM ourselves rather
      # than keep running orphaned. Relies on /proc/<pid>, so this check is
      # effectively Linux-only.
      Thread.new do
        while true
          unless File.exist?("/proc/#{supervisor}")
            server.logger.error "Kill self, supervisor is gone"
            Process.kill "TERM", Process.pid
          end
          sleep 2
        end
      end
    end
  end

  # These must run for every mold generation, not just the first.
  Discourse.redis.close
  Discourse.before_fork
end
# Out-of-band GC is opt-in via DISCOURSE_DISABLE_MAJOR_GC_DURING_REQUESTS and
# requires GC.config(rgengc_allow_full_mark:), available from Ruby 3.4.
# Compare versions with Gem::Version: a plain string compare misorders
# two-digit components (e.g. "3.10" < "3.4" lexicographically).
oob_gc_enabled =
  ENV["DISCOURSE_DISABLE_MAJOR_GC_DURING_REQUESTS"] &&
    Gem::Version.new(RUBY_VERSION) >= Gem::Version.new("3.4")
# Per-worker setup right after fork: fire app lifecycle hooks and re-arm
# per-process signal logging.
after_worker_fork do |server, worker|
  DiscourseEvent.trigger(:web_fork_started)
  Discourse.after_fork
  SignalTrapLogger.instance.after_fork

  # Suppress full (major) GC marking during requests; majors are triggered
  # out of band between requests instead (see after_request_complete).
  GC.config(rgengc_allow_full_mark: false) if oob_gc_enabled
end
if oob_gc_enabled
  # Out-of-band GC: run the deferred major collection between requests, when
  # no client is waiting on this worker.
  after_request_complete do |_server, _worker, _rack_env|
    GC.start if GC.latest_gc_info(:need_major_by)
  end
end
# Runs in the service worker before it signals readiness: boot and then
# babysit the auxiliary processes — sidekiq demons and any plugin-registered
# demons.
before_service_worker_ready do |server, service_worker|
  sidekiqs = ENV["UNICORN_SIDEKIQS"].to_i

  if sidekiqs > 0
    server.logger.info "starting #{sidekiqs} supervised sidekiqs"

    require "demon/sidekiq"
    Demon::Sidekiq.after_fork { DiscourseEvent.trigger(:sidekiq_fork_started) }
    Demon::Sidekiq.start(sidekiqs, logger: server.logger)

    if Discourse.enable_sidekiq_logging?
      # Trap USR1, so we can re-issue to sidekiq workers
      # but chain the default unicorn implementation as well
      # NOTE(review): Signal.trap may return a String ("DEFAULT"/"IGNORE")
      # rather than a Proc when no Ruby handler was previously installed —
      # confirm old_handler is always callable here.
      old_handler =
        Signal.trap("USR1") do
          old_handler.call

          # We have seen Sidekiq processes getting stuck in production sporadically when log rotation happens.
          # The cause is currently unknown but we suspect that it is related to the Unicorn master process and
          # Sidekiq demon processes reopening logs at the same time as we noticed that Unicorn worker processes only
          # reopen logs after the Unicorn master process is done. To workaround the problem, we are adding an arbitrary
          # delay of 1 second to Sidekiq's log reopening procedure. The 1 second delay should be
          # more than enough for the Unicorn master process to finish reopening logs.
          # NOTE(review): no sleep appears here, so the 1s delay is presumably
          # implemented inside Demon::Sidekiq's USR2 handling — verify.
          Demon::Sidekiq.kill("USR2")
        end
    end
  end

  # Start one instance of every demon process contributed by plugins.
  DiscoursePluginRegistry.demon_processes.each do |demon_class|
    server.logger.info "starting #{demon_class.prefix} demon"
    demon_class.start(1, logger: server.logger)
  end

  # Supervision loop: once a minute, restart any dead demons and run sidekiq
  # health checks (heartbeat + RSS memory ceiling).
  Thread.new do
    while true
      begin
        sleep 60

        if sidekiqs > 0
          Demon::Sidekiq.ensure_running
          Demon::Sidekiq.heartbeat_check
          Demon::Sidekiq.rss_memory_check
        end

        DiscoursePluginRegistry.demon_processes.each { |demon_class| demon_class.ensure_running }
      rescue => e
        # Never let the supervision thread die silently.
        Rails.logger.warn(
          "Error in demon processes heartbeat check: #{e}\n#{e.backtrace.join("\n")}",
        )
      end
    end
  end
end
# Fires just before Pitchfork kills a worker that exceeded its timeout:
# capture the main thread's backtrace so the hang can be diagnosed afterwards.
after_worker_timeout do |server, worker, timeout_info|
  backtrace = timeout_info.thread.backtrace&.join("\n")
  Rails.logger.error(
    "Pitchfork worker is about to timeout, dumping backtrace for main thread\n#{backtrace}\n",
  )
end
# macOS has no SOCK_SEQPACKET support, which Pitchfork uses by default for
# its internal sockets; swap in SOCK_STREAM locally to avoid the warning.
if ENV["RAILS_ENV"] != "production" && RUBY_PLATFORM.include?("darwin")
  Pitchfork.instance_variable_set(:@socket_type, :SOCK_STREAM)
end