MongoDB: mongod startup and the relationships between its core classes
Table of Contents
The ServiceEntryPointMongod class handles client commands
MongoDB core class: ServiceStateMachine
ServiceStateMachine: receiving messages with _sourceMessage
ServiceStateMachine: processing messages with _processMessage
MongoDB core class: the ServiceExecutorReserved task executor
This article analyzes the mongod startup process: the initialization and execution flow of the network layer, the task thread pool, the service entry point, the service state machine, and client sessions. The mongos proxy, replica sets, sharding, and other large topics are deliberately left out.
- TransportLayerASIO: accepts client connections and sends/receives packets using asynchronous I/O
- ServiceStateMachine: maintains the state of one client session: receive a packet, process it, send the response. Processing is delegated to ServiceEntryPointMongod, while receiving and sending are delegated to TransportLayerASIO; threads from ServiceExecutorReserved execute all three kinds of tasks (see the sketch after this list).
- ServiceEntryPoint: executes client commands; it supports all of the server's commands.
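To make the division of labor concrete, here is a toy C++ sketch of how the three classes hand work to one another. The types and method bodies are simplified stand-ins invented for illustration; only the call direction mirrors MongoDB:
#include <iostream>
#include <string>

//Toy stand-ins for the real classes; only the call direction is faithful.
struct ServiceEntryPoint {
    std::string handleRequest(const std::string& msg) { return "reply to " + msg; }
};

struct TransportLayer {
    std::string source() { return "find"; }                     //receive a packet
    void sink(const std::string& r) { std::cout << r << "\n"; } //send a packet
};

struct ServiceStateMachine { //one per client session
    TransportLayer* tl;
    ServiceEntryPoint* sep;
    void runOnce() {
        auto msg = tl->source();              //Source/SourceWait
        auto reply = sep->handleRequest(msg); //Process
        tl->sink(reply);                      //SinkWait
    }
};

int main() {
    TransportLayer tl;
    ServiceEntryPoint sep;
    ServiceStateMachine ssm{&tl, &sep};
    ssm.runOnce(); //repeated per request until the client disconnects
}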
Location of the main function:
src/mongo/db/mongod_main.cpp
int mongod_main(int argc, char* argv[]) {
//forbid creating threads during early startup
ThreadSafetyContext::getThreadSafetyContext()->forbidMultiThreading();
registerShutdownTask(shutdownTask);
//set up signal handling
setupSignalHandlers();
srand(static_cast<unsigned>(curTimeMicros64()));
Status status = mongo::runGlobalInitializers(std::vector<std::string>(argv, argv + argc));
//create the most central object, the ServiceContext, and store it in a global variable
auto* service = [] {
try {
auto serviceContextHolder = ServiceContext::make();
auto* serviceContext = serviceContextHolder.get();
setGlobalServiceContext(std::move(serviceContextHolder));
return serviceContext;
} catch (...) {
auto cause = exceptionToStatus();
quickExit(EXIT_FAILURE);
}
}();
setUpCollectionShardingState(service);
setUpCatalog(service);
setUpReplication(service);
setUpObservers(service);
//the services offered, i.e. the supported commands, are provided by the ServiceEntryPointMongod class
service->setServiceEntryPoint(std::make_unique<ServiceEntryPointMongod>(service));
startSignalProcessingThread();
//ReadWrite Concern
ReadWriteConcernDefaults::create(service, readWriteConcernDefaultsCacheLookupMongoD);
//initialize and listen on the port
ExitCode exitCode = initAndListen(service, serverGlobalParams.port);
exitCleanly(exitCode);
return 0;
}
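Note the pattern used to create the ServiceContext above: an immediately invoked lambda, which lets the variable be initialized and error-handled in a single expression. A standalone sketch of the idiom (Service and the static holder are made up for illustration):
#include <cstdlib>
#include <iostream>
#include <memory>

struct Service { int port = 27017; };

int main() {
    //Initialize-and-handle-errors in one expression, as mongod_main does.
    auto* service = [] {
        try {
            static auto holder = std::make_unique<Service>(); //stands in for the global holder
            return holder.get();
        } catch (...) {
            std::exit(EXIT_FAILURE); //quickExit(EXIT_FAILURE) in mongod
        }
    }();
    std::cout << service->port << "\n";
}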
In src/mongo/db/mongod_main.cpp, initAndListen in the main function simply calls _initAndListen(service, listenPort).
_initAndListen listens on the network port and initializes the ServiceEntryPoint and TransportLayer. ServiceEntryPoint->start() spawns multiple worker threads; TransportLayer->start() begins accepting requests.
ExitCode _initAndListen(ServiceContext* serviceContext, int listenPort) {
Client::initThread("initandlisten"); //initializes a Client, but without a SessionHandle
initWireSpec();
//use a cheap but imprecise clock source; here it is only accurate to 10ms
serviceContext->setFastClockSource(FastClockSourceFactory::create(Milliseconds(10)));
DBDirectClientFactory::get(serviceContext).registerImplementation([](OperationContext* opCtx) {
return std::unique_ptr<DBClientBase>(new DBDirectClient(opCtx));
});
const repl::ReplSettings& replSettings =
repl::ReplicationCoordinator::get(serviceContext)->getSettings();
//ServiceEntryPointMongod, the class that actually provides the service
serviceContext->setServiceEntryPoint(std::make_unique<ServiceEntryPointMongod>(serviceContext));
//the operation context
auto startupOpCtx = serviceContext->makeOperationContext(&cc());
//runner for periodic jobs
auto runner = makePeriodicRunner(serviceContext);
serviceContext->setPeriodicRunner(std::move(runner));
//start the thread pool
OCSPManager::get()->startThreadPool();
if (!storageGlobalParams.repair) { //not running in repair mode, so open the port
auto tl =
transport::TransportLayerManager::createWithConfig(&serverGlobalParams, serviceContext);
auto res = tl->setup(); //sets up (binds) the listening sockets here
if (!res.isOK()) {
return EXIT_NET_ERROR;
}
serviceContext->setTransportLayer(std::move(tl));
}
FlowControl::set(serviceContext,
std::make_unique<FlowControl>(
serviceContext,
repl::ReplicationCoordinator::get(serviceContext)));
initializeStorageEngine(serviceContext, StorageEngineInitFlags::kNone);
StorageControl::startStorageControls(serviceContext);
initializeSNMP();
startWatchdog(serviceContext);
//the task executor that schedules every request
auto start = serviceContext->getServiceExecutor()->start();
//all service entry points start working; this also starts the executor threads: std::unique_ptr<transport::ServiceExecutorReserved> _adminInternalPool;
start = serviceContext->getServiceEntryPoint()->start();
//the network transport layer starts accepting clients
start = serviceContext->getTransportLayer()->start();
serviceContext->notifyStartupComplete();
return waitForShutdown();
}
Question: why does mongod_main call service->setServiceEntryPoint(std::make_unique<ServiceEntryPointMongod>(service)), while _initAndListen also calls serviceContext->setServiceEntryPoint(std::make_unique<ServiceEntryPointMongod>(serviceContext))? Why set it twice? (The later call simply replaces the earlier instance, so the duplication is harmless but redundant.)
The class mongo::transport::TransportLayerASIO listens on the port, accepts client connections, and sends/receives data, using asynchronous I/O directly. The ServiceEntryPoint, i.e. the ServiceEntryPointMongod object sep, is passed into TransportLayerASIO, which ultimately sends/receives data and calls sep->handleRequest().
The call in _initAndListen, auto tl = transport::TransportLayerManager::createWithConfig(&serverGlobalParams, serviceContext);, is implemented as follows:
std::unique_ptr<TransportLayer> TransportLayerManager::createWithConfig(
const ServerGlobalParams* config, ServiceContext* ctx) {
std::unique_ptr<TransportLayer> transportLayer;
auto sep = ctx->getServiceEntryPoint(); //the service entry point
transport::TransportLayerASIO::Options opts(config);
//asynchronous I/O is used directly
auto transportLayerASIO = std::make_unique<transport::TransportLayerASIO>(opts, sep); //create the transport layer; its setup() method listens on the port. sep is passed in here so client commands can be handled via sep->handleRequest
transportLayer = std::move(transportLayerASIO);
std::vector<std::unique_ptr<TransportLayer>> retVector;
retVector.emplace_back(std::move(transportLayer));
return std::make_unique<TransportLayerManager>(std::move(retVector));
}
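TransportLayerManager is a thin composite: it owns a vector of TransportLayers and forwards lifecycle calls (setup/start/shutdown) to each. A minimal sketch of that shape, with a simplified interface assumed for illustration:
#include <iostream>
#include <memory>
#include <vector>

struct TransportLayer {
    virtual ~TransportLayer() = default;
    virtual void start() = 0;
};

struct AsioLayer : TransportLayer {
    void start() override { std::cout << "asio layer started\n"; }
};

//Composite: forwards start() to every owned layer, as TransportLayerManager does.
struct Manager : TransportLayer {
    std::vector<std::unique_ptr<TransportLayer>> layers;
    void start() override {
        for (auto& l : layers) l->start();
    }
};

int main() {
    Manager m;
    m.layers.emplace_back(std::make_unique<AsioLayer>());
    m.start();
}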
MongoDB core class: TransportLayerASIO
Listens on the port, accepts requests, and creates an ASIOSession for each connection.
Status TransportLayerASIO::start() {
stdx::unique_lock lk(_mutex);
// Make sure we haven't shutdown already
invariant(!_isShutdown);
if (_listenerOptions.isIngress()) {
//_runListener is what accepts client requests
_listener.thread = stdx::thread([this] { _runListener(); });
_listener.cv.wait(lk, [&] { return _isShutdown || _listener.active; });
return Status::OK();
}
invariant(_acceptors.empty());
return Status::OK();
}
void TransportLayerASIO::_runListener() noexcept {
setThreadName("listener");
stdx::unique_lock lk(_mutex);
if (_isShutdown) {
return;
}
for (auto& acceptor : _acceptors) {
asio::error_code ec;
acceptor.second.listen(serverGlobalParams.listenBacklog, ec); //listen on the port
_acceptConnection(acceptor.second); //accept connections and handle client data
LOGV2(23015, "Listening on", "address"_attr = acceptor.first.getAddr());
}
const char* ssl = "off";
#ifdef MONGO_CONFIG_SSL
if (_sslMode() != SSLParams::SSLMode_disabled) {
ssl = "on";
}
#endif
LOGV2(23016, "Waiting for connections", "port"_attr = _listenerPort, "ssl"_attr = ssl);
_listener.active = true;
_listener.cv.notify_all();
ON_BLOCK_EXIT([&] {
_listener.active = false;
_listener.cv.notify_all();
});
while (!_isShutdown) {
lk.unlock();
_acceptorReactor->run(); //run the event loop
lk.lock();
}
}
void TransportLayerASIO::_acceptConnection(GenericAcceptor& acceptor) {
//callback invoked when a connection arrives; control comes back here from within _acceptorReactor->run()
auto acceptCb = [this, &acceptor](const std::error_code& ec, GenericSocket peerSocket) mutable {
if (auto lk = stdx::lock_guard(_mutex); _isShutdown) {
return;
}
if (ec) {
_acceptConnection(acceptor);
return;
}
try {
//create and start a new session; one session represents one client
std::shared_ptr<ASIOSession> session(
new ASIOSession(this, std::move(peerSocket), true)); //the session holds a reference back to the TransportLayerASIO
_sep->startSession(std::move(session)); //_sep is the ServiceEntryPointMongod passed in from initAndListen
} catch (const DBException& e) {
}
//start accepting again, forming a loop: once a connection has been accepted and handed to the executor for scheduling, re-register ourselves with the async IO service.
_acceptConnection(acceptor);
}; //end of acceptCb; the recursive _acceptConnection call above runs inside the callback
//accept asynchronously; acceptCb is invoked when a connection arrives
acceptor.async_accept(*_ingressReactor, std::move(acceptCb));
}
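The accept loop above is plain Asio. A minimal standalone sketch of the same self-rearming pattern (using standalone Asio and an arbitrary port, not MongoDB's vendored copy or real session types):
#include <asio.hpp>
#include <iostream>
#include <system_error>

//Re-arming accept loop: each completed accept registers the next one,
//mirroring _acceptConnection() calling itself from its own callback.
void acceptLoop(asio::ip::tcp::acceptor& acceptor) {
    acceptor.async_accept([&acceptor](std::error_code ec, asio::ip::tcp::socket sock) {
        if (!ec)
            std::cout << "accepted " << sock.remote_endpoint() << "\n"; //hand off to a session here
        acceptLoop(acceptor); //re-register with the reactor
    });
}

int main() {
    asio::io_context reactor; //plays the role of _acceptorReactor
    asio::ip::tcp::acceptor acceptor(reactor, {asio::ip::tcp::v4(), 27018});
    acceptLoop(acceptor);
    reactor.run(); //the listener thread's loop
}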
The ServiceEntryPointMongod class handles client commands
After TransportLayerASIO accepts a client connection, it creates an ASIOSession and hands it to ServiceEntryPointMongod to start. ServiceEntryPointMongod inherits from ServiceEntryPointImpl, so the session is ultimately handled by ServiceEntryPointImpl::startSession(), which creates a ServiceStateMachine to drive the client connection.
Commands are ultimately processed by ServiceEntryPointCommon::handleRequest(opCtx, msg, Hooks{}), which returns the result as a DbResponse.
class ServiceEntryPointImpl : public ServiceEntryPoint {
//non-copyable
ServiceEntryPointImpl(const ServiceEntryPointImpl&) = delete;
ServiceEntryPointImpl& operator=(const ServiceEntryPointImpl&) = delete;
public:
/*explicit*/ ServiceEntryPointImpl(ServiceContext* svcCtx);
//start a session; in TransportLayerASIO, _acceptConnection creates an ASIOSession and calls this to begin sending/receiving data
void startSession(transport::SessionHandle session) override;
void endAllSessions(transport::Session::TagMask tags) final;
Status start() final;
bool shutdown(Milliseconds timeout) final;
void appendStats(BSONObjBuilder* bob) const final;
size_t numOpenSessions() const final {
return _currentConnections.load();
}
private:
//ServiceStateMachine drives the connection's states: receive, process, send
using SSMList = std::list<std::shared_ptr<ServiceStateMachine>>;
using SSMListIterator = SSMList::iterator;
ServiceContext* const _svcCtx; //the service context, representing the whole server
AtomicWord<std::size_t> _nWorkers;
//guards the session list
mutable Mutex _sessionsMutex =
MONGO_MAKE_LATCH(HierarchicalAcquisitionLevel(0), "ServiceEntryPointImpl::_sessionsMutex");
stdx::condition_variable _shutdownCondition;
SSMList _sessions; //all sessions, i.e. the individual ServiceStateMachines
size_t _maxNumConnections{DEFAULT_MAX_CONN};
AtomicWord<size_t> _currentConnections{0};
AtomicWord<size_t> _createdConnections{0};
std::unique_ptr<transport::ServiceExecutorReserved> _adminInternalPool;
};
//start one session
void ServiceEntryPointImpl::startSession(transport::SessionHandle session) {
// Setup the restriction environment on the Session, if the Session has local/remote Sockaddrs
const auto& remoteAddr = session->remoteAddr();
const auto& localAddr = session->localAddr();
invariant(remoteAddr.isValid() && localAddr.isValid());
auto restrictionEnvironment = std::make_unique<RestrictionEnvironment>(remoteAddr, localAddr);
RestrictionEnvironment::set(session, std::move(restrictionEnvironment));
SSMListIterator ssmIt;
const bool quiet = serverGlobalParams.quiet.load();
size_t connectionCount;
auto transportMode = _svcCtx->getServiceExecutor()->transportMode();
//create a ServiceStateMachine and add it to the _sessions list
auto ssm = ServiceStateMachine::create(_svcCtx, session, transportMode);
auto usingMaxConnOverride = false;
{
stdx::lock_guard<decltype(_sessionsMutex)> lk(_sessionsMutex);
connectionCount = _sessions.size() + 1;
if (connectionCount > _maxNumConnections) {
usingMaxConnOverride =
shouldOverrideMaxConns(session, serverGlobalParams.maxConnsOverride);
}
if (connectionCount <= _maxNumConnections || usingMaxConnOverride) {
ssmIt = _sessions.emplace(_sessions.begin(), ssm);
_currentConnections.store(connectionCount);
_createdConnections.addAndFetch(1);
}
}
// Checking if we successfully added a connection above. Separated from the lock so we don't log
// while holding it.
if (connectionCount > _maxNumConnections && !usingMaxConnOverride) {
if (!quiet) {
LOGV2(22942,
"connection refused because too many open connections",
"connectionCount"_attr = connectionCount);
}
return;
} else if (usingMaxConnOverride && _adminInternalPool) {
//pick a thread from the reserved pool to run this state machine
ssm->setServiceExecutor(_adminInternalPool.get());
}
if (!quiet) {
LOGV2(22943,
"connection accepted",
"remote"_attr = session->remote(),
"sessionId"_attr = session->id(),
"connectionCount"_attr = connectionCount);
}
//cleanup once the state machine has finished
ssm->setCleanupHook([this, ssmIt, quiet, session = std::move(session)] {
size_t connectionCount;
auto remote = session->remote();
{
stdx::lock_guard<decltype(_sessionsMutex)> lk(_sessionsMutex);
_sessions.erase(ssmIt);
connectionCount = _sessions.size();
_currentConnections.store(connectionCount);
}
_shutdownCondition.notify_one();
});
auto ownership = ServiceStateMachine::Ownership::kOwned;
if (transportMode == transport::Mode::kSynchronous) {
ownership = ServiceStateMachine::Ownership::kStatic;
}
//the state machine starts running; from here on, receiving, processing, and sending are all driven by it
ssm->start(ownership);
}
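A detail worth noting: the cleanup hook captures ssmIt, the iterator returned by _sessions.emplace. std::list iterators stay valid until their own element is erased, so the session can remove itself in O(1) with no search. A tiny demonstration:
#include <functional>
#include <iostream>
#include <list>

int main() {
    std::list<int> sessions;
    auto it = sessions.insert(sessions.begin(), 42); //like ssmIt from _sessions.emplace
    std::function<void()> cleanup = [&sessions, it] {
        sessions.erase(it); //O(1), no traversal; valid until this element is erased
    };
    cleanup();
    std::cout << "open sessions: " << sessions.size() << "\n"; //prints 0
}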
namespace mongo {
class ServiceEntryPointMongod final : public ServiceEntryPointImpl {
ServiceEntryPointMongod(const ServiceEntryPointMongod&) = delete;
ServiceEntryPointMongod& operator=(const ServiceEntryPointMongod&) = delete;
public:
using ServiceEntryPointImpl::ServiceEntryPointImpl;
DbResponse handleRequest(OperationContext* opCtx, const Message& request) override;
private:
class Hooks;
};
DbResponse ServiceEntryPointMongod::handleRequest(OperationContext* opCtx, const Message& m) {
//ultimately handled by ServiceEntryPointCommon::handleRequest
return ServiceEntryPointCommon::handleRequest(opCtx, m, Hooks{});
}
//handles the request and returns the result
DbResponse ServiceEntryPointCommon::handleRequest(OperationContext* opCtx,
const Message& m,
const Hooks& behaviors);
}
MongoDB core class: ServiceStateMachine
class ServiceStateMachine : public std::enable_shared_from_this<ServiceStateMachine>;
//all the states:
/*
* Any state may transition to EndSession in case of an error, otherwise the valid state
* transitions are:
* Source -> SourceWait -> Process -> SinkWait -> Source (standard RPC)
* Source -> SourceWait -> Process -> SinkWait -> Process -> SinkWait ... (exhaust)
* Source -> SourceWait -> Process -> Source (fire-and-forget)
*/
enum class State {
Created, // The session has been created, but no operations have been performed yet
Source, // Request a new Message from the network to handle
SourceWait, // Wait for the new Message to arrive from the network
Process, // Run the Message through the database
SinkWait, // Wait for the database result to be sent by the network
EndSession, // End the session - the ServiceStateMachine will be invalid after this
Ended // The session has ended. It is illegal to call any method besides
// state() if this is the current state.
};
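For intuition, a toy loop that walks the standard RPC transition Source -> SourceWait -> Process -> SinkWait -> Source (exhaust and fire-and-forget omitted; the prints are placeholders):
#include <cstdio>

enum class State { Source, SourceWait, Process, SinkWait, EndSession };

int main() {
    State s = State::Source;
    int requests = 2; //pretend the client sends two requests, then disconnects
    while (s != State::EndSession) {
        switch (s) {
            case State::Source:     puts("request a new Message");                        s = State::SourceWait; break;
            case State::SourceWait: s = (requests-- > 0) ? State::Process : State::EndSession; break;
            case State::Process:    puts("run through the database");                     s = State::SinkWait; break;
            case State::SinkWait:   puts("response sent");                                s = State::Source; break;
            default: break;
        }
    }
    puts("session ended");
}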
void ServiceStateMachine::start(Ownership ownershipModel) {
_scheduleNextWithGuard(ThreadGuard(this),
transport::ServiceExecutor::kEmptyFlags,
transport::ServiceExecutorTaskName::kSSMStartSession,
ownershipModel);
}
void ServiceStateMachine::_scheduleNextWithGuard(ThreadGuard guard,
transport::ServiceExecutor::ScheduleFlags flags,
transport::ServiceExecutorTaskName taskName,
Ownership ownershipModel) {
auto func = [ssm = shared_from_this(), ownershipModel] {
ThreadGuard guard(ssm.get());
if (ownershipModel == Ownership::kStatic)
guard.markStaticOwnership();
ssm->_runNextInGuard(std::move(guard)); //the thread function: run the next state
};
guard.release();
//schedule func for execution
Status status = _serviceExecutor->schedule(std::move(func), flags, taskName);
if (status.isOK()) {
return;
}
}
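Note the capture ssm = shared_from_this() in the scheduled lambda: it is what keeps the ServiceStateMachine alive while the task sits in the executor's queue, even if every other reference goes away. A minimal sketch of that keep-alive pattern (Machine and the queue are invented stand-ins):
#include <functional>
#include <iostream>
#include <memory>
#include <vector>

//Capturing shared_from_this() in the scheduled lambda keeps the state
//machine alive while the task waits in the executor's queue.
struct Machine : std::enable_shared_from_this<Machine> {
    void scheduleNext(std::vector<std::function<void()>>& queue) {
        queue.push_back([self = shared_from_this()] { self->run(); });
    }
    void run() { std::cout << "next state runs\n"; }
};

int main() {
    std::vector<std::function<void()>> queue;
    std::make_shared<Machine>()->scheduleNext(queue); //last named ref dies here
    queue.front()(); //still alive: the lambda's captured shared_ptr owns it
}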
//the core of the state machine's state transitions
void ServiceStateMachine::_runNextInGuard(ThreadGuard guard) {
auto curState = state();
dassert(curState != State::Ended);
// If this is the first run of the SSM, then update its state to Source
if (curState == State::Created) {
curState = State::Source;
_state.store(curState);
}
// Destroy the opCtx (already killed) here, to potentially use the delay between clients'
// requests to hide the destruction cost.
if (MONGO_likely(_killedOpCtx)) {
_killedOpCtx.reset();
}
// Make sure the current Client got set correctly
dassert(Client::getCurrent() == _dbClientPtr);
try {
switch (curState) {
case State::Source:
_sourceMessage(std::move(guard));
break;
case State::Process:
_processMessage(std::move(guard));
break;
case State::EndSession:
_cleanupSession(std::move(guard));
break;
default:
MONGO_UNREACHABLE;
}
return;
} catch (const DBException& e) {
}
if (!guard) {
guard = ThreadGuard(this);
}
_state.store(State::EndSession);
_cleanupSession(std::move(guard));
}
ServiceStateMachine: receiving messages with _sourceMessage
This still goes through the session's receive path, which is in fact ASIOSession's receive.
void ServiceStateMachine::_sourceMessage(ThreadGuard guard) {
invariant(_inMessage.empty());
invariant(_state.load() == State::Source);
_state.store(State::SourceWait);
guard.release();
auto sourceMsgImpl = [&] {
if (_transportMode == transport::Mode::kSynchronous) {
MONGO_IDLE_THREAD_BLOCK;
return Future<Message>::makeReady(_session()->sourceMessage());
} else { //asynchronous mode takes this branch: receive asynchronously
invariant(_transportMode == transport::Mode::kAsynchronous);
return _session()->asyncSourceMessage();
}
};
//invoke the lambda above and register another lambda as the success continuation, to be run on the asynchronous callback
sourceMsgImpl().getAsync([this](StatusWith<Message> msg) {
if (msg.isOK()) {
_inMessage = std::move(msg.getValue());
invariant(!_inMessage.empty());
}
_sourceCallback(msg.getStatus()); //once a message is received, move to the next state and process it
});
}
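The getAsync call is a continuation: the lambda passed in runs once the future's value is ready. A toy MiniFuture (a made-up stand-in, not MongoDB's Future<T>) showing the shape of that API on an already-ready value:
#include <functional>
#include <iostream>
#include <string>

//Hypothetical MiniFuture: an already-ready future whose getAsync runs the
//continuation immediately. Real futures defer it until the value arrives.
template <typename T>
struct MiniFuture {
    T value;
    static MiniFuture makeReady(T v) { return MiniFuture{std::move(v)}; }
    void getAsync(std::function<void(T)> continuation) { continuation(std::move(value)); }
};

int main() {
    auto sourceMsgImpl = [] { return MiniFuture<std::string>::makeReady("ping"); };
    sourceMsgImpl().getAsync([](std::string msg) {
        std::cout << "sourced: " << msg << "\n"; //analogue of _sourceCallback
    });
}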
ServiceStateMachine: processing messages with _processMessage
Processes the request and sends the response asynchronously.
void ServiceStateMachine::_processMessage(ThreadGuard guard) {
invariant(!_inMessage.empty());
TrafficRecorder::get(_serviceContext)
.observe(_sessionHandle, _serviceContext->getPreciseClockSource()->now(), _inMessage);
//decompress the message
auto& compressorMgr = MessageCompressorManager::forSession(_session());
_compressorId = boost::none;
if (_inMessage.operation() == dbCompressed) {
MessageCompressorId compressorId;
auto swm = compressorMgr.decompressMessage(_inMessage, &compressorId);
uassertStatusOK(swm.getStatus());
_inMessage = swm.getValue();
_compressorId = compressorId;
}
networkCounter.hitLogicalIn(_inMessage.size());
//create a new operation context
// Pass sourced Message to handler to generate response.
auto opCtx = Client::getCurrent()->makeOperationContext();
if (_inExhaust) {
opCtx->markKillOnClientDisconnect();
}
//handle the request; this actually invokes ServiceEntryPointMongod::handleRequest
DbResponse dbresponse = _sep->handleRequest(opCtx.get(), _inMessage);
_serviceContext->killAndDelistOperation(opCtx.get(), ErrorCodes::OperationIsKilledAndDelisted);
invariant(!_killedOpCtx);
_killedOpCtx = std::move(opCtx);
//build the response message
Message& toSink = dbresponse.response;
if (!toSink.empty()) { //there is a response to send
invariant(!OpMsg::isFlagSet(_inMessage, OpMsg::kMoreToCome));
invariant(!OpMsg::isFlagSet(toSink, OpMsg::kChecksumPresent));
// Update the header for the response message.
toSink.header().setId(nextMessageId());
toSink.header().setResponseToMsgId(_inMessage.header().getId());
if (OpMsg::isFlagSet(_inMessage, OpMsg::kChecksumPresent)) {
#ifdef MONGO_CONFIG_SSL
if (!SSLPeerInfo::forSession(_session()).isTLS) {
OpMsg::appendChecksum(&toSink);
}
#else
OpMsg::appendChecksum(&toSink);
#endif
}
// If the incoming message has the exhaust flag set, then we bypass the normal RPC behavior.
// We will sink the response to the network, but we also synthesize a new request, as if we
// sourced a new message from the network. This new request is sent to the database once
// again to be processed. This cycle repeats as long as the command indicates the exhaust
// stream should continue.
_inMessage = makeExhaustMessage(_inMessage, &dbresponse);
_inExhaust = !_inMessage.empty();
networkCounter.hitLogicalOut(toSink.size());
if (_compressorId) {
auto swm = compressorMgr.compressMessage(toSink, &_compressorId.value());
uassertStatusOK(swm.getStatus());
toSink = swm.getValue();
}
TrafficRecorder::get(_serviceContext)
.observe(_sessionHandle, _serviceContext->getPreciseClockSource()->now(), toSink);
//send the response
_sinkMessage(std::move(guard), std::move(toSink));
} else {
_state.store(State::Source);
_inMessage.reset();
_inExhaust = false;
//schedule the next task; go back to the receive-message state
return _scheduleNextWithGuard(std::move(guard),
ServiceExecutor::kDeferredTask,
transport::ServiceExecutorTaskName::kSSMSourceMessage);
}
}
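One small but important step above is toSink.header().setResponseToMsgId(...): the response carries the request's message id so the client can correlate them. A simplified illustration (MsgHeader here is a made-up struct, not the real wire header):
#include <cassert>
#include <cstdint>

//Simplified wire header: the response carries the request's id in its
//responseTo field, which is what setResponseToMsgId does.
struct MsgHeader { int32_t messageId; int32_t responseTo; };

int32_t nextMessageId() { static int32_t id = 0; return ++id; }

int main() {
    MsgHeader request{nextMessageId(), 0};
    MsgHeader response{nextMessageId(), request.messageId};
    assert(response.responseTo == request.messageId);
}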
MongoDB core class: the ServiceExecutorReserved task executor
This is the reserved executor, backed by a thread pool; tasks are scheduled here.
Status ServiceExecutorReserved::schedule(Task task,
ScheduleFlags flags,
ServiceExecutorTaskName taskName) {
if (!_stillRunning.load()) {
return Status{ErrorCodes::ShutdownInProgress, "Executor is not running"};
}
stdx::lock_guard<Latch> lk(_mutex);
_readyTasks.push_back(std::move(task));
_threadWakeup.notify_one(); //wake one worker thread to run the task
return Status::OK();
}
Status ServiceExecutorReserved::_startWorker() {
return launchServiceWorkerThread([this] { //the lambda is the thread function
stdx::unique_lock<Latch> lk(_mutex);
_numRunningWorkerThreads.addAndFetch(1);
auto numRunningGuard = makeGuard([&] {
_numRunningWorkerThreads.subtractAndFetch(1);
_shutdownCondition.notify_one();
});
_numStartingThreads--;
_numReadyThreads++;
while (_stillRunning.load()) { //loop until shutdown
//block here when there is no work; woken by the _threadWakeup.notify_one() above
_threadWakeup.wait(lk, [&] { return (!_stillRunning.load() || !_readyTasks.empty()); });
if (!_stillRunning.loadRelaxed()) {
break;
}
if (_readyTasks.empty()) {
continue;
}
auto task = std::move(_readyTasks.front()); //take a task
_readyTasks.pop_front();
_numReadyThreads -= 1;
bool launchReplacement = false;
if (_numReadyThreads + _numStartingThreads < _reservedThreads) {
_numStartingThreads++;
launchReplacement = true;
}
lk.unlock();
//(launching the replacement worker when launchReplacement is true is elided in this excerpt)
//move the task from the global queue into the local queue
_localWorkQueue.emplace_back(std::move(task));
while (!_localWorkQueue.empty() && _stillRunning.loadRelaxed()) {
_localRecursionDepth = 1;
_localWorkQueue.front()(); //the actual task runs here
_localWorkQueue.pop_front();
}
lk.lock();
if (_numReadyThreads + 1 > _reservedThreads) {
break;
} else {
_numReadyThreads += 1;
}
}
});
}
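Condensed, the producer/consumer pattern above looks like this runnable sketch: schedule() pushes a task and wakes one worker; the worker blocks on a condition variable with a predicate and runs tasks outside the lock. (Single worker, no replacement-thread logic, standard library types instead of MongoDB's Latch.)
#include <condition_variable>
#include <deque>
#include <functional>
#include <iostream>
#include <mutex>
#include <thread>

std::mutex mu;
std::condition_variable wakeup; //plays _threadWakeup
std::deque<std::function<void()>> readyTasks;
bool stillRunning = true;

void schedule(std::function<void()> task) {
    { std::lock_guard<std::mutex> lk(mu);
      readyTasks.push_back(std::move(task)); }
    wakeup.notify_one(); //wake one idle worker
}

void worker() {
    std::unique_lock<std::mutex> lk(mu);
    for (;;) {
        wakeup.wait(lk, [] { return !stillRunning || !readyTasks.empty(); });
        if (readyTasks.empty()) break; //only exit once the queue is drained
        auto task = std::move(readyTasks.front());
        readyTasks.pop_front();
        lk.unlock();
        task(); //run outside the lock, like _localWorkQueue.front()()
        lk.lock();
    }
}

int main() {
    std::thread t(worker);
    schedule([] { std::cout << "task ran\n"; });
    { std::lock_guard<std::mutex> lk(mu); stillRunning = false; }
    wakeup.notify_all();
    t.join();
}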
//launch a worker thread
Status launchServiceWorkerThread(std::function<void()> task) {
try {
#if defined(_WIN32)
stdx::thread(std::move(task)).detach();
#else
pthread_attr_t attrs;
pthread_attr_init(&attrs);
pthread_attr_setdetachstate(&attrs, PTHREAD_CREATE_DETACHED);
static const rlim_t kStackSize =
1024 * 1024; // if we change this we need to update the warning
struct rlimit limits;
invariant(getrlimit(RLIMIT_STACK, &limits) == 0);
if (limits.rlim_cur > kStackSize) {
size_t stackSizeToSet = kStackSize;
#if !__has_feature(address_sanitizer)
if (kDebugBuild)
stackSizeToSet /= 2;
#endif
int failed = pthread_attr_setstacksize(&attrs, stackSizeToSet);
if (failed) {
const auto ewd = errnoWithDescription(failed);
LOGV2_WARNING(22949,
"pthread_attr_setstacksize failed: {error}",
"pthread_attr_setstacksize failed",
"error"_attr = ewd);
}
} else if (limits.rlim_cur < 1024 * 1024) {
LOGV2_WARNING(22950,
"Stack size set to {stackSizeKiB}KiB. We suggest 1024KiB",
"Stack size not set to suggested 1024KiB",
"stackSizeKiB"_attr = (limits.rlim_cur / 1024));
}
// Wrap the user-specified `task` so it runs with an installed `sigaltstack`.
task = [sigAltStackController = std::make_shared<stdx::support::SigAltStackController>(),
f = std::move(task)] {
auto sigAltStackGuard = sigAltStackController->makeInstallGuard();
f();
};
pthread_t thread;
auto ctx = std::make_unique<std::function<void()>>(std::move(task));
ThreadSafetyContext::getThreadSafetyContext()->onThreadCreate();
int failed = pthread_create(&thread, &attrs, runFunc, ctx.get());
pthread_attr_destroy(&attrs);
if (failed) {
LOGV2(22948,
"pthread_create failed: {errno}",
"pthread_create failed",
"error"_attr = errnoWithDescription(failed));
throw std::system_error(
std::make_error_code(std::errc::resource_unavailable_try_again));
}
ctx.release();
#endif
} catch (...) {
return {ErrorCodes::InternalError, "failed to create service entry worker thread"};
}
return Status::OK();
}
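Stripped of the warnings and sigaltstack wrapping, the core of launchServiceWorkerThread is a detached pthread with an explicit 1 MiB stack. A minimal sketch (error handling trimmed; the sleep only keeps main alive long enough to see the output):
#include <pthread.h>
#include <cstdio>
#include <unistd.h>

void* runFunc(void*) { puts("worker running"); return nullptr; }

int main() {
    pthread_attr_t attrs;
    pthread_attr_init(&attrs);
    pthread_attr_setdetachstate(&attrs, PTHREAD_CREATE_DETACHED);
    pthread_attr_setstacksize(&attrs, 1024 * 1024); //the 1 MiB kStackSize above
    pthread_t thread;
    if (pthread_create(&thread, &attrs, runFunc, nullptr) != 0)
        fprintf(stderr, "pthread_create failed\n");
    pthread_attr_destroy(&attrs);
    sleep(1); //give the detached worker time to run before main exits
    return 0;
}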