pageserver: automated rename of Tenant -> TenantShard #10428

Open · wants to merge 5 commits into main
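This PR is a mechanical, repository-wide rename: the `Tenant` type becomes `TenantShard`, and related items follow suit (`TenantManager` to `TenantShardManager`, `TenantHarness` to `TenantShardHarness`). As a hedged aside, one common way to stage such a rename without breaking out-of-tree callers is a deprecated type alias; nothing in this diff shows whether the PR uses one, so the sketch below is purely illustrative:

```rust
// Hypothetical migration shim, not taken from this PR. After the struct is
// renamed, a deprecated alias keeps old call sites compiling, each emitting
// a warning that points at the new name.
pub struct TenantShard {
    // fields elided
}

#[deprecated(note = "renamed to TenantShard")]
pub type Tenant = TenantShard;
```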
10 changes: 5 additions & 5 deletions pageserver/src/config.rs
@@ -109,13 +109,13 @@ pub struct PageServerConf {
     /// A lower value implicitly deprioritizes loading such tenants, vs. other work in the system.
     pub concurrent_tenant_warmup: ConfigurableSemaphore,
 
-    /// Number of concurrent [`Tenant::gather_size_inputs`](crate::tenant::Tenant::gather_size_inputs) allowed.
+    /// Number of concurrent [`TenantShard::gather_size_inputs`](crate::tenant::TenantShard::gather_size_inputs) allowed.
     pub concurrent_tenant_size_logical_size_queries: ConfigurableSemaphore,
-    /// Limit of concurrent [`Tenant::gather_size_inputs`] issued by module `eviction_task`.
+    /// Limit of concurrent [`TenantShard::gather_size_inputs`] issued by module `eviction_task`.
     /// The number of permits is the same as `concurrent_tenant_size_logical_size_queries`.
     /// See the comment in `eviction_task` for details.
     ///
-    /// [`Tenant::gather_size_inputs`]: crate::tenant::Tenant::gather_size_inputs
+    /// [`TenantShard::gather_size_inputs`]: crate::tenant::TenantShard::gather_size_inputs
     pub eviction_task_immitated_concurrent_logical_size_queries: ConfigurableSemaphore,
 
     // How often to collect metrics and send them to the metrics endpoint.
@@ -509,10 +509,10 @@ impl ConfigurableSemaphore {
     /// Initialize using a non-zero amount of permits.
     ///
     /// Requires non-zero initial permits, because using permits == 0 is a crude way to disable a
-    /// feature such as [`Tenant::gather_size_inputs`]. Otherwise any semaphore-using future will
+    /// feature such as [`TenantShard::gather_size_inputs`]. Otherwise any semaphore-using future will
     /// behave like [`futures::future::pending`], just waiting until new permits are added.
     ///
-    /// [`Tenant::gather_size_inputs`]: crate::tenant::Tenant::gather_size_inputs
+    /// [`TenantShard::gather_size_inputs`]: crate::tenant::TenantShard::gather_size_inputs
     pub fn new(initial_permits: NonZeroUsize) -> Self {
         ConfigurableSemaphore {
             initial_permits,
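The `ConfigurableSemaphore::new` hunk above encodes a real invariant: a semaphore created with zero permits never grants one, so every acquirer waits forever, exactly like `futures::future::pending`. A minimal sketch of how `NonZeroUsize` rules that misconfiguration out, assuming a Tokio semaphore underneath (the real type may hold more state than shown):

```rust
use std::num::NonZeroUsize;
use tokio::sync::Semaphore;

/// Sketch of a semaphore wrapper that rejects zero permits at the type
/// level: a misconfigured `0` can no longer silently turn every
/// `acquire().await` into an endless wait.
pub struct ConfigurableSemaphore {
    initial_permits: NonZeroUsize,
    inner: Semaphore,
}

impl ConfigurableSemaphore {
    pub fn new(initial_permits: NonZeroUsize) -> Self {
        ConfigurableSemaphore {
            initial_permits,
            inner: Semaphore::new(initial_permits.get()),
        }
    }
}
```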
10 changes: 5 additions & 5 deletions pageserver/src/consumption_metrics.rs
@@ -7,7 +7,7 @@ use crate::context::{DownloadBehavior, RequestContext};
 use crate::task_mgr::{self, TaskKind, BACKGROUND_RUNTIME};
 use crate::tenant::size::CalculateSyntheticSizeError;
 use crate::tenant::tasks::BackgroundLoopKind;
-use crate::tenant::{mgr::TenantManager, LogicalSizeCalculationCause, Tenant};
+use crate::tenant::{mgr::TenantShardManager, LogicalSizeCalculationCause, TenantShard};
 use camino::Utf8PathBuf;
 use consumption_metrics::EventType;
 use itertools::Itertools as _;
@@ -95,7 +95,7 @@ type Cache = HashMap<MetricsKey, NewRawMetric>;
 
 pub async fn run(
     conf: &'static PageServerConf,
-    tenant_manager: Arc<TenantManager>,
+    tenant_manager: Arc<TenantShardManager>,
     cancel: CancellationToken,
 ) {
     let Some(metric_collection_endpoint) = conf.metric_collection_endpoint.as_ref() else {
@@ -150,7 +150,7 @@ pub async fn run(
 /// Main thread that serves metrics collection
 #[allow(clippy::too_many_arguments)]
 async fn collect_metrics(
-    tenant_manager: Arc<TenantManager>,
+    tenant_manager: Arc<TenantShardManager>,
     metric_collection_endpoint: &Url,
     metric_collection_bucket: &Option<RemoteStorageConfig>,
     metric_collection_interval: Duration,
@@ -362,7 +362,7 @@ async fn reschedule(
 
 /// Calculate synthetic size for each active tenant
 async fn calculate_synthetic_size_worker(
-    tenant_manager: Arc<TenantManager>,
+    tenant_manager: Arc<TenantShardManager>,
     synthetic_size_calculation_interval: Duration,
     cancel: CancellationToken,
     ctx: RequestContext,
@@ -425,7 +425,7 @@ async fn calculate_synthetic_size_worker(
     }
 }
 
-async fn calculate_and_log(tenant: &Tenant, cancel: &CancellationToken, ctx: &RequestContext) {
+async fn calculate_and_log(tenant: &TenantShard, cancel: &CancellationToken, ctx: &RequestContext) {
     const CAUSE: LogicalSizeCalculationCause =
         LogicalSizeCalculationCause::ConsumptionMetricsSyntheticSize;
 
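The function bodies above are collapsed, so only the signatures are visible. For orientation, here is a rough sketch of the shape `calculate_and_log` suggests, with a hypothetical `calculate_synthetic_size` method standing in for whatever the real `TenantShard` exposes:

```rust
use tokio_util::sync::CancellationToken;

// Hypothetical stand-ins: the real TenantShard API is collapsed in this
// diff, so the type and method below are illustrative only.
struct TenantShard;

impl TenantShard {
    async fn calculate_synthetic_size(&self) -> anyhow::Result<u64> {
        Ok(0) // placeholder
    }
}

/// Sketch: compute the synthetic size for one shard, bail out early if
/// shutdown was requested, and log the outcome rather than propagating it.
async fn calculate_and_log(tenant: &TenantShard, cancel: &CancellationToken) {
    tokio::select! {
        _ = cancel.cancelled() => {} // worker is shutting down; skip quietly
        result = tenant.calculate_synthetic_size() => match result {
            Ok(size) => tracing::info!("synthetic size: {size}"),
            Err(err) => tracing::warn!("synthetic size calculation failed: {err:#}"),
        },
    }
}
```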
16 changes: 8 additions & 8 deletions pageserver/src/consumption_metrics/metrics.rs
@@ -1,4 +1,4 @@
-use crate::tenant::mgr::TenantManager;
+use crate::tenant::mgr::TenantShardManager;
 use crate::{context::RequestContext, tenant::timeline::logical_size::CurrentLogicalSize};
 use chrono::{DateTime, Utc};
 use consumption_metrics::EventType;
@@ -174,9 +174,9 @@ impl MetricsKey {
         .absolute_values()
     }
 
-    /// [`Tenant::remote_size`]
+    /// [`TenantShard::remote_size`]
     ///
-    /// [`Tenant::remote_size`]: crate::tenant::Tenant::remote_size
+    /// [`TenantShard::remote_size`]: crate::tenant::TenantShard::remote_size
     const fn remote_storage_size(tenant_id: TenantId) -> AbsoluteValueFactory {
         MetricsKey {
             tenant_id,
@@ -198,9 +198,9 @@ impl MetricsKey {
         .absolute_values()
     }
 
-    /// [`Tenant::cached_synthetic_size`] as refreshed by [`calculate_synthetic_size_worker`].
+    /// [`TenantShard::cached_synthetic_size`] as refreshed by [`calculate_synthetic_size_worker`].
     ///
-    /// [`Tenant::cached_synthetic_size`]: crate::tenant::Tenant::cached_synthetic_size
+    /// [`TenantShard::cached_synthetic_size`]: crate::tenant::TenantShard::cached_synthetic_size
     /// [`calculate_synthetic_size_worker`]: super::calculate_synthetic_size_worker
     const fn synthetic_size(tenant_id: TenantId) -> AbsoluteValueFactory {
         MetricsKey {
@@ -213,7 +213,7 @@
     }
 }
 
 pub(super) async fn collect_all_metrics(
-    tenant_manager: &Arc<TenantManager>,
+    tenant_manager: &Arc<TenantShardManager>,
     cached_metrics: &Cache,
     ctx: &RequestContext,
 ) -> Vec<NewRawMetric> {
@@ -253,7 +253,7 @@ pub(super) async fn collect_all_metrics(
 
 async fn collect<S>(tenants: S, cache: &Cache, ctx: &RequestContext) -> Vec<NewRawMetric>
 where
-    S: futures::stream::Stream<Item = (TenantId, Arc<crate::tenant::Tenant>)>,
+    S: futures::stream::Stream<Item = (TenantId, Arc<crate::tenant::TenantShard>)>,
 {
     let mut current_metrics: Vec<NewRawMetric> = Vec::new();
 
@@ -307,7 +307,7 @@ impl TenantSnapshot {
     ///
     /// `resident_size` is calculated from the timelines we had access to for other metrics, so we
     /// cannot just list timelines here.
-    fn collect(t: &Arc<crate::tenant::Tenant>, resident_size: u64) -> Self {
+    fn collect(t: &Arc<crate::tenant::TenantShard>, resident_size: u64) -> Self {
         TenantSnapshot {
             resident_size,
             remote_size: t.remote_size(),
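The `MetricsKey` hunks follow a small builder pattern: a `const fn` names a per-tenant series, and `.absolute_values()` wraps the key in a factory that stamps absolute (gauge-style) samples. A compacted sketch of that pattern; field and helper names beyond `remote_storage_size` and `absolute_values` are assumptions, since the diff collapses them:

```rust
use chrono::{DateTime, Utc};

// Stand-in id type; the real code uses the pageserver's TenantId.
#[derive(Clone, Copy)]
pub struct TenantId(pub u128);

#[derive(Clone, Copy)]
pub struct MetricsKey {
    tenant_id: TenantId,
    metric: &'static str, // the real code presumably uses an enum of names
}

/// Factory that stamps absolute-valued (gauge-style) samples of one series.
pub struct AbsoluteValueFactory(MetricsKey);

impl MetricsKey {
    const fn absolute_values(self) -> AbsoluteValueFactory {
        AbsoluteValueFactory(self)
    }

    /// Mirrors the shape of `remote_storage_size` in the hunk above.
    const fn remote_storage_size(tenant_id: TenantId) -> AbsoluteValueFactory {
        MetricsKey {
            tenant_id,
            metric: "remote_storage_size",
        }
        .absolute_values()
    }
}

impl AbsoluteValueFactory {
    /// Produce one sample of the series at `time`.
    pub fn at(self, time: DateTime<Utc>, value: u64) -> (MetricsKey, DateTime<Utc>, u64) {
        (self.0, time, value)
    }
}
```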
6 changes: 3 additions & 3 deletions pageserver/src/deletion_queue.rs
@@ -703,7 +703,7 @@ mod test {
 
     use crate::{
         controller_upcall_client::RetryForeverError,
-        tenant::{harness::TenantHarness, storage_layer::DeltaLayerName},
+        tenant::{harness::TenantShardHarness, storage_layer::DeltaLayerName},
     };
 
     use super::*;
@@ -722,7 +722,7 @@
     });
 
     struct TestSetup {
-        harness: TenantHarness,
+        harness: TenantShardHarness,
         remote_fs_dir: Utf8PathBuf,
         storage: GenericRemoteStorage,
         mock_control_plane: MockControlPlane,
@@ -825,7 +825,7 @@
 
     async fn setup(test_name: &str) -> anyhow::Result<TestSetup> {
         let test_name = Box::leak(Box::new(format!("deletion_queue__{test_name}")));
-        let harness = TenantHarness::create(test_name).await?;
+        let harness = TenantShardHarness::create(test_name).await?;
 
         // We do not load() the harness: we only need its config and remote_storage
 
12 changes: 6 additions & 6 deletions pageserver/src/disk_usage_eviction_task.rs
@@ -57,7 +57,7 @@ use crate::{
     metrics::disk_usage_based_eviction::METRICS,
     task_mgr::{self, BACKGROUND_RUNTIME},
     tenant::{
-        mgr::TenantManager,
+        mgr::TenantShardManager,
         remote_timeline_client::LayerFileMetadata,
         secondary::SecondaryTenant,
         storage_layer::{AsLayerDesc, EvictionError, Layer, LayerName, LayerVisibilityHint},
@@ -166,7 +166,7 @@ pub fn launch_disk_usage_global_eviction_task(
     conf: &'static PageServerConf,
     storage: GenericRemoteStorage,
     state: Arc<State>,
-    tenant_manager: Arc<TenantManager>,
+    tenant_manager: Arc<TenantShardManager>,
     background_jobs_barrier: completion::Barrier,
 ) -> Option<DiskUsageEvictionTask> {
     let Some(task_config) = &conf.disk_usage_based_eviction else {
@@ -203,7 +203,7 @@ async fn disk_usage_eviction_task(
     state: &State,
     task_config: &DiskUsageEvictionTaskConfig,
     storage: &GenericRemoteStorage,
-    tenant_manager: Arc<TenantManager>,
+    tenant_manager: Arc<TenantShardManager>,
     cancel: CancellationToken,
 ) {
     scopeguard::defer! {
@@ -265,7 +265,7 @@ async fn disk_usage_eviction_task_iteration(
     state: &State,
     task_config: &DiskUsageEvictionTaskConfig,
     storage: &GenericRemoteStorage,
-    tenant_manager: &Arc<TenantManager>,
+    tenant_manager: &Arc<TenantShardManager>,
     cancel: &CancellationToken,
 ) -> anyhow::Result<()> {
     let tenants_dir = tenant_manager.get_conf().tenants_path();
@@ -361,7 +361,7 @@ pub(crate) async fn disk_usage_eviction_task_iteration_impl<U: Usage>(
     state: &State,
     _storage: &GenericRemoteStorage,
     usage_pre: U,
-    tenant_manager: &Arc<TenantManager>,
+    tenant_manager: &Arc<TenantShardManager>,
     eviction_order: EvictionOrder,
     cancel: &CancellationToken,
 ) -> anyhow::Result<IterationOutcome<U>> {
@@ -788,7 +788,7 @@ enum EvictionCandidates {
 /// - tenant B 1 layer
 /// - tenant C 8 layers
 async fn collect_eviction_candidates(
-    tenant_manager: &Arc<TenantManager>,
+    tenant_manager: &Arc<TenantShardManager>,
     eviction_order: EvictionOrder,
     cancel: &CancellationToken,
 ) -> anyhow::Result<EvictionCandidates> {
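The truncated doc comment above (tenant B with 1 layer, tenant C with 8) is the tail of an example about how candidates from different tenants are interleaved. Only the `EvictionOrder`, `EvictionCandidates`, and `collect_eviction_candidates` names are visible here, so the following is a loose sketch of the global-ordering idea; the types, fields, and the variant name are all invented for illustration:

```rust
// Invented types for illustration; only the EvictionOrder name comes from
// the diff. Candidates from every tenant shard are pooled, then sorted by
// a single global criterion so disk pressure is relieved across tenants.
struct Candidate {
    tenant: String,
    layer: String,
    last_activity_secs: u64, // seconds since last access; higher = colder
}

enum EvictionOrder {
    LeastRecentlyAccessed, // hypothetical variant name
}

fn order_candidates(order: EvictionOrder, mut candidates: Vec<Candidate>) -> Vec<Candidate> {
    match order {
        EvictionOrder::LeastRecentlyAccessed => {
            // Coldest (least recently accessed) layers first, regardless of
            // which tenant owns them.
            candidates.sort_by(|a, b| b.last_activity_secs.cmp(&a.last_activity_secs));
        }
    }
    candidates
}
```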
12 changes: 6 additions & 6 deletions pageserver/src/http/routes.rs
@@ -74,7 +74,7 @@ use crate::task_mgr::TaskKind;
 use crate::tenant::config::{LocationConf, TenantConfOpt};
 use crate::tenant::mgr::GetActiveTenantError;
 use crate::tenant::mgr::{
-    GetTenantError, TenantManager, TenantMapError, TenantMapInsertError, TenantSlotError,
+    GetTenantError, TenantMapError, TenantMapInsertError, TenantShardManager, TenantSlotError,
     TenantSlotUpsertError, TenantStateError,
 };
 use crate::tenant::mgr::{TenantSlot, UpsertLocationError};
@@ -130,7 +130,7 @@ pub(crate) const ACTIVE_TENANT_TIMEOUT: Duration = Duration::from_millis(30000);
 
 pub struct State {
     conf: &'static PageServerConf,
-    tenant_manager: Arc<TenantManager>,
+    tenant_manager: Arc<TenantShardManager>,
     auth: Option<Arc<SwappableJwtAuth>>,
     allowlist_routes: &'static [&'static str],
     remote_storage: GenericRemoteStorage,
@@ -145,7 +145,7 @@ impl State {
     #[allow(clippy::too_many_arguments)]
     pub fn new(
         conf: &'static PageServerConf,
-        tenant_manager: Arc<TenantManager>,
+        tenant_manager: Arc<TenantShardManager>,
         auth: Option<Arc<SwappableJwtAuth>>,
         remote_storage: GenericRemoteStorage,
         broker_client: storage_broker::BrokerClientChannel,
@@ -1761,7 +1761,7 @@ async fn update_tenant_config_handler(
         &ShardParameters::default(),
     );
 
-    crate::tenant::Tenant::persist_tenant_config(state.conf, &tenant_shard_id, &location_conf)
+    crate::tenant::TenantShard::persist_tenant_config(state.conf, &tenant_shard_id, &location_conf)
         .await
         .map_err(|e| ApiError::InternalServerError(anyhow::anyhow!(e)))?;
 
@@ -1802,7 +1802,7 @@ async fn patch_tenant_config_handler(
         &ShardParameters::default(),
     );
 
-    crate::tenant::Tenant::persist_tenant_config(state.conf, &tenant_shard_id, &location_conf)
+    crate::tenant::TenantShard::persist_tenant_config(state.conf, &tenant_shard_id, &location_conf)
        .await
        .map_err(|e| ApiError::InternalServerError(anyhow::anyhow!(e)))?;
 
@@ -2571,7 +2571,7 @@ async fn timeline_collect_keyspace(
 }
 
 async fn active_timeline_of_active_tenant(
-    tenant_manager: &TenantManager,
+    tenant_manager: &TenantShardManager,
     tenant_shard_id: TenantShardId,
     timeline_id: TimelineId,
 ) -> Result<Arc<Timeline>, ApiError> {
4 changes: 2 additions & 2 deletions pageserver/src/lib.rs
@@ -36,7 +36,7 @@ pub mod walredo;
 use camino::Utf8Path;
 use deletion_queue::DeletionQueue;
 use tenant::{
-    mgr::{BackgroundPurges, TenantManager},
+    mgr::{BackgroundPurges, TenantShardManager},
     secondary,
 };
 use tracing::{info, info_span};
@@ -81,7 +81,7 @@ pub async fn shutdown_pageserver(
     page_service: page_service::Listener,
     consumption_metrics_worker: ConsumptionMetricsTasks,
     disk_usage_eviction_task: Option<DiskUsageEvictionTask>,
-    tenant_manager: &TenantManager,
+    tenant_manager: &TenantShardManager,
     background_purges: BackgroundPurges,
     mut deletion_queue: DeletionQueue,
     secondary_controller_tasks: secondary::GlobalTasks,
2 changes: 1 addition & 1 deletion pageserver/src/metrics.rs
@@ -920,7 +920,7 @@ pub(crate) static TIMELINE_EPHEMERAL_BYTES: Lazy<UIntGauge> = Lazy::new(|| {
         .expect("Failed to register metric")
 });
 
-/// Metrics related to the lifecycle of a [`crate::tenant::Tenant`] object: things
+/// Metrics related to the lifecycle of a [`crate::tenant::TenantShard`] object: things
 /// like how long it took to load.
 ///
 /// Note that these are process-global metrics, _not_ per-tenant metrics. Per-tenant
14 changes: 7 additions & 7 deletions pageserver/src/page_service.rs
@@ -59,7 +59,7 @@ use crate::span::debug_assert_current_span_has_tenant_and_timeline_id_no_shard_i
 use crate::task_mgr::TaskKind;
 use crate::task_mgr::{self, COMPUTE_REQUEST_RUNTIME};
 use crate::tenant::mgr::ShardSelector;
-use crate::tenant::mgr::TenantManager;
+use crate::tenant::mgr::TenantShardManager;
 use crate::tenant::mgr::{GetActiveTenantError, GetTenantError, ShardResolveResult};
 use crate::tenant::timeline::{self, WaitLsnError};
 use crate::tenant::GetTimelineError;
@@ -94,7 +94,7 @@ pub struct Connections {
 
 pub fn spawn(
     conf: &'static PageServerConf,
-    tenant_manager: Arc<TenantManager>,
+    tenant_manager: Arc<TenantShardManager>,
     pg_auth: Option<Arc<SwappableJwtAuth>>,
     tcp_listener: tokio::net::TcpListener,
 ) -> Listener {
@@ -159,7 +159,7 @@ impl Connections {
 /// open connections.
 ///
 pub async fn libpq_listener_main(
-    tenant_manager: Arc<TenantManager>,
+    tenant_manager: Arc<TenantShardManager>,
     auth: Option<Arc<SwappableJwtAuth>>,
     listener: tokio::net::TcpListener,
     auth_type: AuthType,
@@ -218,7 +218,7 @@ type ConnectionHandlerResult = anyhow::Result<()>;
 
 #[instrument(skip_all, fields(peer_addr))]
 async fn page_service_conn_main(
-    tenant_manager: Arc<TenantManager>,
+    tenant_manager: Arc<TenantShardManager>,
     auth: Option<Arc<SwappableJwtAuth>>,
     socket: tokio::net::TcpStream,
     auth_type: AuthType,
@@ -337,7 +337,7 @@ struct TimelineHandles {
 }
 
 impl TimelineHandles {
-    fn new(tenant_manager: Arc<TenantManager>) -> Self {
+    fn new(tenant_manager: Arc<TenantShardManager>) -> Self {
         Self {
             wrapper: TenantManagerWrapper {
                 tenant_manager,
@@ -379,7 +379,7 @@ impl TimelineHandles {
 }
 
 pub(crate) struct TenantManagerWrapper {
-    tenant_manager: Arc<TenantManager>,
+    tenant_manager: Arc<TenantShardManager>,
     // We do not support switching tenant_id on a connection at this point.
     // We can add support for this later if needed without changing
     // the protocol.
@@ -613,7 +613,7 @@ impl BatchedFeMessage {
 
 impl PageServerHandler {
     pub fn new(
-        tenant_manager: Arc<TenantManager>,
+        tenant_manager: Arc<TenantShardManager>,
         auth: Option<Arc<SwappableJwtAuth>>,
         pipelining_config: PageServicePipeliningConfig,
         connection_ctx: RequestContext,
4 changes: 2 additions & 2 deletions pageserver/src/pgdatadir_mapping.rs
@@ -2404,13 +2404,13 @@ mod tests {
 
     use super::*;
 
-    use crate::{tenant::harness::TenantHarness, DEFAULT_PG_VERSION};
+    use crate::{tenant::harness::TenantShardHarness, DEFAULT_PG_VERSION};
 
     /// Test a round trip of aux file updates, from DatadirModification to reading back from the Timeline
     #[tokio::test]
     async fn aux_files_round_trip() -> anyhow::Result<()> {
         let name = "aux_files_round_trip";
-        let harness = TenantHarness::create(name).await?;
+        let harness = TenantShardHarness::create(name).await?;
 
         pub const TIMELINE_ID: TimelineId =
             TimelineId::from_array(hex!("11223344556677881122334455667788"));