diff --git a/_partials/_not-supported-for-azure.md b/_partials/_not-supported-for-azure.md
new file mode 100644
index 0000000000..cc108fd357
--- /dev/null
+++ b/_partials/_not-supported-for-azure.md
@@ -0,0 +1,5 @@
+
+
+This feature is on our roadmap for $CLOUD_LONG on Microsoft Azure. Stay tuned!
+
+
\ No newline at end of file
diff --git a/_partials/_prometheus-integrate.md b/_partials/_prometheus-integrate.md
index ad92cdcbd5..9a18db415d 100644
--- a/_partials/_prometheus-integrate.md
+++ b/_partials/_prometheus-integrate.md
@@ -1,4 +1,5 @@
 import IntegrationPrereqs from "versionContent/_partials/_integration-prereqs.mdx";
+import NotSupportedAzure from "versionContent/_partials/_not-supported-for-azure.mdx";
 
 [Prometheus][prometheus] is an open-source monitoring system with a dimensional data model, flexible query language, and a modern alerting approach.
 
@@ -20,6 +21,8 @@ To follow the steps on this page:
   - [Install Postgres Exporter][install-exporter].
   To reduce latency and potential data transfer costs, install Prometheus and Postgres Exporter on a machine in the same AWS region as your $SERVICE_LONG.
 
+
+
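As a quick sanity check before wiring Prometheus to the Postgres Exporter listed in the prerequisites above, the sketch below fetches the exporter's metrics endpoint and prints the `pg_up` gauge. It assumes a local install on the exporter's default port 9187; the URL is a placeholder, so adjust it for your environment.

```python
# Minimal sketch: confirm Postgres Exporter is serving metrics before
# pointing Prometheus at it. Assumes the default postgres_exporter port
# (9187) on the local machine; EXPORTER_URL is a placeholder.
from urllib.request import urlopen

EXPORTER_URL = "http://localhost:9187/metrics"

with urlopen(EXPORTER_URL, timeout=10) as resp:
    body = resp.read().decode("utf-8")

# pg_up is 1 when the exporter can reach the database it scrapes.
for line in body.splitlines():
    if line.startswith("pg_up"):
        print(line)
```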
 ## Export $SERVICE_LONG telemetry to Prometheus
 
 To export your data, do the following:
diff --git a/integrations/aws.md b/integrations/aws.md
index ab828ef7df..13e2f0662a 100644
--- a/integrations/aws.md
+++ b/integrations/aws.md
@@ -8,9 +8,11 @@ keywords: [AWS, integrations]
 
 import IntegrationPrereqsCloud from "versionContent/_partials/_integration-prereqs-cloud-only.mdx";
 import TransitGateway from "versionContent/_partials/_transit-gateway.mdx";
+import NotSupportedAzure from "versionContent/_partials/_not-supported-for-azure.mdx";
 
 # Integrate Amazon Web Services with $CLOUD_LONG
 
+
 [Amazon Web Services (AWS)][aws] is a comprehensive cloud computing platform that provides on-demand infrastructure, storage, databases, AI, analytics, and security services to help businesses build, deploy, and scale applications in the cloud.
 
 This page explains how to integrate your AWS infrastructure with $CLOUD_LONG using [AWS Transit Gateway][aws-transit-gateway].
@@ -21,6 +23,8 @@ This page explains how to integrate your AWS infrastructure with $CLOUD_LONG usi
 
 - Set up [AWS Transit Gateway][gtw-setup].
 
+
+
 ## Connect your AWS infrastructure to your $SERVICE_LONGs
 
 To connect to $CLOUD_LONG:
@@ -33,6 +37,11 @@ To connect to $CLOUD_LONG:
 
 You have successfully integrated your AWS infrastructure with $CLOUD_LONG. 
 
+
+
+
+
+
 [aws]: https://aws.amazon.com/
 [aws-transit-gateway]: https://aws.amazon.com/transit-gateway/
 [gtw-setup]: https://docs.aws.amazon.com/vpc/latest/tgw/tgw-getting-started.html
diff --git a/integrations/cloudwatch.md b/integrations/cloudwatch.md
index b8aafa5488..58722a9b32 100644
--- a/integrations/cloudwatch.md
+++ b/integrations/cloudwatch.md
@@ -6,6 +6,7 @@ price_plans: [scale, enterprise]
 keywords: [integrate]
 ---
 
+import NotSupportedAzure from "versionContent/_partials/_not-supported-for-azure.mdx";
 import IntegrationPrereqsCloud from "versionContent/_partials/_integration-prereqs-cloud-only.mdx";
 import CloudWatchExporter from "versionContent/_partials/_cloudwatch-data-exporter.mdx";
 import ManageDataExporter from "versionContent/_partials/_manage-a-data-exporter.mdx";
@@ -24,6 +25,8 @@ This page explains how to export telemetry data from your $SERVICE_LONG into Cl
 
 - Sign up for [Amazon CloudWatch][cloudwatch-signup].
 
+
+
 ## Create a data exporter
 
 A $CLOUD_LONG data exporter sends telemetry data from a $SERVICE_LONG to a third-party monitoring
@@ -33,6 +36,9 @@ tool. You create an exporter on the [project level][projects], in the same AWS r
 
 
 
+
+
+
 [projects]: /use-timescale/:currentVersion:/security/members/
 [pricing-plan-features]: /about/:currentVersion:/pricing-and-account-management/#features-included-in-each-pricing-plan
 [cloudwatch]: https://aws.amazon.com/cloudwatch/
diff --git a/integrations/corporate-data-center.md b/integrations/corporate-data-center.md
index c95fed50be..f097a6f454 100644
--- a/integrations/corporate-data-center.md
+++ b/integrations/corporate-data-center.md
@@ -8,6 +8,7 @@ keywords: [on-premise, integrations]
 
 import IntegrationPrereqsCloud from "versionContent/_partials/_integration-prereqs-cloud-only.mdx";
 import TransitGateway from "versionContent/_partials/_transit-gateway.mdx";
+import NotSupportedAzure from "versionContent/_partials/_not-supported-for-azure.mdx";
 
 # Integrate your data center with $CLOUD_LONG
 
@@ -18,6 +19,7 @@ This page explains how to integrate your corporate on-premise infrastructure wit
 
 
 - Set up [AWS Transit Gateway][gtw-setup].
+
 
 ## Connect your on-premise infrastructure to your $SERVICE_LONGs
 
@@ -33,7 +35,11 @@ To connect to $CLOUD_LONG:
 
 
 
-You have successfully integrated your Microsoft Azure infrastructure with $CLOUD_LONG.
+You have successfully integrated your corporate data center with $CLOUD_LONG.
+
+
+
+
 
 [aws-transit-gateway]: https://aws.amazon.com/transit-gateway/
 [gtw-setup]: https://docs.aws.amazon.com/vpc/latest/tgw/tgw-getting-started.html
diff --git a/integrations/datadog.md b/integrations/datadog.md
index ccd4219fc9..005f9c063f 100644
--- a/integrations/datadog.md
+++ b/integrations/datadog.md
@@ -9,6 +9,7 @@ keywords: [integrate]
 import IntegrationPrereqsCloud from "versionContent/_partials/_integration-prereqs-cloud-only.mdx";
 import DataDogExporter from "versionContent/_partials/_datadog-data-exporter.mdx";
 import ManageDataExporter from "versionContent/_partials/_manage-a-data-exporter.mdx";
+import NotSupportedAzure from "versionContent/_partials/_not-supported-for-azure.mdx";
 
 # Integrate Datadog with $CLOUD_LONG
 
@@ -36,6 +37,8 @@ This page explains how to:
 
 - Install [Datadog Agent][datadog-agent-install].
 
+
+
 ## Monitor $SERVICE_LONG metrics with Datadog
 
 Export telemetry data from your $SERVICE_LONGs with the time-series and analytics capability enabled to
@@ -132,6 +135,9 @@ metrics about your $SERVICE_LONGs.
 Metrics for your $SERVICE_LONG are now visible in Datadog. Check the Datadog $PG integration documentation for a
 comprehensive list of [metrics][datadog-postgres-metrics] collected.
 
+
+
+
 [datadog]: https://www.datadoghq.com/
 [datadog-agent-install]: https://docs.datadoghq.com/getting_started/agent/#installation
 [datadog-postgres]: https://docs.datadoghq.com/integrations/postgres/
diff --git a/integrations/google-cloud.md b/integrations/google-cloud.md
index 83f7cf095b..a0659da661 100644
--- a/integrations/google-cloud.md
+++ b/integrations/google-cloud.md
@@ -8,6 +8,7 @@ keywords: [Google Cloud, integrations]
 
 import IntegrationPrereqsCloud from "versionContent/_partials/_integration-prereqs-cloud-only.mdx";
 import TransitGateway from "versionContent/_partials/_transit-gateway.mdx";
+import NotSupportedAzure from "versionContent/_partials/_not-supported-for-azure.mdx";
 
 # Integrate Google Cloud with $CLOUD_LONG
 
@@ -20,6 +21,7 @@ This page explains how to integrate your Google Cloud infrastructure with $CLOUD
 
 
 - Set up [AWS Transit Gateway][gtw-setup].
+
 
 ## Connect your Google Cloud infrastructure to your $SERVICE_LONGs
 
@@ -37,6 +39,12 @@ To connect to $CLOUD_LONG:
 
 You have successfully integrated your Google Cloud infrastructure with $CLOUD_LONG.
 
+
+
+
+
+
+
 [google-cloud]: https://cloud.google.com/?hl=en
 [aws-transit-gateway]: https://aws.amazon.com/transit-gateway/
 [gtw-setup]: https://docs.aws.amazon.com/vpc/latest/tgw/tgw-getting-started.html
diff --git a/integrations/microsoft-azure.md b/integrations/microsoft-azure.md
index 0be270d87a..7787d04279 100644
--- a/integrations/microsoft-azure.md
+++ b/integrations/microsoft-azure.md
@@ -8,9 +8,11 @@ keywords: [Azure, integrations]
 
 import IntegrationPrereqsCloud from "versionContent/_partials/_integration-prereqs-cloud-only.mdx";
 import TransitGateway from "versionContent/_partials/_transit-gateway.mdx";
+import NotSupportedAzure from "versionContent/_partials/_not-supported-for-azure.mdx";
 
 # Integrate Microsoft Azure with $CLOUD_LONG
 
+
 [Microsoft Azure][azure] is a cloud computing platform and services suite, offering infrastructure, AI, analytics, security, and developer tools to help businesses build, deploy, and manage applications.
 
 This page explains how to integrate your Microsoft Azure infrastructure with $CLOUD_LONG using [AWS Transit Gateway][aws-transit-gateway].
@@ -20,6 +22,7 @@ This page explains how to integrate your Microsoft Azure infrastructure with $CL
 
 
 - Set up [AWS Transit Gateway][gtw-setup].
+
 
 ## Connect your Microsoft Azure infrastructure to your $SERVICE_LONGs
 
@@ -37,6 +40,10 @@ To connect to $CLOUD_LONG:
 
 You have successfully integrated your Microsoft Azure infrastructure with $CLOUD_LONG.
 
+
+
+
+
 [aws-transit-gateway]: https://aws.amazon.com/transit-gateway/
 [gtw-setup]: https://docs.aws.amazon.com/vpc/latest/tgw/tgw-getting-started.html
 [azure]: https://azure.microsoft.com/en-gb/
diff --git a/integrations/prometheus.md b/integrations/prometheus.md
index ae4a61531d..11d85e1080 100644
--- a/integrations/prometheus.md
+++ b/integrations/prometheus.md
@@ -7,7 +7,9 @@ keywords: [integrate]
 ---
 
 import PrometheusIntegrate from "versionContent/_partials/_prometheus-integrate.mdx";
+import NotSupportedAzure from "versionContent/_partials/_not-supported-for-azure.mdx";
 
 # Integrate Prometheus with $CLOUD_LONG
 
-
\ No newline at end of file
+
+
diff --git a/use-timescale/data-tiering/about-data-tiering.md b/use-timescale/data-tiering/about-data-tiering.md
index 9a2f71300b..a6fc0791de 100644
--- a/use-timescale/data-tiering/about-data-tiering.md
+++ b/use-timescale/data-tiering/about-data-tiering.md
@@ -10,6 +10,7 @@ cloud_ui:
 ---
 
 import TieredStorageBilling from "versionContent/_partials/_tiered-storage-billing.mdx";
+import NotSupportedAzure from "versionContent/_partials/_not-supported-for-azure.mdx";
 
 # About storage tiers
 
@@ -34,6 +35,8 @@ $CLOUD_LONG high-performance storage comes in the following types:
 
 Once you [enable tiered storage][manage-tiering], you can start moving rarely used data to the object tier. The object tier is based on AWS S3 and stores your data in the [Apache Parquet][parquet] format. Within a Parquet file, a set of rows is grouped together to form a row group. Within a row group, values for a single column across multiple rows are stored together. The original size of the data in your $SERVICE_SHORT, compressed or uncompressed, does not correspond directly to its size in S3. A compressed hypertable may even take more space in S3 than it does in $CLOUD_LONG.
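To make the row-group layout concrete, here is a minimal sketch that inspects a Parquet file with pyarrow and prints its row groups. The file name is a placeholder used purely for illustration; you do not read the tiered objects in S3 directly.

```python
# Illustration of the Parquet layout described above: rows are grouped
# into row groups, and values for one column are stored together within
# each group. Requires pyarrow; the file path is a placeholder.
import pyarrow.parquet as pq

meta = pq.ParquetFile("example.parquet").metadata

print("row groups:", meta.num_row_groups)
for i in range(meta.num_row_groups):
    rg = meta.row_group(i)
    print(f"row group {i}: {rg.num_rows} rows, {rg.total_byte_size} bytes")
```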
 
+
+
 Apache Parquet allows for more efficient scans across longer time periods, and $CLOUD_LONG uses other metadata and query optimizations to reduce the amount of data that needs to be fetched to satisfy a query, such as: 
 
 - **Chunk skipping**: exclude the chunks that fall outside the query time window.
@@ -122,6 +125,7 @@ The low-cost storage tier comes with the following limitations:
     partitioned on more than one dimension. Make sure your hypertables are
     partitioned on time only, before you enable tiered storage.
 
+
 [blog-data-tiering]: https://www.timescale.com/blog/expanding-the-boundaries-of-postgresql-announcing-a-bottomless-consumption-based-object-storage-layer-built-on-amazon-s3/
 [querying-tiered-data]: /use-timescale/:currentVersion:/data-tiering/querying-tiered-data/
 [parquet]: https://parquet.apache.org/
diff --git a/use-timescale/data-tiering/enabling-data-tiering.md b/use-timescale/data-tiering/enabling-data-tiering.md
index 08bebb0656..ceb62e3b87 100644
--- a/use-timescale/data-tiering/enabling-data-tiering.md
+++ b/use-timescale/data-tiering/enabling-data-tiering.md
@@ -11,6 +11,7 @@ cloud_ui:
 ---
 
 import TieredStorageBilling from "versionContent/_partials/_tiered-storage-billing.mdx";
+import NotSupportedAzure from "versionContent/_partials/_not-supported-for-azure.mdx";
 
 # Manage storage and tiering
 
@@ -54,7 +55,11 @@ This storage type gives you up to 16 TB of storage and is available under [all $
 
 
 
-This storage type gives you up to 64 TB and 32,000 IOPS, and is available under the [$ENTERPRISE $PRICING_PLAN][pricing-plans]. To get enhanced storage:
+This storage type gives you up to 64 TB and 32,000 IOPS, and is available under the [$ENTERPRISE $PRICING_PLAN][pricing-plans]. 
+
+
+
+To get enhanced storage:
 
 
 
@@ -87,6 +92,8 @@ You change from enhanced storage to standard in the same way. If you are using o
 
 You enable the low-cost object storage tier in $CONSOLE and then tier the data with policies or manually. 
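As a rough sketch of what tiering by policy looks like from a client, the snippet below adds a tiering policy over psycopg2. It assumes the `add_tiering_policy` function from the tiered storage API; the table name, interval, and connection string are placeholders.

```python
# Sketch: tier chunks of a hypertable by policy once tiered storage is
# enabled for the service. Connection string, table, and interval are
# placeholders; add_tiering_policy is assumed from the tiered storage API.
import psycopg2

conn = psycopg2.connect("postgresql://tsdbadmin:password@host.example.com:5432/tsdb")
with conn, conn.cursor() as cur:
    # Move chunks older than three weeks to the object storage tier.
    cur.execute("SELECT add_tiering_policy('metrics', INTERVAL '3 weeks');")
```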
 
+
+
 ### Enable tiered storage
 
 You enable tiered storage from the `Overview` tab in $CONSOLE.
@@ -280,6 +287,7 @@ If you no longer want to use tiered storage for a particular hypertable, drop th
 
 
 
+
 [data-retention]: /use-timescale/:currentVersion:/data-retention/
 [console]: https://console.cloud.timescale.com/dashboard/services
 [hypertable]: /use-timescale/:currentVersion:/hypertables/
diff --git a/use-timescale/data-tiering/index.md b/use-timescale/data-tiering/index.md
index fac5c509c9..046e307f69 100644
--- a/use-timescale/data-tiering/index.md
+++ b/use-timescale/data-tiering/index.md
@@ -1,6 +1,6 @@
 ---
-title: Storage in Tiger
-excerpt: Save on storage costs by tiering older data to a low-cost bottomless object storage tier. Tiger Cloud tiered storage makes sure you cut costs while having data available for analytical queries
+title: Storage on Tiger Cloud
+excerpt: Save on storage costs by tiering older data to a low-cost bottomless object storage tier. Tiger tiered storage cuts costs while keeping your data available for analytical queries
 products: [cloud]
 keywords: [tiered storage]
 tags: [storage, data management]
@@ -8,6 +8,10 @@ tags: [storage, data management]
 
 # Storage
 
+
+
+
+
 Tiered storage is a [hierarchical storage management architecture][hierarchical-storage] for 
 [real-time analytics][create-service] $SERVICE_SHORTs you create in [$CLOUD_LONG](https://console.cloud.timescale.com/).
 
@@ -42,6 +46,19 @@ In this section, you:
 * [Learn about replicas and forks with tiered data][replicas-and-forks]: understand how tiered storage works
   with forks and replicas of your $SERVICE_SHORT.
 
+
+
+
+
+$CLOUD_LONG stores your data in high-performance storage optimized for frequent querying. Based on [AWS EBS gp3][aws-gp3], the high-performance storage provides you with up to 16 TB and 16,000 IOPS. Its [$HYPERCORE row-columnar storage engine][hypercore], designed specifically for real-time analytics, enables you to compress your data by up to 98%, while improving performance. 
+
+Coupled with other optimizations, $CLOUD_LONG high-performance storage keeps your data always accessible and your queries fast.
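For a sense of how the row-columnar engine achieves this, here is a minimal sketch that enables compression on a hypertable and schedules a compression policy. It uses the long-standing `timescaledb.compress` and `add_compression_policy` API; table and column names and the connection string are placeholders, and newer releases also expose a columnstore-named equivalent.

```python
# Sketch: enable compression on a hypertable and compress data older than
# seven days automatically. Uses the long-standing compression API; names
# and the connection string are placeholders.
import psycopg2

conn = psycopg2.connect("postgresql://tsdbadmin:password@host.example.com:5432/tsdb")
with conn, conn.cursor() as cur:
    cur.execute("""
        ALTER TABLE metrics SET (
            timescaledb.compress,
            timescaledb.compress_segmentby = 'device_id'
        );
    """)
    cur.execute("SELECT add_compression_policy('metrics', INTERVAL '7 days');")
```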
+
+
+
+
+
+
 [about-data-tiering]: /use-timescale/:currentVersion:/data-tiering/about-data-tiering/
 [enabling-data-tiering]: /use-timescale/:currentVersion:/data-tiering/enabling-data-tiering/
 [replicas-and-forks]: /use-timescale/:currentVersion:/data-tiering/tiered-data-replicas-forks/
@@ -49,4 +66,6 @@ In this section, you:
 [querying-tiered-data]: /use-timescale/:currentVersion:/data-tiering/querying-tiered-data/
 [add-retention-policies]: /api/:currentVersion:/continuous-aggregates/add_policies/
 [create-service]: /getting-started/:currentVersion:/services/
-[hierarchical-storage]: https://en.wikipedia.org/wiki/Hierarchical_storage_management
\ No newline at end of file
+[hierarchical-storage]: https://en.wikipedia.org/wiki/Hierarchical_storage_management
+[hypercore]: /use-timescale/:currentVersion:/hypercore
+[aws-gp3]: https://docs.aws.amazon.com/ebs/latest/userguide/general-purpose.html
\ No newline at end of file
diff --git a/use-timescale/data-tiering/querying-tiered-data.md b/use-timescale/data-tiering/querying-tiered-data.md
index 9b19ed5686..a33a961a6d 100644
--- a/use-timescale/data-tiering/querying-tiered-data.md
+++ b/use-timescale/data-tiering/querying-tiered-data.md
@@ -7,6 +7,8 @@ keywords: [ tiered storage, tiering ]
 tags: [ storage, data management ]
 ---
 
+import NotSupportedAzure from "versionContent/_partials/_not-supported-for-azure.mdx";
+
 # Querying tiered data
 
 Once rarely used data is tiered and migrated to the object storage tier, it can still be queried 
@@ -24,6 +26,8 @@ Your hypertable is spread across the tiers, so queries and `JOIN`s work and fetc
 By default, tiered data is not accessed by queries. Querying tiered data may slow down query performance 
 as the data is not stored locally on the high-performance storage tier. See [Performance considerations](#performance-considerations).
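As a compact preview of the sections that follow, the sketch below opts a single session into reading tiered data and runs one query against it. It assumes the `timescaledb.enable_tiered_reads` setting covered in this guide; the table, time window, and connection string are placeholders.

```python
# Sketch: enable tiered reads for one session only, then query data that
# may live in the object storage tier. Placeholders throughout; the
# timescaledb.enable_tiered_reads setting is assumed from this guide.
import psycopg2

conn = psycopg2.connect("postgresql://tsdbadmin:password@host.example.com:5432/tsdb")
with conn, conn.cursor() as cur:
    cur.execute("SET timescaledb.enable_tiered_reads = true;")
    cur.execute(
        "SELECT count(*) FROM metrics WHERE ts < now() - INTERVAL '6 months';"
    )
    print(cur.fetchone()[0])
```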
 
+
+
 ## Enable querying tiered data for a single query
 
 
@@ -186,3 +190,5 @@ Queries over tiered data are expected to be slower than over local data. However
 
 * Text and non-native types (JSON, JSONB, GIS) filtering is slower when querying tiered data.
 
+
+
diff --git a/use-timescale/data-tiering/tiered-data-replicas-forks.md b/use-timescale/data-tiering/tiered-data-replicas-forks.md
index 645ea13a6d..d1b23af3b7 100644
--- a/use-timescale/data-tiering/tiered-data-replicas-forks.md
+++ b/use-timescale/data-tiering/tiered-data-replicas-forks.md
@@ -7,7 +7,9 @@ keywords: [tiered storage]
 tags: [storage, data management]
 ---
 
-# How tiered data works on replicas and forks 
+import NotSupportedAzure from "versionContent/_partials/_not-supported-for-azure.mdx";
+
+# How tiered data works on replicas and forks
 
 There is one more thing that makes Tiered Storage even more amazing: when you keep data in the low-cost object storage tier,
 you pay for this data only once, regardless of whether you have a [high-availability replica][ha-replica]
@@ -19,6 +21,8 @@ When creating one (or more) forks, you won't be billed for data shared with the
 If you decide to tier more data that's not in the primary, you will pay to store it in the low-cost tier,
 but you will still see substantial savings by moving that data from the high-performance tier of the fork to the cheaper object storage tier.
 
+
+
 ## How this works behind the scenes
 
 Once you tier data to the low-cost object storage tier, we keep a reference to that data on your Database's catalog.
@@ -68,6 +72,7 @@ In the case of such a restore, new references are added to the deleted tiered ch
 
 Once 14 days pass after soft deleting the data, that is, once the number of references to the tiered data drops to 0, we hard delete the tiered data.
 
+
 [ha-replica]: /use-timescale/:currentVersion:/ha-replicas/high-availability/
 [read-replica]: /use-timescale/:currentVersion:/ha-replicas/read-scaling/#read-replicas
 [operations-forking]: /use-timescale/:currentVersion:/services/service-management/#fork-a-service
diff --git a/use-timescale/metrics-logging/aws-cloudwatch.md b/use-timescale/metrics-logging/aws-cloudwatch.md
index 21167975c7..a5ee21f90a 100644
--- a/use-timescale/metrics-logging/aws-cloudwatch.md
+++ b/use-timescale/metrics-logging/aws-cloudwatch.md
@@ -10,6 +10,7 @@ tags: [telemetry, monitor]
 import ManageDataExporter from "versionContent/_partials/_manage-a-data-exporter.mdx";
 import PrereqsCloud from "versionContent/_partials/_prereqs-cloud-no-connection.mdx";
 import CloudWatchExporter from "versionContent/_partials/_cloudwatch-data-exporter.mdx";
+import NotSupportedAzure from "versionContent/_partials/_not-supported-for-azure.mdx";
 
 # Export telemetry data to AWS Cloudwatch
 
@@ -22,6 +23,8 @@ This page shows you how to create an Amazon CloudWatch exporter in $CONSOLE, and
 
 
 
+
+
 ## Create a data exporter
 
 $CLOUD_LONG data exporters send telemetry data from a $SERVICE_LONG to a third-party monitoring
@@ -35,6 +38,8 @@ This section shows you how to attach, monitor, edit, and delete a data exporter.
 
 
 
+
+
 [cloudwatch]: https://aws.amazon.com/cloudwatch/
 [cloudwatch-docs]: https://docs.aws.amazon.com/cloudwatch/index.html
 [console-integrations]: https://console.cloud.timescale.com/dashboard/integrations
diff --git a/use-timescale/metrics-logging/datadog.md b/use-timescale/metrics-logging/datadog.md
index 48ce55f8a9..a1dcbb4cc3 100644
--- a/use-timescale/metrics-logging/datadog.md
+++ b/use-timescale/metrics-logging/datadog.md
@@ -10,6 +10,7 @@ tags: [telemetry, monitor]
 import DataDogExporter from "versionContent/_partials/_datadog-data-exporter.mdx";
 import PrereqsCloud from "versionContent/_partials/_prereqs-cloud-no-connection.mdx";
 import ManageDataExporter from "versionContent/_partials/_manage-a-data-exporter.mdx";
+import NotSupportedAzure from "versionContent/_partials/_not-supported-for-azure.mdx";
 
 # Export telemetry data to Datadog
 
@@ -21,6 +22,8 @@ This page shows you how to create a Datadog exporter in $CONSOLE, and manage the
 
 
 
+
+
 ## Create a data exporter
 
 $CLOUD_LONG data exporters send telemetry data from a $SERVICE_LONG to third-party monitoring
@@ -34,6 +37,8 @@ This section shows you how to attach, monitor, edit, and delete a data exporter.
 
 
 
+
+
 [datadog]: https://www.datadoghq.com
 [datadog-api-key]: https://docs.datadoghq.com/account_management/api-app-keys/#add-an-api-key-or-client-token
 [datadog-docs]: https://docs.datadoghq.com/
diff --git a/use-timescale/metrics-logging/index.md b/use-timescale/metrics-logging/index.md
index 47000ace7d..4d6e9147be 100644
--- a/use-timescale/metrics-logging/index.md
+++ b/use-timescale/metrics-logging/index.md
@@ -19,6 +19,7 @@ Find metrics and logs for your $SERVICE_SHORTs in $CONSOLE, or integrate with th
 *   Export metrics to [Amazon Cloudwatch][cloudwatch].
 *   Export metrics to [Prometheus][prometheus].
 
+
 [prometheus]: /use-timescale/:currentVersion:/metrics-logging/metrics-to-prometheus/
 [datadog]: /use-timescale/:currentVersion:/metrics-logging/datadog/
 [cloudwatch]: /use-timescale/:currentVersion:/metrics-logging/aws-cloudwatch/
diff --git a/use-timescale/metrics-logging/metrics-to-prometheus.md b/use-timescale/metrics-logging/metrics-to-prometheus.md
index df2e3c2a6d..6bfbb8aa52 100644
--- a/use-timescale/metrics-logging/metrics-to-prometheus.md
+++ b/use-timescale/metrics-logging/metrics-to-prometheus.md
@@ -15,4 +15,4 @@ import PrometheusIntegrate from "versionContent/_partials/_prometheus-integrate.
 
 # Export metrics to Prometheus
 
-
\ No newline at end of file
+
diff --git a/use-timescale/security/transit-gateway.md b/use-timescale/security/transit-gateway.md
index 735488b652..8721a3aece 100644
--- a/use-timescale/security/transit-gateway.md
+++ b/use-timescale/security/transit-gateway.md
@@ -11,11 +11,14 @@ cloud_ui:
 ---
 
 import TransitGateway from "versionContent/_partials/_transit-gateway.mdx";
+import NotSupportedAzure from "versionContent/_partials/_not-supported-for-azure.mdx";
 
 # Securely connect to $CLOUD_LONG using AWS Transit Gateway
 
 [AWS Transit Gateway][aws-transit-gateway] enables you to securely connect to your $CLOUD_LONG from AWS, Google Cloud, Microsoft Azure, or any other cloud or on-premise environment.
 
+
+
 You use AWS Transit Gateway as a traffic controller for your network. Instead of setting up multiple direct connections to different clouds, on-premise data centers, and other AWS services, you connect everything to AWS Transit Gateway. This simplifies your network and makes it easier to manage and scale.
 
 You can then create a peering connection between your $SERVICE_LONGs and AWS Transit Gateway in $CLOUD_LONG. This means that, no matter how big or complex your infrastructure is, you can connect securely to your $SERVICE_LONGs. 
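For the AWS side of the picture, here is a rough boto3 sketch that creates a transit gateway and attaches one of your VPCs to it. The region, VPC ID, and subnet ID are placeholders; the peering with your $SERVICE_LONGs is still set up in $CONSOLE as this page describes.

```python
# Rough sketch of the AWS-side setup: create a transit gateway and attach
# one of your VPCs to it. Region, VPC ID, and subnet ID are placeholders.
import boto3

ec2 = boto3.client("ec2", region_name="us-east-1")

tgw = ec2.create_transit_gateway(Description="Peering with $CLOUD_LONG")
tgw_id = tgw["TransitGateway"]["TransitGatewayId"]

ec2.create_transit_gateway_vpc_attachment(
    TransitGatewayId=tgw_id,
    VpcId="vpc-0123456789abcdef0",
    SubnetIds=["subnet-0123456789abcdef0"],
)
```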
@@ -87,7 +90,7 @@ AWS Transit Gateway enables you to connect from almost any environment, this pag
 
 
 
-You can now securely access your $SERVICE_SHORTs in $CLOUD_LONG. 
+You can now securely access your $SERVICE_SHORTs in $CLOUD_LONG.
 
 [aws-transit-gateway]: https://aws.amazon.com/transit-gateway/
 [pricing-plans]: /about/:currentVersion:/pricing-and-account-management/
diff --git a/use-timescale/security/vpc.md b/use-timescale/security/vpc.md
index edccadb84b..f195b8d3fe 100644
--- a/use-timescale/security/vpc.md
+++ b/use-timescale/security/vpc.md
@@ -11,6 +11,7 @@ cloud_ui:
 ---
 
 import VpcLimitations from "versionContent/_partials/_vpc-limitations.mdx";
+import NotSupportedAzure from "versionContent/_partials/_not-supported-for-azure.mdx";
 
 # Secure your $CLOUD_LONG services with $VPC Peering and AWS PrivateLink
 
@@ -18,6 +19,8 @@ You use Virtual Private Cloud ($VPC) peering to ensure that your $SERVICE_LONGs
 only accessible through your secured AWS infrastructure. This reduces the potential 
 attack vector surface and improves security.
 
+
+
 The data isolation architecture that ensures a highly secure connection between your apps and 
 $CLOUD_LONG is:
 
@@ -216,6 +219,7 @@ Migration takes a few minutes to complete and requires a change to DNS settings
 $SERVICE_SHORT. The $SERVICE_SHORT is not accessible during this time. If you receive a DNS error, allow
 some time for DNS propagation.
 
+
 [aws-dashboard]: https://console.aws.amazon.com/vpc/home#PeeringConnections:
 [aws-security-groups]: https://console.aws.amazon.com/vpcconsole/home#securityGroups:
 [console-login]: https://console.cloud.timescale.com/