diff --git a/_partials/_not-supported-for-azure.mdx b/_partials/_not-supported-for-azure.mdx
new file mode 100644
index 0000000000..cc108fd357
--- /dev/null
+++ b/_partials/_not-supported-for-azure.mdx
@@ -0,0 +1,5 @@
+
+
+This feature is not yet supported for $CLOUD_LONG on Microsoft Azure. It is on our roadmap. Stay tuned!
+
+
\ No newline at end of file
diff --git a/migrate/livesync-for-kafka.md b/migrate/livesync-for-kafka.md
index de2b713f53..af08ad025a 100644
--- a/migrate/livesync-for-kafka.md
+++ b/migrate/livesync-for-kafka.md
@@ -9,12 +9,13 @@ tags: [stream, connector]
import PrereqCloud from "versionContent/_partials/_prereqs-cloud-only.mdx";
import EarlyAccessNoRelease from "versionContent/_partials/_early_access.mdx";
+import NotSupportedAzure from "versionContent/_partials/_not-supported-for-azure.mdx";
# Stream data from Kafka
You use the Kafka source connector in $CLOUD_LONG to stream events from Kafka into your $SERVICE_SHORT. $CLOUD_LONG connects to your Confluent Cloud Kafka cluster and Schema Registry using SASL/SCRAM authentication and service account–based API keys. Only the Avro format is currently supported [with some limitations][limitations].
-This page explains how to connect $CLOUD_LONG to your Confluence Cloud Kafka cluster.
+This page explains how to connect $CLOUD_LONG to your Confluent Cloud Kafka cluster.
<EarlyAccessNoRelease />: the Kafka source connector is not yet supported for production use.
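The connector reads Avro-encoded events, so whatever writes to the source topic has to register a schema and serialize values against it. The following is a minimal, hypothetical sketch of such a producer using the confluent-kafka Python client; the topic name, schema, endpoints, and credentials are placeholders, and the SASL settings mirror the SCRAM authentication described above.

```python
from confluent_kafka import SerializingProducer
from confluent_kafka.schema_registry import SchemaRegistryClient
from confluent_kafka.schema_registry.avro import AvroSerializer

# Hypothetical Avro schema for the events the connector will stream.
schema_str = """
{
  "type": "record",
  "name": "Metric",
  "fields": [
    {"name": "device_id", "type": "string"},
    {"name": "ts_ms", "type": "long"},
    {"name": "temperature", "type": "double"}
  ]
}
"""

# Placeholder Schema Registry endpoint and API key.
registry = SchemaRegistryClient({
    "url": "https://psrc-xxxxx.us-east-1.aws.confluent.cloud",
    "basic.auth.user.info": "SR_API_KEY:SR_API_SECRET",
})

producer = SerializingProducer({
    # Placeholder bootstrap server and SCRAM credentials for the Kafka cluster.
    "bootstrap.servers": "pkc-xxxxx.us-east-1.aws.confluent.cloud:9092",
    "security.protocol": "SASL_SSL",
    "sasl.mechanisms": "SCRAM-SHA-256",
    "sasl.username": "KAFKA_API_KEY",
    "sasl.password": "KAFKA_API_SECRET",
    "value.serializer": AvroSerializer(registry, schema_str),
})

# Produce one Avro-encoded event to the (hypothetical) source topic.
producer.produce(
    topic="sensor-events",
    value={"device_id": "device-1", "ts_ms": 1735689600000, "temperature": 21.5},
)
producer.flush()
```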
@@ -25,6 +26,8 @@ This page explains how to connect $CLOUD_LONG to your Confluence Cloud Kafka clu
-- [Sign up][confluence-signup] for Confluence Cloud.
-- [Create][create-kafka-cluster] a Kafka cluster in Confluence Cloud.
+- [Sign up][confluence-signup] for Confluent Cloud.
+- [Create][create-kafka-cluster] a Kafka cluster in Confluent Cloud.
+
+<NotSupportedAzure />
## Access your Kafka cluster in Confluent Cloud
Take the following steps to prepare your Kafka cluster for connection to $CLOUD_LONG:
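One piece of that preparation is making sure a topic exists for the connector to read from; you can create it in the Confluent Cloud UI, or programmatically. The following is a rough sketch of the programmatic route using confluent-kafka's AdminClient, with placeholder broker address and credentials.

```python
from confluent_kafka.admin import AdminClient, NewTopic

# Placeholder bootstrap server and SCRAM credentials.
admin = AdminClient({
    "bootstrap.servers": "pkc-xxxxx.us-east-1.aws.confluent.cloud:9092",
    "security.protocol": "SASL_SSL",
    "sasl.mechanisms": "SCRAM-SHA-256",
    "sasl.username": "KAFKA_API_KEY",
    "sasl.password": "KAFKA_API_SECRET",
})

# Request a hypothetical three-partition topic and wait for the broker to confirm it.
futures = admin.create_topics([NewTopic("sensor-events", num_partitions=3, replication_factor=3)])
for topic, future in futures.items():
    future.result()  # raises if creation failed
    print(f"Created topic {topic}")
```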
diff --git a/migrate/livesync-for-s3.md b/migrate/livesync-for-s3.md
index 4997904fba..b5c9e91ae1 100644
--- a/migrate/livesync-for-s3.md
+++ b/migrate/livesync-for-s3.md
@@ -9,6 +9,7 @@ tags: [recovery, logical backup, replication]
import PrereqCloud from "versionContent/_partials/_prereqs-cloud-only.mdx";
import EarlyAccessNoRelease from "versionContent/_partials/_early_access.mdx";
+import NotSupportedAzure from "versionContent/_partials/_not-supported-for-azure.mdx";
# Sync data from S3
@@ -63,6 +64,8 @@ The $S3_CONNECTOR continuously imports data from an Amazon S3 bucket into your d
- [Public anonymous user][credentials-public].
+
+<NotSupportedAzure />
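Before configuring the connector, it can help to confirm that the bucket is actually readable with the credential option you chose. A minimal boto3 sketch for the public anonymous case (bucket name and prefix are made up):

```python
import boto3
from botocore import UNSIGNED
from botocore.config import Config

# Anonymous (unsigned) access, matching the public anonymous user option above.
s3 = boto3.client("s3", config=Config(signature_version=UNSIGNED))

# List a few objects under the prefix the connector will watch.
response = s3.list_objects_v2(Bucket="my-public-bucket", Prefix="exports/", MaxKeys=10)
for obj in response.get("Contents", []):
    print(obj["Key"], obj["Size"])
```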
## Limitations
- **File naming**:
@@ -162,6 +165,8 @@ To sync data from your S3 bucket to your $SERVICE_LONG using $CONSOLE:
-And that is it, you are using the $S3_CONNECTOR to synchronize all the data, or specific files, from an S3 bucket to your
-$SERVICE_LONG in real time.
+And that is it: you are using the $S3_CONNECTOR to synchronize all the data, or specific files, from an S3 bucket to your
+$SERVICE_LONG in real time.
+
+<NotSupportedAzure />
[about-hypertables]: /use-timescale/:currentVersion:/hypertables/
[lives-sync-specify-tables]: /migrate/:currentVersion:/livesync-for-postgresql/#specify-the-tables-to-synchronize
[compression]: /use-timescale/:currentVersion:/compression/about-compression
diff --git a/migrate/upload-file-using-console.md b/migrate/upload-file-using-console.md
index accd788d8e..8fb713b2ba 100644
--- a/migrate/upload-file-using-console.md
+++ b/migrate/upload-file-using-console.md
@@ -8,6 +8,7 @@ keywords: [import]
import ImportPrerequisitesCloudNoConnection from "versionContent/_partials/_prereqs-cloud-no-connection.mdx";
import EarlyAccessGeneral from "versionContent/_partials/_early_access.mdx";
import NotAvailableFreePlan from "versionContent/_partials/_not-available-in-free-plan.mdx";
+import NotSupportedAzure from "versionContent/_partials/_not-supported-for-azure.mdx";
# Upload a file into your $SERVICE_SHORT using $CONSOLE_LONG
@@ -25,6 +26,8 @@ $CONSOLE_LONG enables you to drag and drop files to upload from your local machi
+
+<NotSupportedAzure />
@@ -127,6 +130,8 @@ $CONSOLE_LONG enables you to upload CSV and Parquet files, including archives co
- [IAM Role][credentials-iam].
- [Public anonymous user][credentials-public].
+
+<NotSupportedAzure />
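If you are producing the Parquet file yourself, a short pandas sketch like the one below (column names and values are invented) writes a compressed file that can then be dragged into $CONSOLE_LONG or staged in an S3 bucket for import:

```python
import pandas as pd

# Hypothetical time-series sample; any consistent column types will do.
df = pd.DataFrame({
    "time": pd.date_range("2025-01-01", periods=1_000, freq="min"),
    "device_id": ["device-1"] * 1_000,
    "temperature": [21.5 + i * 0.01 for i in range(1_000)],
})

# Write a zstd-compressed Parquet file using the pyarrow engine.
df.to_parquet("metrics.parquet", engine="pyarrow", compression="zstd", index=False)
```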
@@ -205,7 +210,6 @@ To import a Parquet file from an S3 bucket:
-
-And that is it, you have imported your data to your $SERVICE_LONG.
+And that is it: you have imported your data to your $SERVICE_LONG.
[credentials-iam]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user.html#roles-creatingrole-user-console
diff --git a/use-timescale/tigerlake.md b/use-timescale/tigerlake.md
index 236ba73e12..235c8a9dad 100644
--- a/use-timescale/tigerlake.md
+++ b/use-timescale/tigerlake.md
@@ -8,6 +8,7 @@ keywords: [data lake, lakehouse, s3, iceberg]
import IntegrationPrereqsCloud from "versionContent/_partials/_integration-prereqs-cloud-only.mdx";
import EarlyAccessGeneral from "versionContent/_partials/_early_access.mdx";
+import NotSupportedAzure from "versionContent/_partials/_not-supported-for-azure.mdx";
# Integrate data lakes with $CLOUD_LONG
@@ -29,6 +30,8 @@ Tiger Lake is currently in private beta. Please contact us to request access.
+
+<NotSupportedAzure />
## Integrate a data lake with your $SERVICE_LONG
To connect a $SERVICE_LONG to your data lake:
@@ -361,6 +364,9 @@ data lake:
-* Writing to the same S3 table bucket from multiple services is not supported, bucket-to-service mapping is one-to-one.
-* Iceberg snapshots are pruned automatically if the amount exceeds 2500.
+* Writing to the same S3 table bucket from multiple services is not supported; bucket-to-service mapping is one-to-one.
+* Iceberg snapshots are pruned automatically once their number exceeds 2500 (see the sketch below).
+
+<NotSupportedAzure />
+
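Because snapshots past the 2500 limit are pruned automatically, it can be useful to see how many a table has accumulated. A rough pyiceberg sketch, assuming a REST-style catalog; the catalog URI, namespace, and table name are placeholders, and the exact catalog configuration depends on your data lake setup:

```python
from pyiceberg.catalog import load_catalog

# Placeholder REST catalog endpoint; substitute your data lake's catalog settings.
catalog = load_catalog("lake", **{"type": "rest", "uri": "https://catalog.example.com"})

# Hypothetical namespace and table name.
table = catalog.load_table("analytics.metrics")

snapshots = table.metadata.snapshots
print(f"{len(snapshots)} snapshots (automatic pruning starts past 2500)")
for snap in snapshots[-5:]:
    print(snap.snapshot_id, snap.timestamp_ms)
```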
[cmc]: https://console.aws.amazon.com/cloudformation/
[aws-athena]: https://aws.amazon.com/athena/
[apache-spark]: https://spark.apache.org/