diff --git a/solution/1600-1699/1625.Lexicographically Smallest String After Applying Operations/README.md b/solution/1600-1699/1625.Lexicographically Smallest String After Applying Operations/README.md index 46c2a2f6d59c5..afe40a9053eea 100644 --- a/solution/1600-1699/1625.Lexicographically Smallest String After Applying Operations/README.md +++ b/solution/1600-1699/1625.Lexicographically Smallest String After Applying Operations/README.md @@ -216,6 +216,35 @@ func findLexSmallestString(s string, a int, b int) string { } ``` +#### TypeScript + +```ts +function findLexSmallestString(s: string, a: number, b: number): string { + const q: string[] = [s]; + const vis = new Set([s]); + let ans = s; + let i = 0; + while (i < q.length) { + s = q[i++]; + if (ans > s) { + ans = s; + } + const t1 = s + .split('') + .map((c, j) => (j & 1 ? String((Number(c) + a) % 10) : c)) + .join(''); + const t2 = s.slice(-b) + s.slice(0, -b); + for (const t of [t1, t2]) { + if (!vis.has(t)) { + vis.add(t); + q.push(t); + } + } + } + return ans; +} +``` + @@ -364,6 +393,41 @@ func findLexSmallestString(s string, a int, b int) string { } ``` +#### TypeScript + +```ts +function findLexSmallestString(s: string, a: number, b: number): string { + let ans = s; + const n = s.length; + let arr = s.split(''); + for (let _ = 0; _ < n; _++) { + arr = arr.slice(-b).concat(arr.slice(0, -b)); + for (let j = 0; j < 10; j++) { + for (let k = 1; k < n; k += 2) { + arr[k] = String((Number(arr[k]) + a) % 10); + } + if (b & 1) { + for (let p = 0; p < 10; p++) { + for (let k = 0; k < n; k += 2) { + arr[k] = String((Number(arr[k]) + a) % 10); + } + const t = arr.join(''); + if (ans > t) { + ans = t; + } + } + } else { + const t = arr.join(''); + if (ans > t) { + ans = t; + } + } + } + } + return ans; +} +``` + diff --git a/solution/1600-1699/1625.Lexicographically Smallest String After Applying Operations/README_EN.md b/solution/1600-1699/1625.Lexicographically Smallest String After Applying 
Operations/README_EN.md index 93fbdf73b2432..8707fa473b9b6 100644 --- a/solution/1600-1699/1625.Lexicographically Smallest String After Applying Operations/README_EN.md +++ b/solution/1600-1699/1625.Lexicographically Smallest String After Applying Operations/README_EN.md @@ -91,7 +91,9 @@ There is no way to obtain a string that is lexicographically smaller than " -### Solution 1 +### Solution 1: BFS + +Since the data scale of this problem is relatively small, we can use BFS to brute-force search all possible states and then take the lexicographically smallest state. @@ -212,13 +214,50 @@ func findLexSmallestString(s string, a int, b int) string { } ``` +#### TypeScript + +```ts +function findLexSmallestString(s: string, a: number, b: number): string { + const q: string[] = [s]; + const vis = new Set([s]); + let ans = s; + let i = 0; + while (i < q.length) { + s = q[i++]; + if (ans > s) { + ans = s; + } + const t1 = s + .split('') + .map((c, j) => (j & 1 ? String((Number(c) + a) % 10) : c)) + .join(''); + const t2 = s.slice(-b) + s.slice(0, -b); + for (const t of [t1, t2]) { + if (!vis.has(t)) { + vis.add(t); + q.push(t); + } + } + } + return ans; +} +``` + -### Solution 2 +### Solution 2: Enumeration + +We observe that for the addition operation, a digit will return to its original state after at most $10$ additions; for the rotation operation, the string will also return to its original state after at most $n$ rotations. + +Therefore, the rotation operation produces at most $n$ states. If the rotation count $b$ is even, the addition operation only affects digits at odd indices, resulting in a total of $n \times 10$ states; if the rotation count $b$ is odd, the addition operation affects both odd and even index digits, resulting in a total of $n \times 10 \times 10$ states. + +Thus, we can directly enumerate all possible string states and take the lexicographically smallest one. 
+ +The time complexity is $O(n^2 \times 10^2)$ and the space complexity is $O(n)$, where $n$ is the length of string $s$. @@ -352,6 +391,41 @@ func findLexSmallestString(s string, a int, b int) string { } ``` +#### TypeScript + +```ts +function findLexSmallestString(s: string, a: number, b: number): string { + let ans = s; + const n = s.length; + let arr = s.split(''); + for (let _ = 0; _ < n; _++) { + arr = arr.slice(-b).concat(arr.slice(0, -b)); + for (let j = 0; j < 10; j++) { + for (let k = 1; k < n; k += 2) { + arr[k] = String((Number(arr[k]) + a) % 10); + } + if (b & 1) { + for (let p = 0; p < 10; p++) { + for (let k = 0; k < n; k += 2) { + arr[k] = String((Number(arr[k]) + a) % 10); + } + const t = arr.join(''); + if (ans > t) { + ans = t; + } + } + } else { + const t = arr.join(''); + if (ans > t) { + ans = t; + } + } + } + } + return ans; +} +``` + diff --git a/solution/1600-1699/1625.Lexicographically Smallest String After Applying Operations/Solution.ts b/solution/1600-1699/1625.Lexicographically Smallest String After Applying Operations/Solution.ts new file mode 100644 index 0000000000000..314d95ea7211d --- /dev/null +++ b/solution/1600-1699/1625.Lexicographically Smallest String After Applying Operations/Solution.ts @@ -0,0 +1,24 @@ +function findLexSmallestString(s: string, a: number, b: number): string { + const q: string[] = [s]; + const vis = new Set([s]); + let ans = s; + let i = 0; + while (i < q.length) { + s = q[i++]; + if (ans > s) { + ans = s; + } + const t1 = s + .split('') + .map((c, j) => (j & 1 ? 
String((Number(c) + a) % 10) : c)) + .join(''); + const t2 = s.slice(-b) + s.slice(0, -b); + for (const t of [t1, t2]) { + if (!vis.has(t)) { + vis.add(t); + q.push(t); + } + } + } + return ans; +} diff --git a/solution/1600-1699/1625.Lexicographically Smallest String After Applying Operations/Solution2.ts b/solution/1600-1699/1625.Lexicographically Smallest String After Applying Operations/Solution2.ts new file mode 100644 index 0000000000000..7a47651916135 --- /dev/null +++ b/solution/1600-1699/1625.Lexicographically Smallest String After Applying Operations/Solution2.ts @@ -0,0 +1,30 @@ +function findLexSmallestString(s: string, a: number, b: number): string { + let ans = s; + const n = s.length; + let arr = s.split(''); + for (let _ = 0; _ < n; _++) { + arr = arr.slice(-b).concat(arr.slice(0, -b)); + for (let j = 0; j < 10; j++) { + for (let k = 1; k < n; k += 2) { + arr[k] = String((Number(arr[k]) + a) % 10); + } + if (b & 1) { + for (let p = 0; p < 10; p++) { + for (let k = 0; k < n; k += 2) { + arr[k] = String((Number(arr[k]) + a) % 10); + } + const t = arr.join(''); + if (ans > t) { + ans = t; + } + } + } else { + const t = arr.join(''); + if (ans > t) { + ans = t; + } + } + } + } + return ans; +} diff --git a/solution/3700-3799/3716.Find Churn Risk Customers/README.md b/solution/3700-3799/3716.Find Churn Risk Customers/README.md index b2d0d929b002f..f9105f85b17f6 100644 --- a/solution/3700-3799/3716.Find Churn Risk Customers/README.md +++ b/solution/3700-3799/3716.Find Churn Risk Customers/README.md @@ -20,7 +20,7 @@ tags:
 +------------------+---------+
-| Column Name      | Type    | 
+| Column Name      | Type    |
 +------------------+---------+
 | event_id         | int     |
 | user_id          | int     |
@@ -157,14 +157,126 @@ monthly_amount 表示此次事件后的月度订阅费用。
 
 
 
-### 方法一
+### 方法一:分组统计 + 连接 + 条件筛选
+
+我们先通过窗口函数将每个用户的记录按照事件日期和事件 ID 降序排列,取排名第一的记录,得到每个用户的最新事件信息。然后,我们通过分组统计每个用户的订阅历史信息,包括订阅开始日期、最后事件日期、历史最高订阅费用以及降级事件的数量。最后,我们将最新事件信息与历史统计信息进行连接,并根据题目要求的条件进行筛选,得到流失风险客户列表。
 
 
 
 #### MySQL
 
 ```sql
+WITH
+    user_with_last_event AS (
+        SELECT
+            s.*,
+            ROW_NUMBER() OVER (
+                PARTITION BY user_id
+                ORDER BY event_date DESC, event_id DESC
+            ) AS rn
+        FROM subscription_events s
+    ),
+    user_history AS (
+        SELECT
+            user_id,
+            MIN(event_date) AS start_date,
+            MAX(event_date) AS last_event_date,
+            MAX(monthly_amount) AS max_historical_amount,
+            SUM(
+                CASE
+                    WHEN event_type = 'downgrade' THEN 1
+                    ELSE 0
+                END
+            ) AS downgrade_count
+        FROM subscription_events
+        GROUP BY user_id
+    ),
+    latest_event AS (
+        SELECT
+            user_id,
+            event_type AS last_event_type,
+            plan_name AS current_plan,
+            monthly_amount AS current_monthly_amount
+        FROM user_with_last_event
+        WHERE rn = 1
+    )
+SELECT
+    l.user_id,
+    l.current_plan,
+    l.current_monthly_amount,
+    h.max_historical_amount,
+    DATEDIFF(h.last_event_date, h.start_date) AS days_as_subscriber
+FROM
+    latest_event l
+    JOIN user_history h ON l.user_id = h.user_id
+WHERE
+    l.last_event_type <> 'cancel'
+    AND h.downgrade_count >= 1
+    AND l.current_monthly_amount < 0.5 * h.max_historical_amount
+    AND DATEDIFF(h.last_event_date, h.start_date) >= 60
+ORDER BY days_as_subscriber DESC, l.user_id ASC;
+```
 
+#### Pandas
+
+```python
+import pandas as pd
+
+
+def find_churn_risk_customers(subscription_events: pd.DataFrame) -> pd.DataFrame:
+    subscription_events["event_date"] = pd.to_datetime(
+        subscription_events["event_date"]
+    )
+    subscription_events = subscription_events.sort_values(
+        ["user_id", "event_date", "event_id"]
+    )
+    last_events = (
+        subscription_events.groupby("user_id")
+        .tail(1)[["user_id", "event_type", "plan_name", "monthly_amount"]]
+        .rename(
+            columns={
+                "event_type": "last_event_type",
+                "plan_name": "current_plan",
+                "monthly_amount": "current_monthly_amount",
+            }
+        )
+    )
+
+    agg_df = (
+        subscription_events.groupby("user_id")
+        .agg(
+            start_date=("event_date", "min"),
+            last_event_date=("event_date", "max"),
+            max_historical_amount=("monthly_amount", "max"),
+            downgrade_count=("event_type", lambda x: (x == "downgrade").sum()),
+        )
+        .reset_index()
+    )
+
+    merged = pd.merge(agg_df, last_events, on="user_id", how="inner")
+    merged["days_as_subscriber"] = (
+        merged["last_event_date"] - merged["start_date"]
+    ).dt.days
+
+    result = merged[
+        (merged["last_event_type"] != "cancel")
+        & (merged["downgrade_count"] >= 1)
+        & (merged["current_monthly_amount"] < 0.5 * merged["max_historical_amount"])
+        & (merged["days_as_subscriber"] >= 60)
+    ][
+        [
+            "user_id",
+            "current_plan",
+            "current_monthly_amount",
+            "max_historical_amount",
+            "days_as_subscriber",
+        ]
+    ]
+
+    result = result.sort_values(
+        ["days_as_subscriber", "user_id"], ascending=[False, True]
+    ).reset_index(drop=True)
+    return result
 ```
 
 
diff --git a/solution/3700-3799/3716.Find Churn Risk Customers/README_EN.md b/solution/3700-3799/3716.Find Churn Risk Customers/README_EN.md
index cfe6dc8d4e52d..9b779dbdf67d6 100644
--- a/solution/3700-3799/3716.Find Churn Risk Customers/README_EN.md	
+++ b/solution/3700-3799/3716.Find Churn Risk Customers/README_EN.md	
@@ -20,7 +20,7 @@ tags:
 
 
 +------------------+---------+
-| Column Name      | Type    | 
+| Column Name      | Type    |
 +------------------+---------+
 | event_id         | int     |
 | user_id          | int     |
@@ -156,14 +156,126 @@ For cancel events, monthly_amount is 0.
 
 
 
-### Solution 1
+### Solution 1: Grouping Statistics + Join + Conditional Filtering
+
+We first use a window function to rank each user's records by event date and event ID in descending order and take the top-ranked row, obtaining the latest event information for each user. Then, we group and aggregate the subscription history information for each user, including the subscription start date, last event date, historical maximum subscription fee, and the number of downgrade events. Finally, we join the latest event information with the historical statistics and filter according to the conditions specified in the problem to get the list of customers at risk of churn.
 
 
 
 #### MySQL
 
 ```sql
+WITH
+    user_with_last_event AS (
+        SELECT
+            s.*,
+            ROW_NUMBER() OVER (
+                PARTITION BY user_id
+                ORDER BY event_date DESC, event_id DESC
+            ) AS rn
+        FROM subscription_events s
+    ),
+    user_history AS (
+        SELECT
+            user_id,
+            MIN(event_date) AS start_date,
+            MAX(event_date) AS last_event_date,
+            MAX(monthly_amount) AS max_historical_amount,
+            SUM(
+                CASE
+                    WHEN event_type = 'downgrade' THEN 1
+                    ELSE 0
+                END
+            ) AS downgrade_count
+        FROM subscription_events
+        GROUP BY user_id
+    ),
+    latest_event AS (
+        SELECT
+            user_id,
+            event_type AS last_event_type,
+            plan_name AS current_plan,
+            monthly_amount AS current_monthly_amount
+        FROM user_with_last_event
+        WHERE rn = 1
+    )
+SELECT
+    l.user_id,
+    l.current_plan,
+    l.current_monthly_amount,
+    h.max_historical_amount,
+    DATEDIFF(h.last_event_date, h.start_date) AS days_as_subscriber
+FROM
+    latest_event l
+    JOIN user_history h ON l.user_id = h.user_id
+WHERE
+    l.last_event_type <> 'cancel'
+    AND h.downgrade_count >= 1
+    AND l.current_monthly_amount < 0.5 * h.max_historical_amount
+    AND DATEDIFF(h.last_event_date, h.start_date) >= 60
+ORDER BY days_as_subscriber DESC, l.user_id ASC;
+```
 
+#### Pandas
+
+```python
+import pandas as pd
+
+
+def find_churn_risk_customers(subscription_events: pd.DataFrame) -> pd.DataFrame:
+    subscription_events["event_date"] = pd.to_datetime(
+        subscription_events["event_date"]
+    )
+    subscription_events = subscription_events.sort_values(
+        ["user_id", "event_date", "event_id"]
+    )
+    last_events = (
+        subscription_events.groupby("user_id")
+        .tail(1)[["user_id", "event_type", "plan_name", "monthly_amount"]]
+        .rename(
+            columns={
+                "event_type": "last_event_type",
+                "plan_name": "current_plan",
+                "monthly_amount": "current_monthly_amount",
+            }
+        )
+    )
+
+    agg_df = (
+        subscription_events.groupby("user_id")
+        .agg(
+            start_date=("event_date", "min"),
+            last_event_date=("event_date", "max"),
+            max_historical_amount=("monthly_amount", "max"),
+            downgrade_count=("event_type", lambda x: (x == "downgrade").sum()),
+        )
+        .reset_index()
+    )
+
+    merged = pd.merge(agg_df, last_events, on="user_id", how="inner")
+    merged["days_as_subscriber"] = (
+        merged["last_event_date"] - merged["start_date"]
+    ).dt.days
+
+    result = merged[
+        (merged["last_event_type"] != "cancel")
+        & (merged["downgrade_count"] >= 1)
+        & (merged["current_monthly_amount"] < 0.5 * merged["max_historical_amount"])
+        & (merged["days_as_subscriber"] >= 60)
+    ][
+        [
+            "user_id",
+            "current_plan",
+            "current_monthly_amount",
+            "max_historical_amount",
+            "days_as_subscriber",
+        ]
+    ]
+
+    result = result.sort_values(
+        ["days_as_subscriber", "user_id"], ascending=[False, True]
+    ).reset_index(drop=True)
+    return result
 ```
 
 
diff --git a/solution/3700-3799/3716.Find Churn Risk Customers/Solution.py b/solution/3700-3799/3716.Find Churn Risk Customers/Solution.py
new file mode 100644
index 0000000000000..9fbbf4af97517
--- /dev/null
+++ b/solution/3700-3799/3716.Find Churn Risk Customers/Solution.py	
@@ -0,0 +1,57 @@
+import pandas as pd
+
+
+def find_churn_risk_customers(subscription_events: pd.DataFrame) -> pd.DataFrame:
+    subscription_events["event_date"] = pd.to_datetime(
+        subscription_events["event_date"]
+    )
+    subscription_events = subscription_events.sort_values(
+        ["user_id", "event_date", "event_id"]
+    )
+    last_events = (
+        subscription_events.groupby("user_id")
+        .tail(1)[["user_id", "event_type", "plan_name", "monthly_amount"]]
+        .rename(
+            columns={
+                "event_type": "last_event_type",
+                "plan_name": "current_plan",
+                "monthly_amount": "current_monthly_amount",
+            }
+        )
+    )
+
+    agg_df = (
+        subscription_events.groupby("user_id")
+        .agg(
+            start_date=("event_date", "min"),
+            last_event_date=("event_date", "max"),
+            max_historical_amount=("monthly_amount", "max"),
+            downgrade_count=("event_type", lambda x: (x == "downgrade").sum()),
+        )
+        .reset_index()
+    )
+
+    merged = pd.merge(agg_df, last_events, on="user_id", how="inner")
+    merged["days_as_subscriber"] = (
+        merged["last_event_date"] - merged["start_date"]
+    ).dt.days
+
+    result = merged[
+        (merged["last_event_type"] != "cancel")
+        & (merged["downgrade_count"] >= 1)
+        & (merged["current_monthly_amount"] < 0.5 * merged["max_historical_amount"])
+        & (merged["days_as_subscriber"] >= 60)
+    ][
+        [
+            "user_id",
+            "current_plan",
+            "current_monthly_amount",
+            "max_historical_amount",
+            "days_as_subscriber",
+        ]
+    ]
+
+    result = result.sort_values(
+        ["days_as_subscriber", "user_id"], ascending=[False, True]
+    ).reset_index(drop=True)
+    return result
diff --git a/solution/3700-3799/3716.Find Churn Risk Customers/Solution.sql b/solution/3700-3799/3716.Find Churn Risk Customers/Solution.sql
new file mode 100644
index 0000000000000..d8b922a5ed031
--- /dev/null
+++ b/solution/3700-3799/3716.Find Churn Risk Customers/Solution.sql	
@@ -0,0 +1,49 @@
+WITH
+    user_with_last_event AS (
+        SELECT
+            s.*,
+            ROW_NUMBER() OVER (
+                PARTITION BY user_id
+                ORDER BY event_date DESC, event_id DESC
+            ) AS rn
+        FROM subscription_events s
+    ),
+    user_history AS (
+        SELECT
+            user_id,
+            MIN(event_date) AS start_date,
+            MAX(event_date) AS last_event_date,
+            MAX(monthly_amount) AS max_historical_amount,
+            SUM(
+                CASE
+                    WHEN event_type = 'downgrade' THEN 1
+                    ELSE 0
+                END
+            ) AS downgrade_count
+        FROM subscription_events
+        GROUP BY user_id
+    ),
+    latest_event AS (
+        SELECT
+            user_id,
+            event_type AS last_event_type,
+            plan_name AS current_plan,
+            monthly_amount AS current_monthly_amount
+        FROM user_with_last_event
+        WHERE rn = 1
+    )
+SELECT
+    l.user_id,
+    l.current_plan,
+    l.current_monthly_amount,
+    h.max_historical_amount,
+    DATEDIFF(h.last_event_date, h.start_date) AS days_as_subscriber
+FROM
+    latest_event l
+    JOIN user_history h ON l.user_id = h.user_id
+WHERE
+    l.last_event_type <> 'cancel'
+    AND h.downgrade_count >= 1
+    AND l.current_monthly_amount < 0.5 * h.max_historical_amount
+    AND DATEDIFF(h.last_event_date, h.start_date) >= 60
+ORDER BY days_as_subscriber DESC, l.user_id ASC;