In this problem, we are given an array $a$ of length $n$ and a target array $b$ of length $m$. Each element $a_i$ has an associated deletion cost $p_i$. We need to find the minimum cost to transform $a$ into $b$ using a specific "strange function" $f(a)$, or determine that it is impossible.
Condition Analysis
The function $f(a)$ generates a sequence by processing elements of $a$ one by one. An element is added to the result if it is strictly greater than all previously added elements. For $f(a)$ to equal $b$, the following conditions must be met:
- $b$ must be a subsequence of $a$.
- Since $f$ only outputs elements that are strictly greater than every previously output element, $b$ itself must be strictly increasing.
- For any two elements $a_{z_i}$ and $a_{z_{i+1}}$ that are kept to form $b_i$ and $b_{i+1}$, any element $a_k$ located between them ($z_i < k < z_{i+1}$) must be deleted if $a_k > b_i$. If $a_k \leq b_i$, it will not be picked up by the function $f$ even if it is not deleted. However, if $p_k < 0$, we should delete it anyway to reduce the total cost.
To simplify the implementation, we can append a sentinel value to both arrays: $a_{n+1} = \infty$ and $b_{m+1} = \infty$, with $p_{n+1} = 0$. This ensures that the entire target sequence $b$ is processed.
Dynamic Programming with Fenwick Tree
Let $dp[j]$ represent the minimum cost to form the prefix $b[1 \dots j]$ using a prefix of array $a$. When we encounter an element $a_i$ such that $a_i = b_j$, we can transition from $dp[j-1]$.
The cost involves:
- The previous state $dp[j-1]$.
- The sum of $p_k$ for all $k < i$ that were not part of the $b$ prefix and must be deleted.
Specifically, an element $a_k$ must be deleted if:
- $p_k < 0$: Deleting it always helps.
- $p_k > 0$ and $a_k > b_{j-1}$: If not deleted, $a_k$ would be picked up by the function $f$, violating the sequence $b$.
We can maintain the sum of these costs using a Binary Indexed Tree (Fenwick Tree) to handle the condition $a_k > b_{j-1}$ efficiently.
#include <iostream>
#include <vector>
#include <algorithm>
using namespace std;
typedef long long ll;
const ll INF = 1e18;
// 1-indexed Fenwick (binary indexed) tree supporting point updates and
// prefix / range sums over [1, size]. All entries start at 0.
struct FenwickTree {
    int size;
    std::vector<long long> tree;

    FenwickTree(int n) : size(n), tree(n + 1, 0) {}

    // Add `delta` to position i (1-based).
    void update(int i, long long delta) {
        while (i <= size) {
            tree[i] += delta;
            i += i & -i;
        }
    }

    // Sum of positions [1, i].
    long long query(int i) {
        long long acc = 0;
        while (i > 0) {
            acc += tree[i];
            i -= i & -i;
        }
        return acc;
    }

    // Sum of positions [l, r]; an empty range (l > r) sums to 0.
    long long queryRange(int l, int r) {
        return (l > r) ? 0 : query(r) - query(l - 1);
    }
};
// O(n log n) solution: DP over prefixes of b, with a Fenwick tree holding the
// positive deletion costs of all elements seen so far, keyed by value.
int main() {
    ios::sync_with_stdio(false);
    cin.tie(nullptr);
    int n;
    cin >> n;
    vector<int> a(n + 2);
    vector<int> p(n + 2);
    for (int i = 1; i <= n; i++) cin >> a[i];
    for (int i = 1; i <= n; i++) cin >> p[i];
    int m;
    cin >> m;
    vector<int> b(m + 2);
    // pos_in_b[v] = index j with b[j] == v, or 0 if value v does not occur in b.
    vector<int> pos_in_b(n + 2, 0);
    for (int i = 1; i <= m; i++) {
        cin >> b[i];
        pos_in_b[b[i]] = i;
    }
    // Append sentinels: a value larger than everything else with zero deletion
    // cost, so the transition that completes b[1..m] happens inside the loop.
    n++; a[n] = n; p[n] = 0;
    m++; b[m] = n; pos_in_b[n] = m;
    // ft holds, per value, the positive deletion cost of elements seen so far;
    // negative_p_sum accumulates all negative costs (always worth deleting).
    FenwickTree ft(n);
    vector<ll> dp(m + 1, INF);
    dp[0] = 0;
    ll negative_p_sum = 0;
    for (int i = 1; i <= n; i++) {
        int idx = pos_in_b[a[i]];
        // Transition: a[i] realizes b[idx]. On top of dp[idx-1] we must pay for
        // every already-seen element with value > b[idx-1] (it would be picked
        // up by f otherwise) and for every negative-cost element.
        ll current_dp = INF;
        if (idx > 0 && dp[idx - 1] != INF) {
            current_dp = dp[idx - 1] + ft.queryRange(b[idx - 1] + 1, n) + negative_p_sum;
        }
        if (p[i] < 0) {
            negative_p_sum += p[i];
        } else {
            ft.update(a[i], p[i]);
        }
        if (idx > 0 && current_dp != INF) {
            // Store dp[idx] normalized by the accumulators at this moment;
            // reading it back later adds the accumulators of that later moment,
            // so only deletions that happen in between are charged (telescoping).
            dp[idx] = min(dp[idx], current_dp - ft.queryRange(a[i] + 1, n) - negative_p_sum);
        }
    }
    // Undo the normalization applied when dp[m] was stored. Since b[m] = n is
    // the sentinel, queryRange(b[m] + 1, n) is an empty range (0), and only
    // negative_p_sum has to be added back. Note: adding ft.queryRange(1, n)
    // here would be wrong — it would charge the cost of every retained
    // positive-cost element, which is never paid.
    ll result = dp[m] + ft.queryRange(b[m] + 1, n) + negative_p_sum;
    if (result > INF / 2) {
        cout << "NO" << endl;
    } else {
        cout << "YES" << endl;
        cout << result << endl;
    }
    return 0;
}
Linear Time Optimization
The $O(n \log n)$ approach uses a Fenwick Tree to calculate the sum of $p_k$ where $a_k > b_{j-1}$. We can optimize this to $O(n)$ by observing how the threshold $b_{j-1}$ changes. Instead of querying a range every time, we can maintain the total cost contribution of elements that must be deleted in an array that is updated as we iterate through $a$.
By pre-calculating the position of each $a_i$ in $b$ using a mapping or two pointers, we can distribute the "cost" of deleting $a_i$ into buckets corresponding to the relevant intervals of $b$. This eliminates the logarithmic overhead of the Fenwick Tree.
#include <iostream>
#include <vector>
#include <algorithm>
using namespace std;
typedef long long ll;
const ll INF = 1e18;
// O(n) solution: replaces the Fenwick-tree range queries with cost buckets
// keyed by the interval of b an element's value falls into.
int main() {
ios::sync_with_stdio(false);
cin.tie(nullptr);
int n;
cin >> n;
vector<int> a(n + 2);
for (int i = 1; i <= n; i++) cin >> a[i];
vector<int> p(n + 2);
for (int i = 1; i <= n; i++) cin >> p[i];
int m;
cin >> m;
vector<int> b(m + 2);
// val_to_idx[v] = index j with b[j] == v, or 0 if value v does not occur in b.
vector<int> val_to_idx(n + 2, 0);
for (int i = 1; i <= m; i++) {
cin >> b[i];
val_to_idx[b[i]] = i;
}
// Sentinels: a value larger than everything else with zero deletion cost,
// so the transition that completes b[1..m] happens inside the loop.
n++; a[n] = n; p[n] = 0;
m++; b[m] = n; val_to_idx[n] = m;
// b_ptr[v] = smallest j with b[j] >= v (two pointers; the sentinel b[m] = n
// guarantees such a j exists for every value v <= n).
vector<int> b_ptr(n + 1);
for (int i = 1, j = 1; i <= n; i++) {
while (j < m && b[j] < i) j++;
b_ptr[i] = j;
}
vector<ll> dp(m + 1, INF);
// bucket_cost[j] accumulates the positive deletion costs of seen elements
// whose value v satisfies b[j-1] < v <= b[j]. Such an element must actually
// be deleted exactly when it appears before the position of b[j] — which is
// exactly when it has already been added by the time transition j fires.
vector<ll> bucket_cost(m + 2, 0);
dp[0] = 0;
// Running sum of all negative costs seen so far (always worth deleting).
ll total_neg = 0;
for (int i = 1; i <= n; i++) {
int target_idx = val_to_idx[a[i]];
if (target_idx > 0) {
if (dp[target_idx - 1] != INF) {
// a[i] realizes b[target_idx]: pay for the positive-cost elements in
// this bucket seen so far, plus all negative costs so far.
dp[target_idx] = min(dp[target_idx], dp[target_idx - 1] + bucket_cost[target_idx] + total_neg);
}
}
// Insert a[i] AFTER the transition above, so an element is never charged
// by its own transition (kept elements are not deleted).
if (p[i] < 0) {
total_neg += p[i];
} else {
bucket_cost[b_ptr[a[i]]] += p[i];
}
// Normalize the stored value by the CURRENT total_neg (which now includes
// p[i] if negative); a later transition adds back the total_neg of that
// later moment, charging only the negatives strictly in between.
if (target_idx > 0 && dp[target_idx] != INF) {
dp[target_idx] -= total_neg;
}
}
// Since we subtracted total_neg inside the loop for optimization,
// we correct it here.
ll final_ans = dp[m] + total_neg;
if (final_ans > INF / 2) {
cout << "NO" << endl;
} else {
cout << "YES" << endl;
cout << final_ans << endl;
}
return 0;
}